3 * exception support for AMD64
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 // Secret password to unlock wcscat_s on mxe, must happen before string.h included
18 #define MINGW_HAS_SECURE_API 1
27 #ifdef HAVE_UCONTEXT_H
31 #include <mono/arch/amd64/amd64-codegen.h>
32 #include <mono/metadata/abi-details.h>
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/threads.h>
36 #include <mono/metadata/threads-types.h>
37 #include <mono/metadata/debug-helpers.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/gc-internals.h>
40 #include <mono/metadata/mono-debug.h>
41 #include <mono/utils/mono-mmap.h>
44 #include "mini-amd64.h"
47 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Win32 SEH support state: thunk that restores the stack guard page after a
 * stack overflow, plus the per-signal handlers installed via
 * win32_seh_set_handler (). */
50 static void (*restore_stack) (void);
51 static MonoW32ExceptionHandler fpe_handler;
52 static MonoW32ExceptionHandler ill_handler;
53 static MonoW32ExceptionHandler segv_handler;
/* Previously installed top-level filter and the vectored-handler cookie,
 * saved so win32_seh_cleanup () can restore/remove them. */
55 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
56 void *mono_win_vectored_exception_handle;
/* Invoke the registered handler for exception kind _ex (fpe/ill/segv),
 * if one has been installed. */
58 #define W32_SEH_HANDLE_EX(_ex) \
59 if (_ex##_handler) _ex##_handler(0, ep, ctx)
/*
 * Last-chance top-level exception filter: chain to whatever filter was
 * installed before ours; otherwise report the fault as a native crash and
 * let the OS continue its handler search.
 */
61 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
63 #ifndef MONO_CROSS_COMPILE
64 if (mono_old_win_toplevel_exception_filter) {
	/* Delegate to the previously installed top-level filter. */
65 return (*mono_old_win_toplevel_exception_filter)(ep);
69 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
71 return EXCEPTION_CONTINUE_SEARCH;
/*
 * Generate (once) a small trampoline that, after a stack overflow has been
 * unwound, re-establishes the stack guard page via _resetstkoflw () and then
 * resumes execution at the context saved in jit_tls->stack_restore_ctx.
 * Returns the entry point of the generated code.
 */
75 get_win32_restore_stack (void)
77 static guint8 *start = NULL;
83 /* restore_stack (void) */
84 start = code = mono_global_codeman_reserve (128);
86 amd64_push_reg (code, AMD64_RBP);
87 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
89 /* push 32 bytes of stack space for Win64 calling convention */
90 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
92 /* restore guard page */
93 amd64_mov_reg_imm (code, AMD64_R11, _resetstkoflw);
94 amd64_call_reg (code, AMD64_R11);
96 /* get jit_tls with context to restore */
97 amd64_mov_reg_imm (code, AMD64_R11, mono_tls_get_jit_tls);
98 amd64_call_reg (code, AMD64_R11);
100 /* move jit_tls from return reg to arg reg */
101 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
103 /* retrieve pointer to saved context */
104 amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, stack_restore_ctx));
106 /* this call does not return */
107 amd64_mov_reg_imm (code, AMD64_R11, mono_restore_context);
108 amd64_call_reg (code, AMD64_R11);
110 g_assert ((code - start) < 128);
112 mono_arch_flush_icache (start, code - start);
113 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
119 * Unhandled Exception Filter
120 * Top-level per-process exception handler.
 *
 * Vectored exception handler: translates Win32 structured exceptions
 * (stack overflow, access violation, illegal instruction, arithmetic
 * faults) into the runtime's exception machinery. If no registered
 * handler consumed the exception, the filter result asks the OS to keep
 * searching so chained handlers can run.
122 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
124 EXCEPTION_RECORD* er;
127 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
128 MonoDomain* domain = mono_domain_get ();
130 /* If the thread is not managed by the runtime return early */
132 return EXCEPTION_CONTINUE_SEARCH;
134 jit_tls->mono_win_chained_exception_needs_run = FALSE;
135 res = EXCEPTION_CONTINUE_EXECUTION;
137 er = ep->ExceptionRecord;
138 ctx = ep->ContextRecord;
140 switch (er->ExceptionCode) {
141 case EXCEPTION_STACK_OVERFLOW:
142 if (mono_arch_handle_exception (ctx, domain->stack_overflow_ex)) {
143 /* need to restore stack protection once stack is unwound
144 * restore_stack will restore stack protection and then
145 * resume control to the saved stack_restore_ctx */
146 mono_sigctx_to_monoctx (ctx, &jit_tls->stack_restore_ctx);
	/* Resume execution in the restore_stack trampoline instead of the
	 * faulting instruction. */
147 ctx->Rip = (guint64)restore_stack;
150 case EXCEPTION_ACCESS_VIOLATION:
151 W32_SEH_HANDLE_EX(segv);
153 case EXCEPTION_ILLEGAL_INSTRUCTION:
154 W32_SEH_HANDLE_EX(ill);
156 case EXCEPTION_INT_DIVIDE_BY_ZERO:
157 case EXCEPTION_INT_OVERFLOW:
158 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
159 case EXCEPTION_FLT_OVERFLOW:
160 case EXCEPTION_FLT_UNDERFLOW:
161 case EXCEPTION_FLT_INEXACT_RESULT:
162 W32_SEH_HANDLE_EX(fpe);
165 jit_tls->mono_win_chained_exception_needs_run = TRUE;
169 if (jit_tls->mono_win_chained_exception_needs_run) {
170 /* Don't copy context back if we chained exception
171 * as the handler may have modified the EXCEPTION_POINTERS
172 * directly. We don't pass sigcontext to chained handlers.
173 * Return continue search so the UnhandledExceptionFilter
174 * can correctly chain the exception.
176 res = EXCEPTION_CONTINUE_SEARCH;
/* Install the runtime's Win32 exception handling: generate the stack-restore
 * trampoline, save the current top-level filter, and register both the
 * top-level filter and a first-in-line (order == 1) vectored handler. */
182 void win32_seh_init()
184 restore_stack = get_win32_restore_stack ();
186 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
187 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
/* Undo win32_seh_init (): restore the saved top-level filter and remove the
 * vectored exception handler. */
190 void win32_seh_cleanup()
194 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
196 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
/* Register @handler for the given signal @type; the stored pointer is later
 * invoked through the W32_SEH_HANDLE_EX macro. */
200 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
204 fpe_handler = handler;
207 ill_handler = handler;
210 segv_handler = handler;
217 #endif /* TARGET_WIN32 */
221 * mono_arch_get_restore_context:
 *
223 * Returns a pointer to a method which restores a previously saved sigcontext.
 * The generated code loads every general register from MonoContext.gregs and
 * finally jumps to the saved RIP.
226 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
228 guint8 *start = NULL;
230 MonoJumpInfo *ji = NULL;
231 GSList *unwind_ops = NULL;
234 /* restore_context (MonoContext *ctx) */
236 start = code = (guint8 *)mono_global_codeman_reserve (256);
/* Keep the context pointer in R11; R11 itself is restored last (from the
 * context's RIP slot) so the loads below don't clobber it. */
238 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
240 /* Restore all registers except %rip and %r11 */
241 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
242 for (i = 0; i < AMD64_NREG; ++i) {
243 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
244 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
248 * The context resides on the stack, in the stack frame of the
249 * caller of this function. The stack pointer that we need to
250 * restore is potentially many stack frames higher up, so the
251 * distance between them can easily be more than the red zone
252 * size. Hence the stack pointer can be restored only after
253 * we have finished loading everything from the context.
255 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
256 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
257 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
259 /* jump to the saved IP */
260 amd64_jump_reg (code, AMD64_R11);
262 mono_arch_flush_icache (start, code - start);
263 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
266 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
272 * mono_arch_get_call_filter:
 *
274 * Returns a pointer to a method which calls an exception filter. We
275 * also use this function to call finally handlers (we pass NULL as
276 * @exc object in this case).
 *
 * The generated thunk saves the callee-saved registers, loads them (and RBP)
 * from the supplied MonoContext, calls the handler address in ARG_REG2, and
 * then restores its own frame.
279 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
285 MonoJumpInfo *ji = NULL;
286 GSList *unwind_ops = NULL;
287 const guint kMaxCodeSize = 128;
289 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
291 /* call_filter (MonoContext *ctx, unsigned long eip) */
294 /* Alloc new frame */
295 amd64_push_reg (code, AMD64_RBP);
296 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
298 /* Save callee saved regs */
300 for (i = 0; i < AMD64_NREG; ++i)
301 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
302 amd64_push_reg (code, i);
/* Our own RBP is saved separately so it can be restored after the call,
 * independently of the context's RBP loaded below. */
308 amd64_push_reg (code, AMD64_RBP);
310 /* Make stack misaligned, the call will make it aligned again */
312 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
314 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
317 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
318 /* load callee saved regs */
319 for (i = 0; i < AMD64_NREG; ++i) {
320 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
321 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
323 /* load exc register */
324 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
326 /* call the handler */
327 amd64_call_reg (code, AMD64_ARG_REG2);
330 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
333 amd64_pop_reg (code, AMD64_RBP);
335 /* Restore callee saved regs */
336 for (i = AMD64_NREG; i >= 0; --i)
337 if (AMD64_IS_CALLEE_SAVED_REG (i))
338 amd64_pop_reg (code, i);
/* Tear down the frame and return. */
341 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
342 amd64_pop_reg (code, AMD64_RBP);
348 g_assert ((code - start) < kMaxCodeSize);
350 mono_arch_flush_icache (start, code - start);
351 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
354 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
358 #endif /* !DISABLE_JIT */
361 * The first few arguments are dummy, to force the other arguments to be passed on
362 * the stack, this avoids overwriting the argument registers in the throw trampoline.
 *
 * C entry point called by the throw trampolines: restores @mctx, clears any
 * stale trace info on the exception object, rewinds RIP into the faulting
 * call instruction, and hands off to the generic exception machinery.
 * Does not return.
365 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
366 guint64 dummy5, guint64 dummy6,
367 MonoContext *mctx, MonoObject *exc, gboolean rethrow)
372 /* mctx is on the caller's stack */
373 memcpy (&ctx, mctx, sizeof (MonoContext));
375 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
376 MonoException *mono_ex = (MonoException*)exc;
/* Drop previously captured trace state so a fresh one is recorded. */
378 mono_ex->stack_trace = NULL;
379 mono_ex->trace_ips = NULL;
382 mono_error_assert_ok (&error);
384 /* adjust eip so that it point into the call instruction */
385 ctx.gregs [AMD64_RIP] --;
387 mono_handle_exception (&ctx, exc);
388 mono_restore_context (&ctx);
389 g_assert_not_reached ();
/*
 * Corlib variant of the throw entry point: builds the exception object from
 * @ex_token_index, rebases RIP by @pc_offset to point at the throw site,
 * and forwards to mono_amd64_throw_exception (). Does not return.
 */
393 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
394 guint64 dummy5, guint64 dummy6,
395 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
397 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
400 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
402 mctx->gregs [AMD64_RIP] -= pc_offset;
404 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
405 mctx->gregs [AMD64_RIP] += 1;
407 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
/*
 * Trampoline target used to resume stack unwinding with the context in
 * @mctx; the dummy arguments exist only to match the shared trampoline
 * calling convention (all real args on the stack).
 */
411 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
412 guint64 dummy5, guint64 dummy6,
413 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
415 /* Only the register parameters are valid */
418 /* mctx is on the caller's stack */
419 memcpy (&ctx, mctx, sizeof (MonoContext));
421 mono_resume_unwind (&ctx);
426 * get_throw_trampoline:
 *
428 * Generate a call to mono_amd64_throw_exception/
429 * mono_amd64_throw_corlib_exception.
 *
 * The trampoline saves the caller's full register state into a MonoContext
 * on the stack, lays out the C helper's arguments in the stack slots that
 * both the SysV and Win64 ABIs will read them from, and calls the helper
 * selected by @resume_unwind/@corlib (patched via JIT icall info when @aot).
432 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
436 MonoJumpInfo *ji = NULL;
437 GSList *unwind_ops = NULL;
438 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
439 const guint kMaxCodeSize = 256;
442 dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
444 dummy_stack_space = 0;
448 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
450 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
452 /* The stack is unaligned on entry */
453 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
458 unwind_ops = mono_arch_get_cie_program ();
461 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
463 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
464 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
468 * To hide linux/windows calling convention differences, we pass all arguments on
469 * the stack by passing 6 dummy values in registers.
/* Stack layout of the helper's real arguments, past the Win64 shadow space. */
472 arg_offsets [0] = dummy_stack_space + 0;
473 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
474 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
475 ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
476 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Save every general register into the in-stack MonoContext. */
479 for (i = 0; i < AMD64_NREG; ++i)
481 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Record the caller's SP (entry SP before our frame + return address). */
483 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
484 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
/* Record the caller's IP (return address sitting just above our frame). */
486 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
487 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
488 /* Set arg1 == ctx */
489 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
490 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
491 /* Set arg2 == exc/ex_token_index */
493 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
495 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
496 /* Set arg3 == rethrow/pc offset */
498 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
502 * The caller doesn't pass in a pc/pc offset, instead we simply use the
503 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
505 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
507 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
509 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
513 const char *icall_name;
516 icall_name = "mono_amd64_resume_unwind";
518 icall_name = "mono_amd64_throw_corlib_exception";
520 icall_name = "mono_amd64_throw_exception";
/* AOT: the helper address is resolved via a RIP-relative patch slot. */
521 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
522 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
524 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
526 amd64_call_reg (code, AMD64_R11);
/* The helper never returns; trap if it somehow does. */
527 amd64_breakpoint (code);
529 mono_arch_flush_icache (start, code - start);
531 g_assert ((code - start) < kMaxCodeSize);
532 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
534 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
537 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
543 * mono_arch_get_throw_exception:
544 * \returns a function pointer which can be used to raise
545 * exceptions. The returned function has the following
546 * signature: void (*func) (MonoException *exc);
549 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
551 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
/* Same as mono_arch_get_throw_exception (), but the generated trampoline
 * passes rethrow == TRUE so existing trace info is preserved. */
555 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
557 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
561 * mono_arch_get_throw_corlib_exception:
 *
563 * Returns a function pointer which can be used to raise
564 * corlib exceptions. The returned function has the following
565 * signature: void (*func) (guint32 ex_token, guint32 offset);
566 * Here, offset is the offset which needs to be subtracted from the caller IP
567 * to get the IP of the throw. Passing the offset has the advantage that it
568 * needs no relocations in the caller.
571 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
573 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
575 #endif /* !DISABLE_JIT */
578 * mono_arch_unwind_frame:
 *
580 * This function is used to gather information from @ctx, and store it in @frame_info.
581 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
582 * is modified if needed.
583 * Returns TRUE on success, FALSE otherwise.
 *
 * Two unwind paths are visible here: DWARF-style unwind info for JITted
 * frames, and the LMF (Last Managed Frame) chain for transitions through
 * native code / trampolines. Low bits of lmf->previous_lmf are used as tag
 * bits (see the & 1 / & 2 / & 4 tests below).
586 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
587 MonoJitInfo *ji, MonoContext *ctx,
588 MonoContext *new_ctx, MonoLMF **lmf,
589 mgreg_t **save_locations,
590 StackFrameInfo *frame)
592 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
595 memset (frame, 0, sizeof (StackFrameInfo));
601 mgreg_t regs [MONO_MAX_IREGS + 1];
603 guint32 unwind_info_len;
605 guint8 *epilog = NULL;
607 if (ji->is_trampoline)
608 frame->type = FRAME_TYPE_TRAMPOLINE;
610 frame->type = FRAME_TYPE_MANAGED;
612 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
614 frame->unwind_info = unwind_info;
615 frame->unwind_info_len = unwind_info_len;
/* Debug dump of the frame being unwound. */
618 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
619 mono_print_unwind_info (unwind_info, unwind_info_len);
621 /* LLVM compiled code doesn't have this info */
622 if (ji->has_arch_eh_info)
623 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
625 for (i = 0; i < AMD64_NREG; ++i)
626 regs [i] = new_ctx->gregs [i];
628 mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
629 (guint8*)ji->code_start + ji->code_size,
630 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
631 save_locations, MONO_MAX_IREGS, &cfa);
633 for (i = 0; i < AMD64_NREG; ++i)
634 new_ctx->gregs [i] = regs [i];
636 /* The CFA becomes the new SP value */
637 new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Point RIP into the call instruction rather than at the return address. */
640 new_ctx->gregs [AMD64_RIP] --;
/* Tag bit 2: extended LMF entry (MonoLMFExt) describing a special
 * transition rather than a plain managed-to-native frame. */
646 if (((guint64)(*lmf)->previous_lmf) & 2) {
647 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
649 if (ext->debugger_invoke) {
651 * This LMF entry is created by the soft debug code to mark transitions to
652 * managed code done during invokes.
654 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
655 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
656 } else if (ext->interp_exit) {
657 frame->type = FRAME_TYPE_INTERP_TO_MANAGED;
658 frame->interp_exit_data = ext->interp_exit_data;
660 g_assert_not_reached ();
/* Mask off the tag bits to get the real previous LMF pointer. */
663 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
/* Recover the native return IP depending on how this LMF was created. */
668 if (((guint64)(*lmf)->previous_lmf) & 4) {
669 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
671 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
672 } else if (((guint64)(*lmf)->previous_lmf) & 1) {
673 /* This LMF has the rip field set */
675 } else if ((*lmf)->rsp == 0) {
680 * The rsp field is set just before the call which transitioned to native
681 * code. Obtain the rip from the stack.
683 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
686 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
688 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
689 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
697 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
699 if (((guint64)(*lmf)->previous_lmf) & 4) {
700 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
702 /* Trampoline frame */
703 for (i = 0; i < AMD64_NREG; ++i)
704 new_ctx->gregs [i] = ext->ctx->gregs [i];
/* Adjust IP into the call instruction. */
706 new_ctx->gregs [AMD64_RIP] --;
709 * The registers saved in the LMF will be restored using the normal unwind info,
710 * when the wrapper frame is processed.
714 new_ctx->gregs [AMD64_RIP] = rip;
715 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
716 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
/* Callee-saved registers other than RBP are unknown here; zero them. */
717 for (i = 0; i < AMD64_NREG; ++i) {
718 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
719 new_ctx->gregs [i] = 0;
723 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
734 * Called by resuming from a signal handler.
 *
 * Runs on the normal stack (set up by mono_arch_setup_async_callback ()):
 * re-reads the context stashed in jit_tls->ex_ctx, raises @obj as a managed
 * exception, and restores the context. Does not return.
737 handle_signal_exception (gpointer obj)
739 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
742 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
744 mono_handle_exception (&ctx, (MonoObject *)obj);
746 mono_restore_context (&ctx);
/*
 * Rewrite @ctx so that, when resumed, execution continues in
 * @async_cb (user_data) on a fresh stack frame below the red zone,
 * while preserving the original return address for native unwinders.
 */
750 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
752 guint64 sp = ctx->gregs [AMD64_RSP];
/* First (SysV) argument register carries user_data to the callback. */
754 ctx->gregs [AMD64_RDI] = (guint64)user_data;
756 /* Allocate a stack frame below the red zone */
758 /* The stack should be unaligned */
762 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
763 *(guint64*)sp = ctx->gregs [AMD64_RIP];
765 ctx->gregs [AMD64_RSP] = sp;
766 ctx->gregs [AMD64_RIP] = (guint64)async_cb;
770 * mono_arch_handle_exception:
771 * \param ctx saved processor state
772 * \param obj the exception object
 *
 * Entry point from signal handlers. With sigaction support the real work is
 * deferred to handle_signal_exception () on the normal stack; otherwise the
 * exception is handled inline and the signal context updated in place.
775 mono_arch_handle_exception (void *sigctx, gpointer obj)
777 #if defined(MONO_ARCH_USE_SIGACTION)
781 * Handling the exception in the signal handler is problematic, since the original
782 * signal is disabled, and we could run arbitrary code though the debugger. So
783 * resume into the normal stack and do most work there if possible.
785 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
787 /* Pass the ctx parameter in TLS */
788 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
790 mctx = jit_tls->ex_ctx;
791 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
792 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Fallback: handle the exception directly in the signal handler. */
798 mono_sigctx_to_monoctx (sigctx, &mctx);
800 mono_handle_exception (&mctx, obj);
802 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Extract the instruction pointer from a platform signal/exception context
 * (ucontext_t, Win32 CONTEXT, or MonoContext depending on the build). */
809 mono_arch_ip_from_context (void *sigctx)
811 #if defined(MONO_ARCH_USE_SIGACTION)
812 ucontext_t *ctx = (ucontext_t*)sigctx;
814 return (gpointer)UCONTEXT_REG_RIP (ctx);
815 #elif defined(HOST_WIN32)
816 return ((CONTEXT*)sigctx)->Rip;
818 MonoContext *ctx = sigctx;
819 return (gpointer)ctx->gregs [AMD64_RIP];
/* Re-protect the soft stack guard region (made accessible during stack
 * overflow handling) so future overflows fault again. */
824 restore_soft_guard_pages (void)
826 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
827 if (jit_tls->stack_ovf_guard_base)
828 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
832 * this function modifies mctx so that when it is restored, it
833 * won't execute starting at mctx.eip, but in a function that
834 * will restore the protection on the soft-guard pages and return back to
835 * continue at mctx.eip.
 *
 * It does that by pushing the original RIP as a fake return address and
 * redirecting RIP to restore_soft_guard_pages ().
838 prepare_for_guard_pages (MonoContext *mctx)
841 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
843 /* the return addr */
844 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
845 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
846 mctx->gregs [AMD64_RSP] = (guint64)sp;
/*
 * Runs on the regular stack after a fault taken on the signal altstack:
 * if the faulting IP is not in managed code, report a native crash;
 * otherwise raise @obj and (on stack overflow) arrange for the guard
 * pages to be re-protected before resuming.
 */
850 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
853 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
856 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
860 mono_handle_exception (&mctx, obj);
862 prepare_for_guard_pages (&mctx);
863 mono_restore_context (&mctx);
/*
 * Called from the altstack SIGSEGV handler: builds a call frame on the
 * faulting thread's real stack (below the red zone) containing a copy of
 * the signal context, then rewrites the signal context so that returning
 * from the handler enters altstack_handle_and_restore (copied_ctx, exc,
 * stack_ovf) instead of the faulting instruction.
 */
867 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
869 #if defined(MONO_ARCH_USE_SIGACTION)
870 MonoException *exc = NULL;
873 MonoContext *copied_ctx;
876 exc = mono_domain_get ()->stack_overflow_ex;
878 /* setup a call frame on the real stack so that control is returned there
879 * and exception handling can continue.
880 * The frame looks like:
884 * 128 is the size of the red zone
886 frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
/* 16-byte align, then reserve the frame below the red zone. */
889 sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
890 sp = (gpointer *)((char*)sp - frame_size);
891 copied_ctx = (MonoContext*)(sp + 4);
892 /* the arguments must be aligned */
893 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
894 mono_sigctx_to_monoctx (sigctx, copied_ctx);
895 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
896 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
897 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
/* SysV argument registers: ctx, exc object, stack_ovf flag. */
898 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
899 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
900 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
/*
 * Return the original IP recorded in the current LMF and clear the tag bit
 * (bit 0 of previous_lmf) that marked the rip field as valid.
 */
905 mono_amd64_get_original_ip (void)
907 MonoLMF *lmf = mono_get_lmf ();
911 /* Reset the change to previous_lmf */
912 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * Build the LLVM-specific exception trampolines (corlib throw, absolute
 * corlib throw, resume-unwind) and return them as a list of MonoTrampInfo.
 */
919 mono_amd64_get_exception_trampolines (gboolean aot)
922 GSList *tramps = NULL;
924 /* LLVM needs different throw trampolines */
925 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
926 tramps = g_slist_prepend (tramps, info);
928 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
929 tramps = g_slist_prepend (tramps, info);
931 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
932 tramps = g_slist_prepend (tramps, info);
936 #endif /* !DISABLE_JIT */
/*
 * Register the exception trampolines as JIT icalls. In the AOT path the
 * trampolines are looked up from the AOT image; otherwise they are
 * generated eagerly here (also avoiding lazy-init races).
 */
939 mono_arch_exceptions_init (void)
945 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
946 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
947 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
948 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
949 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
950 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
952 /* Call this to avoid initialization races */
953 tramps = mono_amd64_get_exception_trampolines (FALSE);
954 for (l = tramps; l; l = l->next) {
955 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
957 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
958 mono_tramp_info_register (info, NULL);
960 g_slist_free (tramps);
964 // Implies defined(TARGET_WIN32)
965 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
/* Allocate a zeroed Win64 UNWIND_INFO (version 1) and return it through
 * @monoui. Caller owns the allocation. */
968 mono_arch_unwindinfo_create (gpointer* monoui)
970 PUNWIND_INFO newunwindinfo;
971 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
972 newunwindinfo->Version = 1;
/*
 * Append a UWOP_PUSH_NONVOL unwind code for @unwind_op (register push in
 * the prolog). Codes are filled from the end of the UnwindCode array so
 * they end up sorted by descending CodeOffset, as the Win64 format requires.
 */
976 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
978 PUNWIND_CODE unwindcode;
981 g_assert (unwindinfo != NULL);
983 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
984 g_error ("Larger allocation needed for the unwind information.");
986 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
987 unwindcode = &unwindinfo->UnwindCode [codeindex];
988 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
989 unwindcode->CodeOffset = (guchar)unwind_op->when;
990 unwindcode->OpInfo = unwind_op->reg;
/* Ops must be added in increasing prolog-offset order. */
992 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
993 g_error ("Adding unwind info in wrong order.");
995 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append a UWOP_SET_FPREG unwind code establishing @unwind_op->reg as the
 * frame register at offset @unwind_op->val (must be a multiple of 16; the
 * format stores FrameOffset in units of 16 bytes).
 */
999 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1001 PUNWIND_CODE unwindcode;
1004 g_assert (unwindinfo != NULL);
1006 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1007 g_error ("Larger allocation needed for the unwind information.");
1009 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
1010 unwindcode = &unwindinfo->UnwindCode [codeindex];
1011 unwindcode->UnwindOp = UWOP_SET_FPREG;
1012 unwindcode->CodeOffset = (guchar)unwind_op->when;
1014 g_assert (unwind_op->val % 16 == 0);
1015 unwindinfo->FrameRegister = unwind_op->reg;
1016 unwindinfo->FrameOffset = unwind_op->val / 16;
/* Ops must be added in increasing prolog-offset order. */
1018 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1019 g_error ("Adding unwind info in wrong order.");
1021 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append an unwind code describing a prolog stack allocation of
 * @unwind_op->val bytes, choosing the most compact Win64 encoding:
 * UWOP_ALLOC_SMALL (1 slot, <= 128 bytes), UWOP_ALLOC_LARGE with OpInfo 0
 * (2 slots, size/8 fits 16 bits), or UWOP_ALLOC_LARGE with OpInfo 1
 * (3 slots, unscaled 32-bit size).
 */
1025 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1027 PUNWIND_CODE unwindcode;
1032 g_assert (unwindinfo != NULL);
1034 size = unwind_op->val;
1037 g_error ("Stack allocation must be equal to or greater than 0x8.");
1041 else if (size <= 0x7FFF8)
1046 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1047 g_error ("Larger allocation needed for the unwind information.");
1049 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
1050 unwindcode = &unwindinfo->UnwindCode [codeindex];
1052 unwindcode->CodeOffset = (guchar)unwind_op->when;
1054 if (codesneeded == 1) {
1055 /*The size of the allocation is
1056 (the number in the OpInfo member) times 8 plus 8*/
1057 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
1058 unwindcode->OpInfo = (size - 8)/8;
1061 if (codesneeded == 3) {
1062 /*the unscaled size of the allocation is recorded
1063 in the next two slots in little-endian format.
1064 NOTE, unwind codes are allocated from end to beginning of list so
1065 unwind code will have right execution order. List is sorted on CodeOffset
1066 using descending sort order.*/
1067 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1068 unwindcode->OpInfo = 1;
1069 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1072 /*the size of the allocation divided by 8
1073 is recorded in the next slot.
1074 NOTE, unwind codes are allocated from end to beginning of list so
1075 unwind code will have right execution order. List is sorted on CodeOffset
1076 using descending sort order.*/
1077 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1078 unwindcode->OpInfo = 0;
1079 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
/* Ops must be added in increasing prolog-offset order. */
1083 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1084 g_error ("Adding unwind info in wrong order.");
1086 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* One-time init flag for the dynamic function table machinery below. */
1089 static gboolean g_dyn_func_table_inited;
1091 // Dynamic function table used when registering unwind info for OS unwind support.
1092 static GList *g_dynamic_function_table_begin;
1093 static GList *g_dynamic_function_table_end;
1095 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1096 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1098 // Module handle used when explicit loading ntdll.
1099 static HMODULE g_ntdll;
1101 // If Win8 or Win2012Server or later, use growable function tables instead
1102 // of callbacks. Callback solution will still be fallback on older systems.
1103 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1104 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1105 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1107 // When using function table callback solution an out of proc module is needed by
1108 // debuggers in order to read unwind info from debug target.
1110 #define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
1112 #define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
1115 #define MONO_DAC_MODULE_MAX_PATH 1024
/*
 * One-time initialization of the dynamic function table; caller must hold
 * g_dynamic_function_table_lock exclusively. Resolves the growable function
 * table APIs from ntdll when available (Win8/Win2012Server+).
 */
1118 init_table_no_lock (void)
1120 if (g_dyn_func_table_inited == FALSE) {
1121 g_assert_checked (g_dynamic_function_table_begin == NULL);
1122 g_assert_checked (g_dynamic_function_table_end == NULL);
1123 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1124 g_assert_checked (g_rtl_grow_function_table == NULL);
1125 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1126 g_assert_checked (g_ntdll == NULL);
1128 // Load functions available on Win8/Win2012Server or later. If running on earlier
1129 // systems the below GetProcAddress will fail, this is expected behavior.
1130 if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
1131 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
1132 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
1133 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
1136 g_dyn_func_table_inited = TRUE;
// Public, thread-safe initialization of the dynamic function table.
// Unsynchronized fast-path read of g_dyn_func_table_inited; the flag is
// re-checked under the exclusive lock inside init_table_no_lock, so a
// racing caller cannot double-initialize.
1141 mono_arch_unwindinfo_init_table (void)
1143 if (g_dyn_func_table_inited == FALSE) {
1145 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1147 init_table_no_lock ();
1149 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Tear down the dynamic function table: free all list entries, reset the
// resolved ntdll function pointers, and release the ntdll module reference.
// Caller must hold g_dynamic_function_table_lock exclusively.
1154 terminate_table_no_lock (void)
1156 if (g_dyn_func_table_inited == TRUE) {
1157 if (g_dynamic_function_table_begin != NULL) {
1158 // Free all list elements.
1159 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1167 g_list_free (g_dynamic_function_table_begin);
1168 g_dynamic_function_table_begin = NULL;
1169 g_dynamic_function_table_end = NULL;
1172 g_rtl_delete_growable_function_table = NULL;
1173 g_rtl_grow_function_table = NULL;
1174 g_rtl_add_growable_function_table = NULL;
1176 if (g_ntdll != NULL) {
// Balances the refcount taken by GetModuleHandleEx in init_table_no_lock.
1177 FreeLibrary (g_ntdll);
1181 g_dyn_func_table_inited = FALSE;
// Public, thread-safe teardown counterpart to mono_arch_unwindinfo_init_table.
// Same pattern: unsynchronized fast-path check, real work under the
// exclusive lock (terminate_table_no_lock re-checks the flag).
1186 mono_arch_unwindinfo_terminate_table (void)
1188 if (g_dyn_func_table_inited == TRUE) {
1190 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1192 terminate_table_no_lock ();
1194 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Fast boundary probe for [begin_range, end_range) against the first and last
// entries of the descending-sorted table. Returns the matching GList node, or
// NULL. On a NULL return, *continue_search tells the caller whether the range
// could still be inside the table (TRUE => a full linear scan is warranted).
// Caller must hold g_dynamic_function_table_lock (shared or exclusive).
1199 fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
1201 GList *found_entry = NULL;
1203 // Fast path, look at boundaries.
1204 if (g_dynamic_function_table_begin != NULL) {
1205 DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
// Single-element list: tail may equal head; fall back to first_entry.
1206 DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? g_dynamic_function_table_end->data : first_entry;
1208 // Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
1209 if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
1210 // Entry belongs to first entry in list.
1211 found_entry = g_dynamic_function_table_begin;
1212 *continue_search = FALSE;
1214 if (first_entry != NULL && first_entry->begin_range >= begin_range) {
1215 if (last_entry != NULL && last_entry->begin_range <= begin_range) {
1216 // Entry has a range that could exist in table, continue search.
1217 *continue_search = TRUE;
// Convenience wrapper over fast_find_range_in_table_no_lock_ex returning the
// entry data instead of the GList node. Same locking requirements.
1226 static inline DynamicFunctionTableEntry *
1227 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1229 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1230 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Locate the GList node whose entry covers exactly the range
// [code_block, code_block + block_size): boundary fast path first, then a
// linear scan looking for an exact begin/end match. Returns NULL when absent.
// Caller must hold g_dynamic_function_table_lock (shared or exclusive).
1234 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1236 GList *found_entry = NULL;
1237 gboolean continue_search = FALSE;
1239 gsize begin_range = (gsize)code_block;
1240 gsize end_range = begin_range + block_size;
1242 // Fast path, check table boundaries.
1243 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
1244 if (found_entry || continue_search == FALSE)
1247 // Scan table for an entry including range.
1248 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1249 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1250 g_assert_checked (current_entry != NULL);
1252 // Do we have a match?
// Exact-range match (unlike find_pc_* below, which does containment).
1253 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
// Convenience wrapper over find_range_in_table_no_lock_ex returning the
// entry data instead of the GList node. Same locking requirements.
1262 static inline DynamicFunctionTableEntry *
1263 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1265 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1266 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Locate the GList node whose entry's [begin_range, end_range] contains PC.
// Uses a degenerate range (end == begin) so the boundary fast path can be
// shared with the exact-range lookup. Returns NULL when no entry covers PC.
// Caller must hold g_dynamic_function_table_lock (shared or exclusive).
1270 find_pc_in_table_no_lock_ex (const gpointer pc)
1272 GList *found_entry = NULL;
1273 gboolean continue_search = FALSE;
1275 gsize begin_range = (gsize)pc;
1276 gsize end_range = begin_range;
1278 // Fast path, check table boundaries.
1279 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1280 if (found_entry || continue_search == FALSE)
1283 // Scan table for an entry including range.
1284 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1285 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1286 g_assert_checked (current_entry != NULL);
1288 // Do we have a match?
// Containment match: entry range must include PC.
1289 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
// Convenience wrapper over find_pc_in_table_no_lock_ex returning the entry
// data instead of the GList node. Same locking requirements.
1298 static inline DynamicFunctionTableEntry *
1299 find_pc_in_table_no_lock (const gpointer pc)
1301 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1302 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
static void
validate_table_no_lock (void)
{
	// Validation method checking that table is sorted as expected and doesn't
	// include overlapping regions. Asserts on failure to explicitly indicate
	// which check failed. Caller must hold g_dynamic_function_table_lock.
	if (g_dynamic_function_table_begin != NULL) {
		g_assert_checked (g_dynamic_function_table_end != NULL);

		// FIX: renamed misspelled local `prevoious_entry` -> `previous_entry`
		// and narrowed `current_entry` to loop scope.
		DynamicFunctionTableEntry *previous_entry = NULL;
		for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
			DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;

			g_assert_checked (current_entry != NULL);
			g_assert_checked (current_entry->end_range > current_entry->begin_range);

			if (previous_entry != NULL) {
				// List should be sorted in descending order on begin_range.
				g_assert_checked (previous_entry->begin_range > current_entry->begin_range);

				// Check for overlapped regions.
				g_assert_checked (previous_entry->begin_range >= current_entry->end_range);
			}

			previous_entry = current_entry;
		}
	}
}

#else

// Checked-build-only validation; no-op in regular builds.
static void
validate_table_no_lock (void)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Forward declaration of the function-table callback handed to
// RtlInstallFunctionTableCallback on systems without growable tables.
1345 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
// Insert a new code region [code_block, code_block + block_size) into the
// dynamic function table (kept sorted descending on begin_range) and register
// it with the OS unwinder: via RtlAddGrowableFunctionTable on Win8+, otherwise
// via RtlInstallFunctionTableCallback plus an out-of-proc DAC module so
// debuggers can read the unwind info. Returns the new entry, or the existing
// one if the exact range is already present. Thread-safe (exclusive lock).
1347 DynamicFunctionTableEntry *
1348 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1350 DynamicFunctionTableEntry *new_entry = NULL;
1352 gsize begin_range = (gsize)code_block;
1353 gsize end_range = begin_range + block_size;
1355 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
// Lazy init: safe here because we already hold the exclusive lock.
1356 init_table_no_lock ();
1357 new_entry = find_range_in_table_no_lock (code_block, block_size);
1358 if (new_entry == NULL) {
1359 // Allocate new entry.
1360 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1361 if (new_entry != NULL) {
1363 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1364 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1365 InitializeSRWLock (&new_entry->lock);
1366 new_entry->handle = NULL;
1367 new_entry->begin_range = begin_range;
1368 new_entry->end_range = end_range;
1369 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1370 new_entry->rt_funcs_current_count = 0;
1371 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1373 if (new_entry->rt_funcs != NULL) {
1374 // Check insert on boundaries. List is sorted descending on begin_range.
1375 if (g_dynamic_function_table_begin == NULL) {
// First entry: list head and tail are the same node.
1376 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1377 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1378 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1379 // Insert at the head.
1380 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1381 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
// Append after the current tail and advance the tail pointer.
1383 g_list_append (g_dynamic_function_table_end, new_entry);
1384 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1386 //Search and insert at correct position.
1387 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1388 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1389 g_assert_checked (current_entry != NULL);
1391 if (current_entry->begin_range < new_entry->begin_range) {
1392 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1398 // Register dynamic function table entry with OS.
1399 if (g_rtl_add_growable_function_table != NULL) {
1400 // Allocate new growable handle table for entry.
1401 g_assert_checked (new_entry->handle == NULL);
1402 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1403 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1404 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
1407 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1408 WCHAR *path = buffer;
1410 // DAC module should be in the same directory as the
// Build "<dir-of-current-exe>\\" + MONO_DAC_MODULE in buffer.
1412 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1413 path = wcsrchr (buffer, TEXT('\\'));
1419 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1422 // Register function table callback + out of proc module.
// | 3: RtlInstallFunctionTableCallback requires the two low-order bits
// of the table identifier to be set.
1423 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
1424 BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
1425 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1426 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1430 // Only included in checked builds. Validates the structure of table after insert.
1431 validate_table_no_lock ();
1439 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Unlink ENTRY from the dynamic function table, unregister its function
// table from the OS (growable delete on Win8+, RtlDeleteFunctionTable for
// the callback solution) and free its storage. NULL entry is a no-op.
// Caller must hold g_dynamic_function_table_lock exclusively.
1445 remove_range_in_table_no_lock (GList *entry)
1447 if (entry != NULL) {
// Keep the tail pointer valid before unlinking the node.
1448 if (entry == g_dynamic_function_table_end)
1449 g_dynamic_function_table_end = entry->prev;
1451 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1452 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1454 g_assert_checked (removed_entry != NULL);
1455 g_assert_checked (removed_entry->rt_funcs != NULL);
1457 // Remove function table from OS.
1458 if (removed_entry->handle != NULL) {
1459 if (g_rtl_delete_growable_function_table != NULL) {
1460 g_rtl_delete_growable_function_table (removed_entry->handle);
1462 RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
1466 g_free (removed_entry->rt_funcs);
1467 g_free (removed_entry);
// Node was detached by g_list_remove_link; free just this node.
1469 g_list_free_1 (entry);
1472 // Only included in checked builds. Validates the structure of table after remove.
1473 validate_table_no_lock ();
1477 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1479 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1481 GList *found_entry = find_pc_in_table_no_lock_ex (code);
1483 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1484 remove_range_in_table_no_lock (found_entry);
1486 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1490 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1492 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1494 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1496 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1497 remove_range_in_table_no_lock (found_entry);
1499 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Look up the RUNTIME_FUNCTION whose (entry-relative) Begin/End addresses
// cover [code, code + code_size). Returns NULL when no covering entry or
// function is found. Takes the table lock and the entry lock shared; the
// returned pointer references the entry's rt_funcs array.
1503 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1505 PRUNTIME_FUNCTION found_rt_func = NULL;
1507 gsize begin_range = (gsize)code;
1508 gsize end_range = begin_range + code_size;
1510 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1512 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1514 if (found_entry != NULL) {
// Per-entry lock guards the rt_funcs array against concurrent inserts.
1516 AcquireSRWLockShared (&found_entry->lock);
1518 g_assert_checked (found_entry->begin_range <= begin_range);
1519 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1520 g_assert_checked (found_entry->rt_funcs != NULL);
// Linear scan; RUNTIME_FUNCTION addresses are relative to begin_range.
1522 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1523 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1525 // Is this our RT function entry?
1526 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1527 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1528 found_rt_func = current_rt_func;
1533 ReleaseSRWLockShared (&found_entry->lock);
1536 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1538 return found_rt_func;
// PC lookup variant: degenerate zero-sized range containing just PC.
1541 static inline PRUNTIME_FUNCTION
1542 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1544 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1547 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build validation of one entry's RUNTIME_FUNCTION array.
// Caller must hold the entry's lock.
1549 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1551 // Validation method checking that runtime function table is sorted as expected and doesn't include overlapping regions.
1552 // Method will assert on failure to explicitly indicate what check failed.
1553 g_assert_checked (entry != NULL);
1554 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1555 g_assert_checked (entry->rt_funcs != NULL);
1557 PRUNTIME_FUNCTION current_rt_func = NULL;
1558 PRUNTIME_FUNCTION previous_rt_func = NULL;
1559 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1560 current_rt_func = &(entry->rt_funcs [i]);
1562 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
// Unwind data is placed at/after the end of the method's code
// (see mono_arch_unwindinfo_insert_rt_func_in_table).
1563 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1565 if (previous_rt_func != NULL) {
1566 // List should be sorted in ascending order based on BeginAddress.
1567 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1569 // Check for overlapped regions.
1570 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1573 previous_rt_func = current_rt_func;
// Checked-build-only validation; no-op in regular builds.
1580 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1584 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Insert a RUNTIME_FUNCTION for the method at [code, code + code_size) into
// the dynamic function table entry covering CODE, keeping the array sorted
// ascending on BeginAddress, growing it when full, and notifying the OS
// (RtlGrowFunctionTable / re-register growable table) as appropriate.
// Takes the table lock shared and the owning entry's lock exclusive.
1587 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1589 PRUNTIME_FUNCTION new_rt_func = NULL;
1591 gsize begin_range = (gsize)code;
1592 gsize end_range = begin_range + code_size;
1594 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1596 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1598 if (found_entry != NULL) {
1600 AcquireSRWLockExclusive (&found_entry->lock);
1602 g_assert_checked (found_entry->begin_range <= begin_range);
1603 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1604 g_assert_checked (found_entry->rt_funcs != NULL);
1605 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
// All RUNTIME_FUNCTION addresses are relative to the entry's begin_range.
1607 gsize code_offset = (gsize)code - found_entry->begin_range;
1608 gsize entry_count = found_entry->rt_funcs_current_count;
1609 gsize max_entry_count = found_entry->rt_funcs_max_count;
1610 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1612 RUNTIME_FUNCTION new_rt_func_data;
1613 new_rt_func_data.BeginAddress = code_offset;
1614 new_rt_func_data.EndAddress = code_offset + code_size;
// Unwind info is stored right after the method's code, aligned to mgreg_t
// (see mono_arch_unwindinfo_install_method_unwind_info).
1616 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
1617 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1619 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));
1621 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1623 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list
1624 // going to be directly reused in OS func tables. Check if we can append to end of existing table without realloc.
// NOTE(review): relies on && binding tighter than ||; fast path is taken
// when the table is empty, or when there is room and the new function
// sorts after the current last element. Parenthesize if ever touched.
1625 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1626 new_rt_func = &(current_rt_funcs [entry_count]);
1627 *new_rt_func = new_rt_func_data;
1630 // No easy way out, need to realloc, grow to double size (or current max, if to small).
1631 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1632 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1634 if (new_rt_funcs != NULL) {
1635 gsize from_index = 0;
1638 // Copy from old table into new table. Make sure new rt func gets inserted
1639 // into correct location based on sort order.
1640 for (; from_index < entry_count; ++from_index) {
1641 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1642 new_rt_func = &(new_rt_funcs [to_index++]);
1643 *new_rt_func = new_rt_func_data;
// Skip uninitialized (zeroed) slots when compacting into the new array.
1646 if (current_rt_funcs [from_index].UnwindData != 0)
1647 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1650 // If we didn't insert by now, put it last in the list.
1651 if (new_rt_func == NULL) {
1652 new_rt_func = &(new_rt_funcs [to_index]);
1653 *new_rt_func = new_rt_func_data;
1660 // Update the stats for current entry.
1661 found_entry->rt_funcs_current_count = entry_count;
1662 found_entry->rt_funcs_max_count = max_entry_count;
// Four cases: in-place growable grow, reallocated growable re-register,
// reallocated callback-table swap, or in-place callback (nothing to do).
1664 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1665 // No new table just report increase in use.
1666 g_assert_checked (found_entry->handle != NULL);
1667 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1668 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1669 // New table, delete old table and rt funcs, and register a new one.
1670 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1671 g_rtl_delete_growable_function_table (found_entry->handle);
1672 found_entry->handle = NULL;
1673 g_free (found_entry->rt_funcs);
1674 found_entry->rt_funcs = new_rt_funcs;
1675 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1676 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1677 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1679 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1680 // No table registered with OS, callback solution in use. Switch tables.
1681 g_free (found_entry->rt_funcs);
1682 found_entry->rt_funcs = new_rt_funcs;
1683 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1684 // No table registered with OS, callback solution in use, nothing to do.
1686 g_assert_not_reached ();
1689 // Only included in checked builds. Validates the structure of table after insert.
1690 validate_rt_funcs_in_table_no_lock (found_entry);
1692 ReleaseSRWLockExclusive (&found_entry->lock);
1695 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
// Function-table callback invoked by the OS unwinder (registered via
// RtlInstallFunctionTableCallback) to resolve the RUNTIME_FUNCTION for
// ControlPc; Context is unused here since the table is process-global.
1700 static PRUNTIME_FUNCTION
1701 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1703 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
// Translate Mono's collected DWARF-style unwind ops into Windows x64
// UNWIND_INFO op codes in UNWINDINFO. NULL unwind_ops or unwindinfo => no-op.
1707 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1709 if (unwind_ops != NULL && unwindinfo != NULL) {
1710 MonoUnwindOp *unwind_op_data;
// Track whether stack/frame-pointer allocation has been emitted;
// presumably set in the alloc cases' bodies — confirm in full file.
1711 gboolean sp_alloced = FALSE;
1712 gboolean fp_alloced = FALSE;
1714 // Replay collected unwind info and setup Windows format.
1715 for (GSList *l = unwind_ops; l; l = l->next) {
1716 unwind_op_data = (MonoUnwindOp *)l->data;
1717 switch (unwind_op_data->op) {
1718 case DW_CFA_offset : {
1719 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1720 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
// RIP "saves" are return-address bookkeeping, not register pushes.
1721 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1722 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1725 case DW_CFA_mono_sp_alloc_info_win64 : {
1726 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
1730 case DW_CFA_mono_fp_alloc_info_win64 : {
1731 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
// Allocate a fresh UNWIND_INFO via mono_arch_unwindinfo_create and populate
// it from UNWIND_OPS; presumably returns the allocated buffer (ownership to
// caller, freed in mono_arch_unwindinfo_install_method_unwind_info).
1743 initialize_unwind_info_internal (GSList *unwind_ops)
1745 PUNWIND_INFO unwindinfo;
1747 mono_arch_unwindinfo_create (&unwindinfo);
1748 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
// Count how many UNWIND_CODE slots the given unwind ops translate to, by
// replaying them into a throwaway stack-allocated UNWIND_INFO.
1754 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1756 UNWIND_INFO unwindinfo = {0};
1757 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1758 return unwindinfo.CountOfCodes;
// Build the per-method unwind info from CFG's collected unwind ops, store it
// on cfg->arch.unwindinfo (must be unset) and return the size in bytes it
// will occupy when installed after the method's code.
1762 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1764 MonoCompile * current_cfg = (MonoCompile *)cfg;
1765 g_assert (current_cfg->arch.unwindinfo == NULL);
1766 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1767 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
// Copy the scratch unwind info (*monoui) into its final location directly
// after the method's code (aligned to mgreg_t), compact the unwind codes,
// free the scratch buffer and register the method's RUNTIME_FUNCTION in the
// dynamic function table.
1771 mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
1773 PUNWIND_INFO unwindinfo, targetinfo;
1775 guint64 targetlocation;
1779 unwindinfo = (PUNWIND_INFO)*monoui;
// Final location: first mgreg_t-aligned address at/after code end — must
// match the UnwindData computed in insert_rt_func_in_table.
1780 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1781 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));
// Copy the fixed header, excluding the full scratch UnwindCode array.
1783 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1785 codecount = unwindinfo->CountOfCodes;
// Codes were filled from the end of the scratch array; compact the used
// tail to the front of the target array.
1787 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1788 sizeof (UNWIND_CODE) * codecount);
1791 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1793 // Validate the order of unwind op codes in checked builds. Offset should be in descending order.
1794 // In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
1796 for (int current = 0; current < codecount; current++) {
1797 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
// UWOP_ALLOC_LARGE occupies extra slots (1 extra when OpInfo == 0).
1799 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1800 if (targetinfo->UnwindCode [current].OpInfo == 0) {
1808 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Scratch buffer ownership ends here (allocated by initialize_unwind_info_internal).
1810 g_free (unwindinfo);
1813 // Register unwind info in table.
1814 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
// Trampoline variant: build unwind info from UNWIND_OPS and install it after
// the trampoline's code (install consumes/frees the scratch buffer).
1818 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1820 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1821 if (unwindinfo != NULL) {
1822 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
// Code-manager hook: a new code chunk was allocated; make it known to the
// dynamic function table so unwind info can be registered for it.
1827 mono_arch_code_chunk_new (void *chunk, int size)
1829 mono_arch_unwindinfo_insert_range_in_table (chunk, size);
// Code-manager hook: a code chunk is being destroyed; drop its dynamic
// function table entry and OS registration.
1832 void mono_arch_code_chunk_destroy (void *chunk)
1834 mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
1836 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1838 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
// Emit (once, cached in `saved`) a small native thunk that restores a saved
// tasklet continuation: copies the saved stack back into place, restores
// RBP/RSP from the continuation's LMF and jumps to the saved return address,
// with `state` as the return value in RAX.
1839 MonoContinuationRestore
1840 mono_tasklets_arch_restore (void)
1842 static guint8* saved = NULL;
1843 guint8 *code, *start;
1844 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1845 const guint kMaxCodeSize = 64;
// Already generated: return the cached thunk.
1849 return (MonoContinuationRestore)saved;
1850 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1851 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1852 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1853 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1854 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1855 * We move cont to cont_reg since we need both rcx and rdi for the copy
1856 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1858 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1859 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1860 /* setup the copy of the stack */
// RCX = stack_used_size / 8: count of 8-byte words for the rep string copy.
1861 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1862 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
// RSI = source (saved stack copy), RDI = destination (original stack address).
1864 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1865 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1866 amd64_prefix (code, X86_REP_PREFIX);
1869 /* now restore the registers from the LMF */
1870 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1871 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1872 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
// Stash lmf_addr in a callee-saved register (choice differs per build
// configuration — the surrounding #if is not visible here; confirm).
1875 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1877 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1880 /* state is already in rax */
1881 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1882 g_assert ((code - start) <= kMaxCodeSize);
1884 mono_arch_flush_icache (start, code - start);
1885 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
1888 return (MonoContinuationRestore)saved;
1890 #endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
1893 * mono_arch_setup_resume_sighandler_ctx:
1895 * Setup CTX so execution continues at FUNC.
1898 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1901 * When resuming from a signal handler, the stack should be misaligned, just like right after
// A call pushes an 8-byte return address, so on function entry
// SP % 16 == 8; emulate that by biasing an aligned SP down by 8.
1904 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1905 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1906 MONO_CONTEXT_SET_IP (ctx, func);
// DISABLE_JIT stubs: the exception trampoline factories below must never be
// called when the JIT is disabled, hence g_assert_not_reached.
1911 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1913 g_assert_not_reached ();
1918 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1920 g_assert_not_reached ();
1925 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1927 g_assert_not_reached ();
1932 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1934 g_assert_not_reached ();
1939 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1941 g_assert_not_reached ();
1946 mono_amd64_get_exception_trampolines (gboolean aot)
1948 g_assert_not_reached ();
1951 #endif /* DISABLE_JIT */
1953 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
// Stub: tasklet restore is unreachable without tasklet support or the JIT.
1954 MonoContinuationRestore
1955 mono_tasklets_arch_restore (void)
1957 g_assert_not_reached ();
1960 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */