3 * exception support for AMD64
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 // Secret password to unlock wcscat_s on mxe, must happen before string.h included
18 #define MINGW_HAS_SECURE_API 1
27 #ifdef HAVE_UCONTEXT_H
31 #include <mono/arch/amd64/amd64-codegen.h>
32 #include <mono/metadata/abi-details.h>
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/threads.h>
36 #include <mono/metadata/threads-types.h>
37 #include <mono/metadata/debug-helpers.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/gc-internals.h>
40 #include <mono/metadata/mono-debug.h>
41 #include <mono/utils/mono-mmap.h>
44 #include "mini-amd64.h"
47 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
50 static void (*restore_stack) (void);
51 static MonoW32ExceptionHandler fpe_handler;
52 static MonoW32ExceptionHandler ill_handler;
53 static MonoW32ExceptionHandler segv_handler;
55 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
56 void *mono_win_vectored_exception_handle;
58 #define W32_SEH_HANDLE_EX(_ex) \
59 if (_ex##_handler) _ex##_handler(0, ep, ctx)
/*
 * Process-wide unhandled-exception filter installed by win32_seh_init ().
 * If an application filter was registered before mono's, delegate to it so
 * embedding hosts keep their crash handling; otherwise report the fault as a
 * native crash and let the OS continue its search.
 */
61 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
63 #ifndef MONO_CROSS_COMPILE
64 if (mono_old_win_toplevel_exception_filter) {
65 return (*mono_old_win_toplevel_exception_filter)(ep);
/* No chained filter: produce mono's native crash report (labelled SIGSEGV). */
69 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
71 return EXCEPTION_CONTINUE_SEARCH;
/*
 * get_win32_restore_stack:
 *
 * Generates (once, cached in 'start') a small native stub used after a
 * stack-overflow exception has been handled. The stub re-arms the Win64
 * stack guard page via _resetstkoflw () and then resumes execution at the
 * context saved in jit_tls->stack_restore_ctx. It never returns normally.
 */
75 get_win32_restore_stack (void)
77 static guint8 *start = NULL;
83 /* restore_stack (void) */
84 start = code = mono_global_codeman_reserve (128);
/* Standard prologue: save caller's frame pointer, set up our own. */
86 amd64_push_reg (code, AMD64_RBP);
87 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
89 /* push 32 bytes of stack space for Win64 calling convention */
90 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
92 /* restore guard page */
93 amd64_mov_reg_imm (code, AMD64_R11, _resetstkoflw);
94 amd64_call_reg (code, AMD64_R11);
96 /* get jit_tls with context to restore */
97 amd64_mov_reg_imm (code, AMD64_R11, mono_tls_get_jit_tls);
98 amd64_call_reg (code, AMD64_R11);
100 /* move jit_tls from return reg to arg reg */
101 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
103 /* retrieve pointer to saved context */
104 amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, stack_restore_ctx));
106 /* this call does not return */
107 amd64_mov_reg_imm (code, AMD64_R11, mono_restore_context);
108 amd64_call_reg (code, AMD64_R11);
/* Sanity-check we stayed within the reserved buffer, then publish the code. */
110 g_assert ((code - start) < 128);
112 mono_arch_flush_icache (start, code - start);
113 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
119 * Unhandled Exception Filter
120 * Top-level per-process exception handler.
/*
 * Vectored exception handler registered first-in-chain by win32_seh_init ().
 * Translates Win32 structured exceptions (access violation, illegal
 * instruction, FP/int faults, stack overflow) into mono's managed exception
 * machinery via the registered MonoW32ExceptionHandler callbacks.
 */
122 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
124 EXCEPTION_RECORD* er;
127 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
128 MonoDomain* domain = mono_domain_get ();
130 /* If the thread is not managed by the runtime return early */
132 return EXCEPTION_CONTINUE_SEARCH;
134 jit_tls->mono_win_chained_exception_needs_run = FALSE;
135 res = EXCEPTION_CONTINUE_EXECUTION;
137 er = ep->ExceptionRecord;
138 ctx = ep->ContextRecord;
140 switch (er->ExceptionCode) {
141 case EXCEPTION_STACK_OVERFLOW:
142 if (!mono_aot_only) {
143 if (mono_arch_handle_exception (ctx, domain->stack_overflow_ex)) {
144 /* need to restore stack protection once stack is unwound
145 * restore_stack will restore stack protection and then
146 * resume control to the saved stack_restore_ctx */
147 mono_sigctx_to_monoctx (ctx, &jit_tls->stack_restore_ctx);
/* Resume execution in the restore_stack stub generated by get_win32_restore_stack (). */
148 ctx->Rip = (guint64)restore_stack;
151 jit_tls->mono_win_chained_exception_needs_run = TRUE;
154 case EXCEPTION_ACCESS_VIOLATION:
155 W32_SEH_HANDLE_EX(segv);
157 case EXCEPTION_ILLEGAL_INSTRUCTION:
158 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults are funnelled to the single fpe handler. */
160 case EXCEPTION_INT_DIVIDE_BY_ZERO:
161 case EXCEPTION_INT_OVERFLOW:
162 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
163 case EXCEPTION_FLT_OVERFLOW:
164 case EXCEPTION_FLT_UNDERFLOW:
165 case EXCEPTION_FLT_INEXACT_RESULT:
166 W32_SEH_HANDLE_EX(fpe);
169 jit_tls->mono_win_chained_exception_needs_run = TRUE;
173 if (jit_tls->mono_win_chained_exception_needs_run) {
174 /* Don't copy context back if we chained exception
175 * as the handler may have modfied the EXCEPTION_POINTERS
176 * directly. We don't pass sigcontext to chained handlers.
177 * Return continue search so the UnhandledExceptionFilter
178 * can correctly chain the exception.
180 res = EXCEPTION_CONTINUE_SEARCH;
/*
 * Install mono's Win32 exception handling: generate the stack-overflow
 * recovery stub, save any pre-existing top-level filter so it can be chained,
 * and register our vectored handler first (the '1' argument) in the chain.
 */
186 void win32_seh_init()
189 restore_stack = get_win32_restore_stack ();
191 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
192 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
/*
 * Undo win32_seh_init (): restore the application's original top-level
 * filter (if one existed) and remove our vectored exception handler.
 */
195 void win32_seh_cleanup()
199 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
201 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
/*
 * Register the runtime callback invoked for a given fault class
 * (SIGFPE / SIGILL / SIGSEGV equivalents) by seh_vectored_exception_handler.
 * The dispatch on 'type' is elided in this view; each assignment below
 * corresponds to one case.
 */
205 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
209 fpe_handler = handler;
212 ill_handler = handler;
215 segv_handler = handler;
222 #endif /* TARGET_WIN32 */
226 * mono_arch_get_restore_context:
228 * Returns a pointer to a method which restores a previously saved sigcontext.
231 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
233 guint8 *start = NULL;
235 MonoJumpInfo *ji = NULL;
236 GSList *unwind_ops = NULL;
239 /* restore_context (MonoContext *ctx) */
241 start = code = (guint8 *)mono_global_codeman_reserve (256);
/* Keep the ctx pointer in a scratch reg (R11) so ARG_REG1 can be clobbered. */
243 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
245 /* Restore all registers except %rip and %r11 */
246 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
247 for (i = 0; i < AMD64_NREG; ++i) {
248 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
249 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
253 * The context resides on the stack, in the stack frame of the
254 * caller of this function. The stack pointer that we need to
255 * restore is potentially many stack frames higher up, so the
256 * distance between them can easily be more than the red zone
257 * size. Hence the stack pointer can be restored only after
258 * we have finished loading everything from the context.
/* Load saved RSP into R8 and saved RIP into R11, then switch stacks last. */
260 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
261 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
262 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
264 /* jump to the saved IP */
265 amd64_jump_reg (code, AMD64_R11);
267 mono_arch_flush_icache (start, code - start);
268 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
271 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
277 * mono_arch_get_call_filter:
279 * Returns a pointer to a method which calls an exception filter. We
280 * also use this function to call finally handlers (we pass NULL as
281 * @exc object in this case).
284 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
290 MonoJumpInfo *ji = NULL;
291 GSList *unwind_ops = NULL;
292 const guint kMaxCodeSize = 128;
294 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
296 /* call_filter (MonoContext *ctx, unsigned long eip) */
299 /* Alloc new frame */
300 amd64_push_reg (code, AMD64_RBP);
301 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
303 /* Save callee saved regs */
305 for (i = 0; i < AMD64_NREG; ++i)
306 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
307 amd64_push_reg (code, i);
/* Save current RBP an extra time so it can be recovered after the filter runs. */
313 amd64_push_reg (code, AMD64_RBP);
315 /* Make stack misaligned, the call will make it aligned again */
317 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
319 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Set up the frame of the method whose filter/finally clause we are calling. */
322 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
323 /* load callee saved regs */
324 for (i = 0; i < AMD64_NREG; ++i) {
325 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
326 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
328 /* load exc register */
329 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
331 /* call the handler */
332 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment and recover the RBP saved above. */
335 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
338 amd64_pop_reg (code, AMD64_RBP);
340 /* Restore callee saved regs */
341 for (i = AMD64_NREG; i >= 0; --i)
342 if (AMD64_IS_CALLEE_SAVED_REG (i))
343 amd64_pop_reg (code, i);
/* Epilogue: reset RSP from RBP and return to the caller. */
346 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
347 amd64_pop_reg (code, AMD64_RBP);
353 g_assert ((code - start) < kMaxCodeSize);
355 mono_arch_flush_icache (start, code - start);
356 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
359 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
363 #endif /* !DISABLE_JIT */
366 * The first few arguments are dummy, to force the other arguments to be passed on
367 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/*
 * C side of the throw trampoline: copies the caller-saved MonoContext,
 * resets the stack trace of the exception object being (re)thrown, adjusts
 * RIP to point inside the faulting call instruction, then hands off to the
 * generic exception machinery. Does not return.
 */
370 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
371 guint64 dummy5, guint64 dummy6,
372 MonoContext *mctx, MonoObject *exc, gboolean rethrow)
377 /* mctx is on the caller's stack */
378 memcpy (&ctx, mctx, sizeof (MonoContext));
380 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
381 MonoException *mono_ex = (MonoException*)exc;
/* Clear any stale trace so a fresh one is captured for this throw.
 * NOTE(review): the rethrow guard around these assignments appears elided here. */
383 mono_ex->stack_trace = NULL;
384 mono_ex->trace_ips = NULL;
387 mono_error_assert_ok (&error);
389 /* adjust eip so that it point into the call instruction */
390 ctx.gregs [AMD64_RIP] --;
392 mono_handle_exception (&ctx, exc);
393 mono_restore_context (&ctx);
/* mono_restore_context () never returns. */
394 g_assert_not_reached ();
/*
 * Throw a corlib exception identified by its TypeDef token index.
 * 'pc_offset' is the distance from the caller IP back to the actual throw
 * site; subtracting it avoids needing relocations at the call site.
 */
398 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
399 guint64 dummy5, guint64 dummy6,
400 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
402 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
405 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
407 mctx->gregs [AMD64_RIP] -= pc_offset;
409 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
410 mctx->gregs [AMD64_RIP] += 1;
412 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
/*
 * Resume a pending exception unwind from the context saved by the
 * resume_unwind trampoline. Only mctx is meaningful; the dummy arguments
 * exist purely to push the real arguments onto the stack (see the comment
 * above mono_amd64_throw_exception).
 */
416 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
417 guint64 dummy5, guint64 dummy6,
418 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
420 /* Only the register parameters are valid */
423 /* mctx is on the caller's stack */
424 memcpy (&ctx, mctx, sizeof (MonoContext));
426 mono_resume_unwind (&ctx);
431 * get_throw_trampoline:
433 * Generate a call to mono_amd64_throw_exception/
434 * mono_amd64_throw_corlib_exception.
/*
 * The trampoline saves the full register state into a MonoContext on its own
 * stack frame and passes ctx/exc/rethrow (or ctx/token/pc_offset) as stack
 * arguments, using 6 dummy register args to hide Win64 vs SysV differences.
 */
437 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
441 MonoJumpInfo *ji = NULL;
442 GSList *unwind_ops = NULL;
443 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
444 const guint kMaxCodeSize = 256;
447 dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
449 dummy_stack_space = 0;
453 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
455 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
457 /* The stack is unaligned on entry */
458 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
463 unwind_ops = mono_arch_get_cie_program ();
/* Allocate the frame; record unwind info so native unwinders can cross it. */
466 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
468 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
469 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
473 * To hide linux/windows calling convention differences, we pass all arguments on
474 * the stack by passing 6 dummy values in registers.
/* Stack layout of the outgoing call: args at the bottom, ctx above them. */
477 arg_offsets [0] = dummy_stack_space + 0;
478 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
479 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
480 ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
481 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Save every general register into ctx.gregs. */
484 for (i = 0; i < AMD64_NREG; ++i)
486 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Record the caller's RSP (entry RSP before the pushed return address). */
488 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
489 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
/* Record the caller's RIP (the return address at the top of our frame). */
491 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
492 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
493 /* Set arg1 == ctx */
494 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
495 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
496 /* Set arg2 == exc/ex_token_index */
498 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
500 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
501 /* Set arg3 == rethrow/pc offset */
503 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
507 * The caller doesn't pass in a pc/pc offset, instead we simply use the
508 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
510 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
512 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
514 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
/* Resolve the target icall: via patch info in AOT, direct address otherwise. */
518 const char *icall_name;
521 icall_name = "mono_amd64_resume_unwind";
523 icall_name = "mono_amd64_throw_corlib_exception";
525 icall_name = "mono_amd64_throw_exception";
526 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
527 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
529 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
531 amd64_call_reg (code, AMD64_R11);
/* The icall never returns; trap if it somehow does. */
532 amd64_breakpoint (code);
534 mono_arch_flush_icache (start, code - start);
536 g_assert ((code - start) < kMaxCodeSize);
537 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
539 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
542 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
548 * mono_arch_get_throw_exception:
549 * \returns a function pointer which can be used to raise
550 * exceptions. The returned function has the following
551 * signature: void (*func) (MonoException *exc);
554 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
556 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
/* Same as mono_arch_get_throw_exception, but with rethrow == TRUE so the
 * original stack trace of the exception object is preserved. */
560 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
562 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
566 * mono_arch_get_throw_corlib_exception:
568 * Returns a function pointer which can be used to raise
569 * corlib exceptions. The returned function has the following
570 * signature: void (*func) (guint32 ex_token, guint32 offset);
571 * Here, offset is the offset which needs to be substracted from the caller IP
572 * to get the IP of the throw. Passing the offset has the advantage that it
573 * needs no relocations in the caller.
576 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
578 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
580 #endif /* !DISABLE_JIT */
583 * mono_arch_unwind_frame:
585 * This function is used to gather information from @ctx, and store it in @frame_info.
586 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
587 * is modified if needed.
588 * Returns TRUE on success, FALSE otherwise.
591 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
592 MonoJitInfo *ji, MonoContext *ctx,
593 MonoContext *new_ctx, MonoLMF **lmf,
594 mgreg_t **save_locations,
595 StackFrameInfo *frame)
597 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
600 memset (frame, 0, sizeof (StackFrameInfo));
/* Case 1: 'ip' falls inside JITted code described by @ji — use DWARF-style
 * unwind info attached to the method to step to the caller frame. */
606 mgreg_t regs [MONO_MAX_IREGS + 1];
608 guint32 unwind_info_len;
610 guint8 *epilog = NULL;
612 if (ji->is_trampoline)
613 frame->type = FRAME_TYPE_TRAMPOLINE;
615 frame->type = FRAME_TYPE_MANAGED;
617 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
619 frame->unwind_info = unwind_info;
620 frame->unwind_info_len = unwind_info_len;
/* Debug dump of the unwind info; presumably compiled out normally. */
623 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
624 mono_print_unwind_info (unwind_info, unwind_info_len);
626 /* LLVM compiled code doesn't have this info */
627 if (ji->has_arch_eh_info)
628 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
630 for (i = 0; i < AMD64_NREG; ++i)
631 regs [i] = new_ctx->gregs [i];
633 mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
634 (guint8*)ji->code_start + ji->code_size,
635 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
636 save_locations, MONO_MAX_IREGS, &cfa);
638 for (i = 0; i < AMD64_NREG; ++i)
639 new_ctx->gregs [i] = regs [i];
641 /* The CFA becomes the new SP value */
642 new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Point RIP into the call instruction so line lookups hit the call site. */
645 new_ctx->gregs [AMD64_RIP] --;
/* If the LMF on top belongs to this frame: bit 2 of previous_lmf marks a
 * MonoLMFExt entry (debugger invoke or interpreter exit). */
651 if (((guint64)(*lmf)->previous_lmf) & 2) {
652 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
654 if (ext->debugger_invoke) {
656 * This LMF entry is created by the soft debug code to mark transitions to
657 * managed code done during invokes.
659 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
660 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
661 } else if (ext->interp_exit) {
662 frame->type = FRAME_TYPE_INTERP_TO_MANAGED;
663 frame->interp_exit_data = ext->interp_exit_data;
665 g_assert_not_reached ();
/* Pop the LMF; the low 3 bits of previous_lmf are tag bits, mask them off. */
668 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
/* Case 2: managed-to-native transition — recover the return address from
 * the LMF. Bit 4 marks a trampoline LMF carrying a full context. */
673 if (((guint64)(*lmf)->previous_lmf) & 4) {
674 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
676 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
677 } else if (((guint64)(*lmf)->previous_lmf) & 1) {
678 /* This LMF has the rip field set */
680 } else if ((*lmf)->rsp == 0) {
685 * The rsp field is set just before the call which transitioned to native
686 * code. Obtain the rip from the stack.
688 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
691 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
693 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
694 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
702 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
704 if (((guint64)(*lmf)->previous_lmf) & 4) {
705 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
707 /* Trampoline frame */
708 for (i = 0; i < AMD64_NREG; ++i)
709 new_ctx->gregs [i] = ext->ctx->gregs [i];
/* Adjust IP into the call instruction, as in the JIT-frame path above. */
711 new_ctx->gregs [AMD64_RIP] --;
714 * The registers saved in the LMF will be restored using the normal unwind info,
715 * when the wrapper frame is processed.
/* Plain LMF: only rip/rsp/rbp are reliable; zero the other callee-saved regs. */
719 new_ctx->gregs [AMD64_RIP] = rip;
720 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
721 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
722 for (i = 0; i < AMD64_NREG; ++i) {
723 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
724 new_ctx->gregs [i] = 0;
728 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
739 * Called by resuming from a signal handler.
/*
 * Runs on the normal stack (scheduled via mono_arch_setup_async_callback)
 * with the faulting context stashed in jit_tls->ex_ctx; dispatches the
 * exception object and resumes at the resulting context. Never returns.
 */
742 handle_signal_exception (gpointer obj)
744 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
747 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
749 mono_handle_exception (&ctx, (MonoObject *)obj);
751 mono_restore_context (&ctx);
/*
 * Rewrite @ctx so that, when resumed, execution continues in @async_cb with
 * @user_data as its (SysV) first argument, on a frame allocated below the
 * red zone of the interrupted code. The original RIP is pushed as a fake
 * return address to keep native unwinders (libgcc) happy.
 */
755 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
757 guint64 sp = ctx->gregs [AMD64_RSP];
759 ctx->gregs [AMD64_RDI] = (guint64)user_data;
761 /* Allocate a stack frame below the red zone */
763 /* The stack should be unaligned */
767 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
768 *(guint64*)sp = ctx->gregs [AMD64_RIP];
770 ctx->gregs [AMD64_RSP] = sp;
771 ctx->gregs [AMD64_RIP] = (guint64)async_cb;
775 * mono_arch_handle_exception:
776 * \param ctx saved processor state
777 * \param obj the exception object
780 mono_arch_handle_exception (void *sigctx, gpointer obj)
782 #if defined(MONO_ARCH_USE_SIGACTION)
786 * Handling the exception in the signal handler is problematic, since the original
787 * signal is disabled, and we could run arbitrary code though the debugger. So
788 * resume into the normal stack and do most work there if possible.
790 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
792 /* Pass the ctx parameter in TLS */
793 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
/* Redirect the interrupted thread into handle_signal_exception () once the
 * signal handler returns; the real work happens outside the handler. */
795 mctx = jit_tls->ex_ctx;
796 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
797 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Fallback path (no sigaction): handle the exception inline in the handler. */
803 mono_sigctx_to_monoctx (sigctx, &mctx);
805 mono_handle_exception (&mctx, obj);
807 mono_monoctx_to_sigctx (&mctx, sigctx);
/*
 * Extract the instruction pointer from a platform signal/exception context:
 * ucontext on sigaction platforms, CONTEXT on Windows, MonoContext otherwise.
 */
814 mono_arch_ip_from_context (void *sigctx)
816 #if defined(MONO_ARCH_USE_SIGACTION)
817 ucontext_t *ctx = (ucontext_t*)sigctx;
819 return (gpointer)UCONTEXT_REG_RIP (ctx);
820 #elif defined(HOST_WIN32)
821 return ((CONTEXT*)sigctx)->Rip;
823 MonoContext *ctx = sigctx;
824 return (gpointer)ctx->gregs [AMD64_RIP];
/*
 * Re-protect the soft stack-overflow guard area (made accessible while the
 * overflow was being handled) so the next overflow faults again.
 */
829 restore_soft_guard_pages (void)
831 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
832 if (jit_tls->stack_ovf_guard_base)
833 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
837 * this function modifies mctx so that when it is restored, it
838 * won't execcute starting at mctx.eip, but in a function that
839 * will restore the protection on the soft-guard pages and return back to
840 * continue at mctx.eip.
843 prepare_for_guard_pages (MonoContext *mctx)
846 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
/* Push the original resume address so restore_soft_guard_pages () returns to it. */
848 /* the return addr */
849 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
850 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
851 mctx->gregs [AMD64_RSP] = (guint64)sp;
/*
 * Entry point executed after returning from the altstack signal handler
 * (see mono_arch_handle_altstack_exception, which rewrites the sigcontext to
 * call this). Handles @obj against the copied context; on stack overflow it
 * arranges for the guard pages to be re-armed before resuming.
 */
855 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
858 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
/* Fault outside managed code: nothing we can unwind — report a native crash. */
861 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
865 mono_handle_exception (&mctx, obj);
867 prepare_for_guard_pages (&mctx);
868 mono_restore_context (&mctx);
/*
 * Called from the SIGSEGV handler running on the alternate signal stack.
 * Builds a call frame on the faulting thread's *real* stack (below the red
 * zone) containing a copy of the context, then rewrites the sigcontext so
 * the thread resumes in altstack_handle_and_restore (copied_ctx, exc,
 * stack_ovf) once the signal handler returns.
 */
872 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
874 #if defined(MONO_ARCH_USE_SIGACTION)
875 MonoException *exc = NULL;
878 MonoContext *copied_ctx;
881 exc = mono_domain_get ()->stack_overflow_ex;
883 /* setup a call frame on the real stack so that control is returned there
884 * and exception handling can continue.
885 * The frame looks like:
889 * 128 is the size of the red zone
891 frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
/* 16-align the interrupted SP, then carve out the frame below it. */
894 sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
895 sp = (gpointer *)((char*)sp - frame_size);
896 copied_ctx = (MonoContext*)(sp + 4);
897 /* the arguments must be aligned */
898 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
899 mono_sigctx_to_monoctx (sigctx, copied_ctx);
900 /* at the return form the signal handler execution starts in altstack_handle_and_restore() */
901 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
902 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
/* SysV argument registers for altstack_handle_and_restore (ctx, exc, stack_ovf). */
903 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
904 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
905 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
/*
 * Return the original IP stored in the current LMF (set by single-stepping /
 * breakpoint machinery) and clear the tag bit that marked it as present.
 */
910 mono_amd64_get_original_ip (void)
912 MonoLMF *lmf = mono_get_lmf ();
916 /* Reset the change to previous_lmf */
917 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * Build the list of exception trampolines needed by LLVM-compiled code
 * (corlib throw, absolute-address corlib throw, resume-unwind). Returns a
 * GSList of MonoTrampInfo*; the caller owns the list.
 */
924 mono_amd64_get_exception_trampolines (gboolean aot)
927 GSList *tramps = NULL;
929 /* LLVM needs different throw trampolines */
930 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
931 tramps = g_slist_prepend (tramps, info);
933 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
934 tramps = g_slist_prepend (tramps, info);
936 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
937 tramps = g_slist_prepend (tramps, info);
941 #endif /* !DISABLE_JIT */
/*
 * Register the LLVM exception trampolines as JIT icalls. In AOT mode the
 * precompiled trampolines are looked up; otherwise they are generated here
 * (which also avoids lazy-initialization races later).
 */
944 mono_arch_exceptions_init (void)
950 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
951 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
952 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
953 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
954 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
955 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
957 /* Call this to avoid initialization races */
958 tramps = mono_amd64_get_exception_trampolines (FALSE);
959 for (l = tramps; l; l = l->next) {
960 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
962 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
963 mono_tramp_info_register (info, NULL);
965 g_slist_free (tramps);
969 // Implies defined(TARGET_WIN32)
970 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
/*
 * Allocate a zeroed Win64 UNWIND_INFO record (version 1) and hand ownership
 * to the caller through *monoui.
 */
973 mono_arch_unwindinfo_create (gpointer* monoui)
975 PUNWIND_INFO newunwindinfo;
976 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
977 newunwindinfo->Version = 1;
/*
 * Append a UWOP_PUSH_NONVOL unwind code (push of a non-volatile register at
 * prolog offset unwind_op->when). Codes are filled from the END of the
 * UnwindCode array backwards so they end up in the descending-CodeOffset
 * order the Win64 unwinder requires.
 */
981 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
983 PUNWIND_CODE unwindcode;
986 g_assert (unwindinfo != NULL);
988 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
989 g_error ("Larger allocation needed for the unwind information.");
991 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
992 unwindcode = &unwindinfo->UnwindCode [codeindex];
993 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
994 unwindcode->CodeOffset = (guchar)unwind_op->when;
995 unwindcode->OpInfo = unwind_op->reg;
/* Ops must be added in increasing prolog-offset order. */
997 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
998 g_error ("Adding unwind info in wrong order.");
1000 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append a UWOP_SET_FPREG unwind code: establish unwind_op->reg as the frame
 * pointer at offset unwind_op->val from RSP. The Win64 format stores the
 * offset scaled by 16, hence the divisibility assert.
 */
1004 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1006 PUNWIND_CODE unwindcode;
1009 g_assert (unwindinfo != NULL);
1011 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1012 g_error ("Larger allocation needed for the unwind information.");
1014 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
1015 unwindcode = &unwindinfo->UnwindCode [codeindex];
1016 unwindcode->UnwindOp = UWOP_SET_FPREG;
1017 unwindcode->CodeOffset = (guchar)unwind_op->when;
1019 g_assert (unwind_op->val % 16 == 0);
1020 unwindinfo->FrameRegister = unwind_op->reg;
1021 unwindinfo->FrameOffset = unwind_op->val / 16;
/* Ops must be added in increasing prolog-offset order. */
1023 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1024 g_error ("Adding unwind info in wrong order.");
1026 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append an unwind code describing a stack allocation of unwind_op->val
 * bytes. The Win64 encoding depends on the size:
 *   <= 128 bytes        -> UWOP_ALLOC_SMALL, 1 slot, size encoded in OpInfo;
 *   <= 512K-8 bytes     -> UWOP_ALLOC_LARGE (OpInfo 0), 2 slots, size/8 in next slot;
 *   larger              -> UWOP_ALLOC_LARGE (OpInfo 1), 3 slots, unscaled 32-bit size.
 * (The codesneeded selection logic is partially elided in this view.)
 */
1030 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
1032 PUNWIND_CODE unwindcode;
1037 g_assert (unwindinfo != NULL);
1039 size = unwind_op->val;
1042 g_error ("Stack allocation must be equal to or greater than 0x8.");
1046 else if (size <= 0x7FFF8)
1051 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1052 g_error ("Larger allocation needed for the unwind information.");
/* Reserve 'codesneeded' slots from the end of the array (descending order). */
1054 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
1055 unwindcode = &unwindinfo->UnwindCode [codeindex];
1057 unwindcode->CodeOffset = (guchar)unwind_op->when;
1059 if (codesneeded == 1) {
1060 /*The size of the allocation is
1061 (the number in the OpInfo member) times 8 plus 8*/
1062 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
1063 unwindcode->OpInfo = (size - 8)/8;
1066 if (codesneeded == 3) {
1067 /*the unscaled size of the allocation is recorded
1068 in the next two slots in little-endian format.
1069 NOTE, unwind codes are allocated from end to begining of list so
1070 unwind code will have right execution order. List is sorted on CodeOffset
1071 using descending sort order.*/
1072 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1073 unwindcode->OpInfo = 1;
1074 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1077 /*the size of the allocation divided by 8
1078 is recorded in the next slot.
1079 NOTE, unwind codes are allocated from end to begining of list so
1080 unwind code will have right execution order. List is sorted on CodeOffset
1081 using descending sort order.*/
1082 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1083 unwindcode->OpInfo = 0;
1084 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
/* Ops must be added in increasing prolog-offset order. */
1088 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1089 g_error ("Adding unwind info in wrong order.");
1091 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1094 static gboolean g_dyn_func_table_inited;
1096 // Dynamic function table used when registering unwind info for OS unwind support.
1097 static GList *g_dynamic_function_table_begin;
1098 static GList *g_dynamic_function_table_end;
1100 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1101 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1103 // Module handle used when explicit loading ntdll.
1104 static HMODULE g_ntdll;
1106 // If Win8 or Win2012Server or later, use growable function tables instead
1107 // of callbacks. Callback solution will still be fallback on older systems.
1108 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1109 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1110 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1112 // When using function table callback solution an out of proc module is needed by
1113 // debuggers in order to read unwind info from debug target.
1115 #define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
1117 #define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
1120 #define MONO_DAC_MODULE_MAX_PATH 1024
/*
 * One-time initialization of the dynamic function table machinery.
 * Caller must hold g_dynamic_function_table_lock exclusively.
 */
1123 init_table_no_lock (void)
1125 if (g_dyn_func_table_inited == FALSE) {
1126 g_assert_checked (g_dynamic_function_table_begin == NULL);
1127 g_assert_checked (g_dynamic_function_table_end == NULL);
1128 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1129 g_assert_checked (g_rtl_grow_function_table == NULL);
1130 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1131 g_assert_checked (g_ntdll == NULL);
1133 // Load functions available on Win8/Win2012Server or later. If running on earlier
1134 // systems the below GetProceAddress will fail, this is expected behavior.
1135 if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
1136 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
1137 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
1138 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
1141 g_dyn_func_table_inited = TRUE;
// Public entry point: lazily initialize the dynamic function table.
// The unlocked flag check is a fast path; init_table_no_lock () re-checks
// the flag once the exclusive lock is held (double-checked pattern).
1146 mono_arch_unwindinfo_init_table (void)
1148 if (g_dyn_func_table_inited == FALSE) {
1150 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1152 init_table_no_lock ();
1154 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Tear down all dynamic function table state; inverse of init_table_no_lock.
// Frees the entry list, clears the resolved ntdll function pointers, releases
// the ntdll module handle, and resets the inited flag. Caller must hold
// g_dynamic_function_table_lock exclusively.
1159 terminate_table_no_lock (void)
1161 if (g_dyn_func_table_inited == TRUE) {
1162 if (g_dynamic_function_table_begin != NULL) {
1163 // Free all list elements.
1164 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1172 g_list_free (g_dynamic_function_table_begin);
1173 g_dynamic_function_table_begin = NULL;
1174 g_dynamic_function_table_end = NULL;
1177 g_rtl_delete_growable_function_table = NULL;
1178 g_rtl_grow_function_table = NULL;
1179 g_rtl_add_growable_function_table = NULL;
1181 if (g_ntdll != NULL) {
1182 FreeLibrary (g_ntdll);
1186 g_dyn_func_table_inited = FALSE;
// Public entry point: tear down the dynamic function table under the
// exclusive lock. Mirrors mono_arch_unwindinfo_init_table, including the
// unlocked fast-path flag check (re-checked inside terminate_table_no_lock).
1191 mono_arch_unwindinfo_terminate_table (void)
1193 if (g_dyn_func_table_inited == TRUE) {
1195 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1197 terminate_table_no_lock ();
1199 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Fast-path lookup examining only the list boundaries (the list is sorted
// descending on begin_range, so the head has the highest range).
// Returns the matching GList node or NULL. *continue_search is only written
// on specific paths, so callers must initialize it (both callers below pass
// a variable pre-set to FALSE); it is set TRUE when the requested range
// falls between the head's and tail's begin_range and a full scan is needed.
1204 fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
1206 GList *found_entry = NULL;
1208 // Fast path, look at boundaries.
1209 if (g_dynamic_function_table_begin != NULL) {
1210 DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
1211 DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? g_dynamic_function_table_end->data : first_entry;
1213 // Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
1214 if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
1215 // Entry belongs to first entry in list.
1216 found_entry = g_dynamic_function_table_begin;
1217 *continue_search = FALSE;
1219 if (first_entry != NULL && first_entry->begin_range >= begin_range) {
1220 if (last_entry != NULL && last_entry->begin_range <= begin_range) {
1221 // Entry has a range that could exist in table, continue search.
1222 *continue_search = TRUE;
// Convenience wrapper: same boundary-only lookup as the _ex variant, but
// unwraps the GList node to the entry payload (or NULL when not found).
1231 static inline DynamicFunctionTableEntry *
1232 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1234 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1235 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Find the GList node whose entry covers exactly [code_block, code_block +
// block_size). Tries the boundary fast path first; only linearly scans the
// table when the fast path says the range could lie in the middle.
// Note the scan requires an *exact* begin/end match, unlike the fast path's
// containment check. Caller must hold the table lock (shared or exclusive).
1239 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1241 GList *found_entry = NULL;
1242 gboolean continue_search = FALSE;
1244 gsize begin_range = (gsize)code_block;
1245 gsize end_range = begin_range + block_size;
1247 // Fast path, check table boundaries.
1248 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
1249 if (found_entry || continue_search == FALSE)
1252 // Scan table for an entry including range.
1253 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1254 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1255 g_assert_checked (current_entry != NULL);
1257 // Do we have a match?
1258 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
// Convenience wrapper around find_range_in_table_no_lock_ex that unwraps
// the GList node to its DynamicFunctionTableEntry payload (or NULL).
1267 static inline DynamicFunctionTableEntry *
1268 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1270 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1271 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Find the GList node whose entry's [begin_range, end_range] contains pc.
// Uses a degenerate range (begin == end == pc) so the same boundary fast
// path can be reused; the scan then does a containment check rather than
// the exact match used by find_range_in_table_no_lock_ex.
// Caller must hold the table lock (shared or exclusive).
1275 find_pc_in_table_no_lock_ex (const gpointer pc)
1277 GList *found_entry = NULL;
1278 gboolean continue_search = FALSE;
1280 gsize begin_range = (gsize)pc;
1281 gsize end_range = begin_range;
1283 // Fast path, check table boundaries.
1284 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1285 if (found_entry || continue_search == FALSE)
1288 // Scan table for an entry including range.
1289 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1290 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1291 g_assert_checked (current_entry != NULL);
1293 // Do we have a match?
1294 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
// Convenience wrapper around find_pc_in_table_no_lock_ex that unwraps
// the GList node to its DynamicFunctionTableEntry payload (or NULL).
1303 static inline DynamicFunctionTableEntry *
1304 find_pc_in_table_no_lock (const gpointer pc)
1306 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1307 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1310 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build invariant check for the entry list: non-empty ranges,
// strictly descending begin_range order, no overlapping regions.
1312 validate_table_no_lock (void)
1314 // Validation method checking that table is sorted as expected and does not include overlapping regions.
1315 // Method will assert on failure to explicitly indicate what check failed.
1316 if (g_dynamic_function_table_begin != NULL) {
1317 g_assert_checked (g_dynamic_function_table_end != NULL);
// NOTE(review): "prevoious_entry" is a typo for "previous_entry" -- harmless,
// but worth renaming when this function is next touched.
1319 DynamicFunctionTableEntry *prevoious_entry = NULL;
1320 DynamicFunctionTableEntry *current_entry = NULL;
1321 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1322 current_entry = (DynamicFunctionTableEntry *)node->data;
1324 g_assert_checked (current_entry != NULL);
1325 g_assert_checked (current_entry->end_range > current_entry->begin_range);
1327 if (prevoious_entry != NULL) {
1328 // List should be sorted in descending order on begin_range.
1329 g_assert_checked (prevoious_entry->begin_range > current_entry->begin_range);
1331 // Check for overlapping regions.
1332 g_assert_checked (prevoious_entry->begin_range >= current_entry->end_range);
1335 prevoious_entry = current_entry;
// No-op stub used when checked-build validation is compiled out.
1343 validate_table_no_lock (void)
1347 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Forward declaration: the out-of-proc/debugger callback used when growable
// function tables are unavailable (registered further below).
1350 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
// Insert a new code range [code_block, code_block + block_size) into the
// dynamic function table, creating and OS-registering a new entry when no
// exact-match entry exists yet. Returns the (new or pre-existing) entry.
// Takes the table lock exclusively for the whole operation.
1352 DynamicFunctionTableEntry *
1353 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1355 DynamicFunctionTableEntry *new_entry = NULL;
1357 gsize begin_range = (gsize)code_block;
1358 gsize end_range = begin_range + block_size;
1360 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1361 init_table_no_lock ();
1362 new_entry = find_range_in_table_no_lock (code_block, block_size);
1363 if (new_entry == NULL) {
1364 // Allocate new entry.
1365 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1366 if (new_entry != NULL) {
1368 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1369 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1370 InitializeSRWLock (&new_entry->lock);
1371 new_entry->handle = NULL;
1372 new_entry->begin_range = begin_range;
1373 new_entry->end_range = end_range;
1374 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1375 new_entry->rt_funcs_current_count = 0;
1376 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1378 if (new_entry->rt_funcs != NULL) {
1379 // Check insert on boundaries. List is sorted descending on begin_range.
1380 if (g_dynamic_function_table_begin == NULL) {
1381 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1382 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1383 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1384 // Insert at the head.
1385 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1386 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
// Append after the current tail node: O(1) since we start at the tail.
// The return value is intentionally unused -- the head cannot change here.
1388 g_list_append (g_dynamic_function_table_end, new_entry);
1389 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1391 //Search and insert at correct position.
1392 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1393 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1394 g_assert_checked (current_entry != NULL);
1396 if (current_entry->begin_range < new_entry->begin_range) {
1397 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1403 // Register dynamic function table entry with OS.
1404 if (g_rtl_add_growable_function_table != NULL) {
1405 // Allocate new growable handle table for entry.
1406 g_assert_checked (new_entry->handle == NULL);
1407 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1408 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1409 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
// Fallback path (pre-Win8): function table callback + out-of-proc DAC module.
1412 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1413 WCHAR *path = buffer;
1415 // DAC module should be in the same directory as the
1417 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1418 path = wcsrchr (buffer, TEXT('\\'));
1424 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1427 // Register function table callback + out of proc module.
// The |3 tags the identifier as required for callback-based function
// tables (see RtlInstallFunctionTableCallback: low two bits must be set).
1428 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
1429 BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
1430 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1431 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1435 // Only included in checked builds. Validates the structure of table after insert.
1436 validate_table_no_lock ();
1444 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Unlink the given GList node from the table, deregister its function table
// from the OS (growable table when available, otherwise the classic
// RtlDeleteFunctionTable path), and free the entry and its rt_funcs array.
// NULL entry is a no-op. Caller must hold the table lock exclusively.
1450 remove_range_in_table_no_lock (GList *entry)
1452 if (entry != NULL) {
1453 if (entry == g_dynamic_function_table_end)
1454 g_dynamic_function_table_end = entry->prev;
1456 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1457 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1459 g_assert_checked (removed_entry != NULL);
1460 g_assert_checked (removed_entry->rt_funcs != NULL);
1462 // Remove function table from OS.
1463 if (removed_entry->handle != NULL) {
1464 if (g_rtl_delete_growable_function_table != NULL) {
1465 g_rtl_delete_growable_function_table (removed_entry->handle);
1467 RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
1471 g_free (removed_entry->rt_funcs);
1472 g_free (removed_entry);
1474 g_list_free_1 (entry);
1477 // Only included in checked builds. Validates the structure of table after remove.
1478 validate_table_no_lock ();
// Remove the table entry whose range contains the given code address.
// Takes the table lock exclusively; a non-matching address results in a
// NULL found_entry and remove_range_in_table_no_lock no-ops.
1482 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1484 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1486 GList *found_entry = find_pc_in_table_no_lock_ex (code);
// NOTE(review): this assertion looks inverted. With `found_entry != NULL ||`,
// the right operand is evaluated exactly when found_entry is NULL, which
// dereferences NULL in checked builds. Intent was presumably
// `found_entry == NULL || ...begin_range == (gsize)code` -- confirm and fix.
1488 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1489 remove_range_in_table_no_lock (found_entry);
1491 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Remove the table entry exactly matching [code_block, code_block +
// block_size). Takes the table lock exclusively; a non-matching range
// results in a NULL found_entry and the remove helper no-ops.
1495 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1497 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1499 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
// NOTE(review): same suspect assertion as in
// mono_arch_unwindinfo_remove_pc_range_in_table -- the right operand
// dereferences found_entry exactly when it is NULL. Likely `== NULL ||`.
1501 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1502 remove_range_in_table_no_lock (found_entry);
1504 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Look up the RUNTIME_FUNCTION covering [code, code + code_size) inside the
// entry that contains `code`. Takes the global table lock shared, then the
// per-entry lock shared (rt_funcs may be swapped under the entry's exclusive
// lock by the insert path below). Returns NULL when nothing matches.
// Note: returns an interior pointer into found_entry->rt_funcs; its validity
// after the locks are released relies on the table's lifetime management.
1508 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1510 PRUNTIME_FUNCTION found_rt_func = NULL;
1512 gsize begin_range = (gsize)code;
1513 gsize end_range = begin_range + code_size;
1515 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1517 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1519 if (found_entry != NULL) {
1521 AcquireSRWLockShared (&found_entry->lock);
1523 g_assert_checked (found_entry->begin_range <= begin_range);
1524 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1525 g_assert_checked (found_entry->rt_funcs != NULL);
1527 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1528 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1530 // Is this our RT function entry?
// RUNTIME_FUNCTION addresses are offsets relative to the entry's base.
1531 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1532 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1533 found_rt_func = current_rt_func;
1538 ReleaseSRWLockShared (&found_entry->lock);
1541 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1543 return found_rt_func;
// Single-address variant: find the RUNTIME_FUNCTION covering pc
// (a zero-sized range degenerates to a containment check on pc).
1546 static inline PRUNTIME_FUNCTION
1547 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1549 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1552 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build invariant check for one entry's RUNTIME_FUNCTION array:
// counts consistent, each func non-empty with unwind data at/after its end,
// strictly ascending BeginAddress order, no overlapping regions.
1554 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1556 // Validation method checking that runtime function table is sorted as expected and does not include overlapping regions.
1557 // Method will assert on failure to explicitly indicate what check failed.
1558 g_assert_checked (entry != NULL);
1559 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1560 g_assert_checked (entry->rt_funcs != NULL);
1562 PRUNTIME_FUNCTION current_rt_func = NULL;
1563 PRUNTIME_FUNCTION previous_rt_func = NULL;
1564 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1565 current_rt_func = &(entry->rt_funcs [i]);
1567 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
1568 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1570 if (previous_rt_func != NULL) {
1571 // List should be sorted in ascending order based on BeginAddress.
1572 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1574 // Check for overlapping regions.
1575 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1578 previous_rt_func = current_rt_func;
// No-op stub used when checked-build validation is compiled out.
1585 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1589 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Insert a RUNTIME_FUNCTION describing [code, code + code_size) into the
// table entry that contains `code`. The unwind info itself is stored just
// past the method's code (mgreg_t-aligned), hence UnwindData is derived from
// the aligned end of the range. Holds the global lock *shared* (the entry
// list is not modified) and the per-entry lock *exclusive* (rt_funcs may be
// reallocated). Grows the array (doubling) and re-registers with the OS as
// needed for the growable-table vs. callback configurations.
1592 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1594 PRUNTIME_FUNCTION new_rt_func = NULL;
1596 gsize begin_range = (gsize)code;
1597 gsize end_range = begin_range + code_size;
1599 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1601 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1603 if (found_entry != NULL) {
1605 AcquireSRWLockExclusive (&found_entry->lock);
1607 g_assert_checked (found_entry->begin_range <= begin_range);
1608 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1609 g_assert_checked (found_entry->rt_funcs != NULL);
1610 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
1612 gsize code_offset = (gsize)code - found_entry->begin_range;
1613 gsize entry_count = found_entry->rt_funcs_current_count;
1614 gsize max_entry_count = found_entry->rt_funcs_max_count;
1615 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
// Build the new RUNTIME_FUNCTION; all fields are offsets from begin_range.
1617 RUNTIME_FUNCTION new_rt_func_data;
1618 new_rt_func_data.BeginAddress = code_offset;
1619 new_rt_func_data.EndAddress = code_offset + code_size;
1621 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
1622 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1624 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));
1626 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1628 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list
1629 // going to be directly reused in OS func tables). Check if we can append to end of existing table without realloc.
// NOTE(review): `a || b && c` parses as `a || (b && c)`, which is the intended
// grouping here (empty table, or room left and new func sorts last), but
// explicit parentheses would silence the compiler warning and aid readers.
1630 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1631 new_rt_func = &(current_rt_funcs [entry_count]);
1632 *new_rt_func = new_rt_func_data;
1635 // No easy way out, need to realloc, grow to double size (or current max, if too small).
1636 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1637 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1639 if (new_rt_funcs != NULL) {
1640 gsize from_index = 0;
1643 // Copy from old table into new table. Make sure new rt func gets inserted
1644 // into correct location based on sort order.
1645 for (; from_index < entry_count; ++from_index) {
1646 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1647 new_rt_func = &(new_rt_funcs [to_index++]);
1648 *new_rt_func = new_rt_func_data;
// Skip slots with no unwind data when compacting into the new array.
1651 if (current_rt_funcs [from_index].UnwindData != 0)
1652 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1655 // If we didn't insert by now, put it last in the list.
1656 if (new_rt_func == NULL) {
1657 new_rt_func = &(new_rt_funcs [to_index]);
1658 *new_rt_func = new_rt_func_data;
1665 // Update the stats for current entry.
1666 found_entry->rt_funcs_current_count = entry_count;
1667 found_entry->rt_funcs_max_count = max_entry_count;
// Reconcile with the OS: four cases depending on whether the array was
// reallocated and whether growable function tables are available.
1669 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1670 // No new table just report increase in use.
1671 g_assert_checked (found_entry->handle != NULL);
1672 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1673 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1674 // New table, delete old table and rt funcs, and register a new one.
1675 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1676 g_rtl_delete_growable_function_table (found_entry->handle);
1677 found_entry->handle = NULL;
1678 g_free (found_entry->rt_funcs);
1679 found_entry->rt_funcs = new_rt_funcs;
1680 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1681 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1682 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1684 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1685 // No table registered with OS, callback solution in use. Switch tables.
1686 g_free (found_entry->rt_funcs);
1687 found_entry->rt_funcs = new_rt_funcs;
1688 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1689 // No table registered with OS, callback solution in use, nothing to do.
1691 g_assert_not_reached ();
1694 // Only included in checked builds. Validates the structure of table after insert.
1695 validate_rt_funcs_in_table_no_lock (found_entry);
1697 ReleaseSRWLockExclusive (&found_entry->lock);
1700 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
// OS callback (pre-Win8 fallback registered via RtlInstallFunctionTableCallback):
// resolve the RUNTIME_FUNCTION covering ControlPc from our dynamic table.
1705 static PRUNTIME_FUNCTION
1706 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1708 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
// Replay Mono's collected DWARF-style unwind ops and translate them into the
// Windows x64 UNWIND_INFO format in-place in `unwindinfo`. Pushes of
// nonvolatile registers are only emitted before any SP/FP allocation, per
// the Windows x64 ABI prolog ordering. No-op when either argument is NULL.
1712 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1714 if (unwind_ops != NULL && unwindinfo != NULL) {
1715 MonoUnwindOp *unwind_op_data;
1716 gboolean sp_alloced = FALSE;
1717 gboolean fp_alloced = FALSE;
1719 // Replay collected unwind info and setup Windows format.
1720 for (GSList *l = unwind_ops; l; l = l->next) {
1721 unwind_op_data = (MonoUnwindOp *)l->data;
1722 switch (unwind_op_data->op) {
1723 case DW_CFA_offset : {
1724 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1725 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
1726 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1727 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1730 case DW_CFA_mono_sp_alloc_info_win64 : {
1731 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
1735 case DW_CFA_mono_fp_alloc_info_win64 : {
1736 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
// Allocate a fresh UNWIND_INFO (via mono_arch_unwindinfo_create) and fill it
// from the given unwind ops. Ownership of the allocation passes to the
// caller (freed later in mono_arch_unwindinfo_install_method_unwind_info).
1748 initialize_unwind_info_internal (GSList *unwind_ops)
1750 PUNWIND_INFO unwindinfo;
1752 mono_arch_unwindinfo_create (&unwindinfo);
1753 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
// Dry-run the unwind-op translation into a stack-allocated UNWIND_INFO just
// to count how many UNWIND_CODE slots the given ops will need.
1759 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1761 UNWIND_INFO unwindinfo = {0};
1762 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1763 return unwindinfo.CountOfCodes;
// Build the unwind info for a compiled method and stash it on the MonoCompile
// (must not already have one). Returns the serialized size needed to install
// it, based on the actual number of unwind codes produced.
1767 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1769 MonoCompile * current_cfg = (MonoCompile *)cfg;
1770 g_assert (current_cfg->arch.unwindinfo == NULL);
1771 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1772 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
// Copy the heap-built UNWIND_INFO (*monoui) into its final home directly
// after the method's code (mgreg_t-aligned -- matching the UnwindData offset
// computed in mono_arch_unwindinfo_insert_rt_func_in_table), free the heap
// copy, and register the RUNTIME_FUNCTION for the range in the table.
1776 mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
1778 PUNWIND_INFO unwindinfo, targetinfo;
1780 guint64 targetlocation;
1784 unwindinfo = (PUNWIND_INFO)*monoui;
1785 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1786 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));
// Copy the fixed header only (the full UnwindCode array is trimmed below).
1788 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1790 codecount = unwindinfo->CountOfCodes;
// Unwind codes were built downward from the end of the source array, hence
// the MONO_MAX_UNWIND_CODES - codecount source offset when compacting.
1792 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1793 sizeof (UNWIND_CODE) * codecount);
1796 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1798 // Validate the order of unwind op codes in checked builds. Offset should be in descending order.
1799 // In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
1801 for (int current = 0; current < codecount; current++) {
1802 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
1804 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1805 if (targetinfo->UnwindCode [current].OpInfo == 0) {
1813 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Heap copy no longer needed once installed at its target location.
1815 g_free (unwindinfo);
1818 // Register unwind info in table.
1819 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
// Build and install unwind info for a trampoline: same pipeline as methods,
// but the unwind ops come in directly instead of via a MonoCompile.
1823 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1825 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1826 if (unwindinfo != NULL) {
1827 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
// Code-manager hook: a new code chunk was allocated -- reserve a dynamic
// function table entry covering its address range.
1832 mono_arch_code_chunk_new (void *chunk, int size)
1834 mono_arch_unwindinfo_insert_range_in_table (chunk, size);
// Code-manager hook: a code chunk is being freed -- drop its dynamic
// function table entry (looked up by the chunk's start address).
1837 void mono_arch_code_chunk_destroy (void *chunk)
1839 mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
1841 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1843 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
// Emit (once, cached in `saved`) a small native stub that restores a tasklet
// continuation: copies the saved stack back, restores RBP/RSP from the
// continuation's LMF, and jumps to the saved return address with the `state`
// argument already in RAX as the return value.
1844 MonoContinuationRestore
1845 mono_tasklets_arch_restore (void)
1847 static guint8* saved = NULL;
1848 guint8 *code, *start;
1849 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1850 const guint kMaxCodeSize = 64;
1854 return (MonoContinuationRestore)saved;
1855 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1856 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1857 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1858 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1859 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1860 * We move cont to cont_reg since we need both rcx and rdi for the copy
1861 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1863 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1864 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1865 /* setup the copy of the stack */
1866 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
/* shift by 3: byte count -> 8-byte word count for the rep-prefixed copy */
1867 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1869 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1870 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1871 amd64_prefix (code, X86_REP_PREFIX);
1874 /* now restore the registers from the LMF */
1875 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1876 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1877 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
/* NOTE(review): which register receives lmf_addr here appears to depend on
 * configuration (#ifdef arms not visible in this excerpt) -- confirm. */
1880 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1882 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1885 /* state is already in rax */
1886 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1887 g_assert ((code - start) <= kMaxCodeSize);
1889 mono_arch_flush_icache (start, code - start);
1890 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
1893 return (MonoContinuationRestore)saved;
1895 #endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
1898 * mono_arch_setup_resume_sighandler_ctx:
1900 * Setup CTX so execution continues at FUNC.
1903 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1906 * When resuming from a signal handler, the stack should be misaligned, just like right after
// If SP is 16-byte aligned, push it down by 8 to mimic the post-call state
// (return address pushed), then point IP at the resume function.
1909 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1910 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1911 MONO_CONTEXT_SET_IP (ctx, func);
// Stub implementations used when the JIT is disabled: requesting exception
// trampolines in this configuration is a bug, so each one aborts.
1916 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1918 g_assert_not_reached ();
1923 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1925 g_assert_not_reached ();
1930 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1932 g_assert_not_reached ();
1937 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1939 g_assert_not_reached ();
1944 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1946 g_assert_not_reached ();
1951 mono_amd64_get_exception_trampolines (gboolean aot)
1953 g_assert_not_reached ();
1956 #endif /* DISABLE_JIT */
1958 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
// Stub used when tasklets are unsupported or the JIT is disabled:
// calling it indicates a configuration bug, so abort.
1959 MonoContinuationRestore
1960 mono_tasklets_arch_restore (void)
1962 g_assert_not_reached ();
1965 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */