2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Johan Lorensson (lateralusx.github@gmail.com)
8 * (C) 2001 Ximian, Inc.
9 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
10 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
21 #ifdef HAVE_UCONTEXT_H
25 #include <mono/arch/amd64/amd64-codegen.h>
26 #include <mono/metadata/abi-details.h>
27 #include <mono/metadata/appdomain.h>
28 #include <mono/metadata/tabledefs.h>
29 #include <mono/metadata/threads.h>
30 #include <mono/metadata/threads-types.h>
31 #include <mono/metadata/debug-helpers.h>
32 #include <mono/metadata/exception.h>
33 #include <mono/metadata/gc-internals.h>
34 #include <mono/metadata/mono-debug.h>
35 #include <mono/utils/mono-mmap.h>
38 #include "mini-amd64.h"
41 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
44 static MonoW32ExceptionHandler fpe_handler;
45 static MonoW32ExceptionHandler ill_handler;
46 static MonoW32ExceptionHandler segv_handler;
48 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
49 void *mono_win_vectored_exception_handle;
/*
 * Dispatch a Win32 structured exception to the registered per-type handler.
 * _ex is the handler-variable prefix (fpe/ill/segv); the call is skipped when
 * no handler of that type has been registered via win32_seh_set_handler ().
 */
#define W32_SEH_HANDLE_EX(_ex) \
if (_ex##_handler) _ex##_handler(0, ep, ctx)
/*
 * seh_unhandled_exception_filter:
 *
 * Process-wide last-chance filter: chains to the filter that was installed
 * before the runtime's (if any), otherwise reports the fault as a native
 * crash and lets the OS keep searching for a handler.
 * NOTE(review): this excerpt appears to be missing braces/lines; documented
 * from the visible statements only.
 */
static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
#ifndef MONO_CROSS_COMPILE
/* Chain to the previously installed top-level filter, if there was one. */
if (mono_old_win_toplevel_exception_filter) {
return (*mono_old_win_toplevel_exception_filter)(ep);
mono_handle_native_crash ("SIGSEGV", NULL, NULL);
/* Tell the OS to continue looking for another exception handler. */
return EXCEPTION_CONTINUE_SEARCH;
* Unhandled Exception Filter
* Top-level per-process exception handler.
/*
 * seh_vectored_exception_handler:
 *
 * Vectored exception handler installed with AddVectoredExceptionHandler ().
 * Routes hardware exceptions (access violation, illegal instruction, FP and
 * integer faults) to the runtime's registered W32 handlers; anything else is
 * left for the chained/unhandled filter.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
/* If the thread is not managed by the runtime return early */
return EXCEPTION_CONTINUE_SEARCH;
jit_tls->mono_win_chained_exception_needs_run = FALSE;
res = EXCEPTION_CONTINUE_EXECUTION;
er = ep->ExceptionRecord;
ctx = ep->ContextRecord;
/* Dispatch on the hardware exception code. */
switch (er->ExceptionCode) {
case EXCEPTION_ACCESS_VIOLATION:
W32_SEH_HANDLE_EX(segv);
case EXCEPTION_ILLEGAL_INSTRUCTION:
W32_SEH_HANDLE_EX(ill);
case EXCEPTION_INT_DIVIDE_BY_ZERO:
case EXCEPTION_INT_OVERFLOW:
case EXCEPTION_FLT_DIVIDE_BY_ZERO:
case EXCEPTION_FLT_OVERFLOW:
case EXCEPTION_FLT_UNDERFLOW:
case EXCEPTION_FLT_INEXACT_RESULT:
W32_SEH_HANDLE_EX(fpe);
/* Unrecognized code: defer to the chained/unhandled exception filter. */
jit_tls->mono_win_chained_exception_needs_run = TRUE;
if (jit_tls->mono_win_chained_exception_needs_run) {
/* Don't copy context back if we chained exception
* as the handler may have modified the EXCEPTION_POINTERS
* directly. We don't pass sigcontext to chained handlers.
* Return continue search so the UnhandledExceptionFilter
* can correctly chain the exception.
res = EXCEPTION_CONTINUE_SEARCH;
/*
 * win32_seh_init:
 *
 * Install the runtime's exception handlers: save and replace the process
 * top-level filter, then register a first-position vectored handler.
 */
void win32_seh_init()
mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
/*
 * win32_seh_cleanup:
 *
 * Undo win32_seh_init (): restore the saved top-level filter and remove the
 * vectored exception handler.
 */
void win32_seh_cleanup()
if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
/*
 * win32_seh_set_handler:
 *
 * Register @handler for one exception category; the visible assignments
 * select the FPE, illegal-instruction or SEGV handler slot (the selecting
 * switch on @type is not visible in this excerpt).
 */
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
fpe_handler = handler;
ill_handler = handler;
segv_handler = handler;
154 #endif /* TARGET_WIN32 */
* mono_arch_get_restore_context:
* Returns a pointer to a method which restores a previously saved sigcontext.
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
guint8 *start = NULL;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
/* restore_context (MonoContext *ctx) */
start = code = (guint8 *)mono_global_codeman_reserve (256);
/* Keep the ctx pointer in the scratch register %r11 while restoring. */
amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
/* Restore all registers except %rip and %r11 */
gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
for (i = 0; i < AMD64_NREG; ++i) {
if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
/*
* The context resides on the stack, in the stack frame of the
* caller of this function. The stack pointer that we need to
* restore is potentially many stack frames higher up, so the
* distance between them can easily be more than the red zone
* size. Hence the stack pointer can be restored only after
* we have finished loading everything from the context.
*/
amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
/* jump to the saved IP */
amd64_jump_reg (code, AMD64_R11);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
* mono_arch_get_call_filter:
* Returns a pointer to a method which calls an exception filter. We
* also use this function to call finally handlers (we pass NULL as
* @exc object in this case).
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
const guint kMaxCodeSize = 128;
start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
/* call_filter (MonoContext *ctx, unsigned long eip) */
/* Alloc new frame */
amd64_push_reg (code, AMD64_RBP);
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
/* Save callee saved regs */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i)) {
amd64_push_reg (code, i);
amd64_push_reg (code, AMD64_RBP);
/* Make stack misaligned, the call will make it aligned again */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Load %rbp from the context so the filter sees the right frame pointer. */
amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
/* load callee saved regs */
for (i = 0; i < AMD64_NREG; ++i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
/* load exc register */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
/* call the handler */
amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment made before the call. */
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
amd64_pop_reg (code, AMD64_RBP);
/* Restore callee saved regs */
for (i = AMD64_NREG; i >= 0; --i)
if (AMD64_IS_CALLEE_SAVED_REG (i))
amd64_pop_reg (code, i);
/* Tear down the frame: reset %rsp to the frame base, then restore %rbp. */
amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
amd64_pop_reg (code, AMD64_RBP);
g_assert ((code - start) < kMaxCodeSize);
mono_arch_flush_icache (start, code - start);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
295 #endif /* !DISABLE_JIT */
* The first few arguments are dummy, to force the other arguments to be passed on
* the stack, this avoids overwriting the argument registers in the throw trampoline.
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, MonoObject *exc, gboolean rethrow)
/* mctx is on the caller's stack */
memcpy (&ctx, mctx, sizeof (MonoContext));
if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
MonoException *mono_ex = (MonoException*)exc;
/* Clear any stale trace from a previous throw of the same object
* (visible path; the !rethrow guard is not shown in this excerpt). */
mono_ex->stack_trace = NULL;
mono_ex->trace_ips = NULL;
mono_error_assert_ok (&error);
/* adjust eip so that it point into the call instruction */
ctx.gregs [AMD64_RIP] --;
mono_handle_exception (&ctx, exc);
mono_restore_context (&ctx);
/* mono_restore_context () never returns. */
g_assert_not_reached ();
/*
 * mono_amd64_throw_corlib_exception:
 *
 * Create a corlib exception from @ex_token_index, rewind RIP by @pc_offset
 * to point at the faulting call site, and throw it via
 * mono_amd64_throw_exception (). The dummy args keep the real arguments on
 * the stack (see the comment on mono_amd64_throw_exception).
 */
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
mctx->gregs [AMD64_RIP] -= pc_offset;
/* Negate the ip adjustment done in mono_amd64_throw_exception () */
mctx->gregs [AMD64_RIP] += 1;
mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
/*
 * mono_amd64_resume_unwind:
 *
 * Continue unwinding from the context in @mctx; the dummy arguments serve
 * the same stack-passing purpose as in mono_amd64_throw_exception ().
 */
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
guint64 dummy5, guint64 dummy6,
MonoContext *mctx, guint32 dummy7, gint64 dummy8)
/* Only the register parameters are valid */
/* mctx is on the caller's stack */
memcpy (&ctx, mctx, sizeof (MonoContext));
mono_resume_unwind (&ctx);
* get_throw_trampoline:
* Generate a call to mono_amd64_throw_exception/
* mono_amd64_throw_corlib_exception.
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
const guint kMaxCodeSize = 256;
dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
dummy_stack_space = 0;
start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
/* The stack is unaligned on entry */
stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
unwind_ops = mono_arch_get_cie_program ();
/* Allocate the frame and record the unwind ops describing it. */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
/*
* To hide linux/windows calling convention differences, we pass all arguments on
* the stack by passing 6 dummy values in registers.
*/
arg_offsets [0] = dummy_stack_space + 0;
arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Save every general register into the MonoContext on the stack. */
for (i = 0; i < AMD64_NREG; ++i)
amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Record the caller's SP (frame size plus the return address slot). */
amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
/* Record the caller's IP from the return address slot. */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
/* Set arg1 == ctx */
amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
/* Set arg2 == exc/ex_token_index */
amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
/* Set arg3 == rethrow/pc offset */
amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
/*
* The caller doesn't pass in a pc/pc offset, instead we simply use the
* caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
*/
amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
const char *icall_name;
icall_name = "mono_amd64_resume_unwind";
icall_name = "mono_amd64_throw_corlib_exception";
icall_name = "mono_amd64_throw_exception";
/* AOT: resolve the icall through a patch; JIT: embed its address directly. */
ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
amd64_call_reg (code, AMD64_R11);
/* The icall never returns; trap if it somehow does. */
amd64_breakpoint (code);
mono_arch_flush_icache (start, code - start);
g_assert ((code - start) < kMaxCodeSize);
g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
* mono_arch_get_throw_exception:
* Returns a function pointer which can be used to raise
* exceptions. The returned function has the following
* signature: void (*func) (MonoException *exc);
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
/*
 * mono_arch_get_rethrow_exception:
 *
 * Same as mono_arch_get_throw_exception (), but the generated trampoline
 * rethrows (rethrow == TRUE), preserving the original stack trace.
 */
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
* mono_arch_get_throw_corlib_exception:
* Returns a function pointer which can be used to raise
* corlib exceptions. The returned function has the following
* signature: void (*func) (guint32 ex_token, guint32 offset);
* Here, offset is the offset which needs to be subtracted from the caller IP
* to get the IP of the throw. Passing the offset has the advantage that it
* needs no relocations in the caller.
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
514 #endif /* !DISABLE_JIT */
* mono_arch_unwind_frame:
* This function is used to gather information from @ctx, and store it in @frame_info.
* It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
* is modified if needed.
* Returns TRUE on success, FALSE otherwise.
mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
MonoContext *new_ctx, MonoLMF **lmf,
mgreg_t **save_locations,
StackFrameInfo *frame)
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
memset (frame, 0, sizeof (StackFrameInfo));
/* --- Case 1 (visible): @ji describes JIT-compiled code; use DWARF-style
 * unwind info to step to the caller. --- */
mgreg_t regs [MONO_MAX_IREGS + 1];
guint32 unwind_info_len;
guint8 *epilog = NULL;
if (ji->is_trampoline)
frame->type = FRAME_TYPE_TRAMPOLINE;
frame->type = FRAME_TYPE_MANAGED;
unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
frame->unwind_info = unwind_info;
frame->unwind_info_len = unwind_info_len;
printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
mono_print_unwind_info (unwind_info, unwind_info_len);
/* LLVM compiled code doesn't have this info */
if (ji->has_arch_eh_info)
epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
for (i = 0; i < AMD64_NREG; ++i)
regs [i] = new_ctx->gregs [i];
mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
(guint8*)ji->code_start + ji->code_size,
(guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
save_locations, MONO_MAX_IREGS, &cfa);
for (i = 0; i < AMD64_NREG; ++i)
new_ctx->gregs [i] = regs [i];
/* The CFA becomes the new SP value */
new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Point the new IP inside the call instruction, not at the return address. */
new_ctx->gregs [AMD64_RIP] --;
/* --- Case 2 (visible): no JIT info; unwind through the LMF chain. Low bits
 * of previous_lmf encode the entry kind (1 = rip set, 2 = debugger invoke,
 * 4 = trampoline with full ctx). --- */
if (((guint64)(*lmf)->previous_lmf) & 2) {
/*
* This LMF entry is created by the soft debug code to mark transitions to
* managed code done during invokes.
*/
MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
g_assert (ext->debugger_invoke);
memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
if (((guint64)(*lmf)->previous_lmf) & 4) {
MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
} else if (((guint64)(*lmf)->previous_lmf) & 1) {
/* This LMF has the rip field set */
} else if ((*lmf)->rsp == 0) {
/*
* The rsp field is set just before the call which transitioned to native
* code. Obtain the rip from the stack.
*/
rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
/*
* FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
* in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
*/
frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
if (((guint64)(*lmf)->previous_lmf) & 4) {
MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
/* Trampoline frame */
for (i = 0; i < AMD64_NREG; ++i)
new_ctx->gregs [i] = ext->ctx->gregs [i];
new_ctx->gregs [AMD64_RIP] --;
/*
* The registers saved in the LMF will be restored using the normal unwind info,
* when the wrapper frame is processed.
*/
new_ctx->gregs [AMD64_RIP] = rip;
new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
/* Callee-saved regs other than RBP are unknown here; zero them so stale
 * values are not mistaken for live ones. */
for (i = 0; i < AMD64_NREG; ++i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
new_ctx->gregs [i] = 0;
/* Pop the LMF entry, masking off the tag bits. */
*lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
* Called by resuming from a signal handler.
handle_signal_exception (gpointer obj)
MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
/* The signal handler stashed the fault context in TLS; copy it locally. */
memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
mono_handle_exception (&ctx, (MonoObject *)obj);
mono_restore_context (&ctx);
/*
 * mono_arch_setup_async_callback:
 *
 * Rewrite @ctx so that, when restored, execution continues in @async_cb with
 * @user_data as its first argument (placed in %rdi), on a new frame allocated
 * below the interrupted stack's red zone.
 */
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
guint64 sp = ctx->gregs [AMD64_RSP];
ctx->gregs [AMD64_RDI] = (guint64)user_data;
/* Allocate a stack frame below the red zone */
/* The stack should be unaligned */
/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
*(guint64*)sp = ctx->gregs [AMD64_RIP];
ctx->gregs [AMD64_RSP] = sp;
ctx->gregs [AMD64_RIP] = (guint64)async_cb;
* mono_arch_handle_exception:
* @ctx: saved processor state
* @obj: the exception object
mono_arch_handle_exception (void *sigctx, gpointer obj)
#if defined(MONO_ARCH_USE_SIGACTION)
/*
* Handling the exception in the signal handler is problematic, since the original
* signal is disabled, and we could run arbitrary code though the debugger. So
* resume into the normal stack and do most work there if possible.
*/
MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
/* Pass the ctx parameter in TLS */
mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
/* Redirect the interrupted thread to handle_signal_exception () on its
 * normal stack. */
mctx = jit_tls->ex_ctx;
mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
mono_monoctx_to_sigctx (&mctx, sigctx);
/* Fallback path (no sigaction): handle the exception in place. */
mono_sigctx_to_monoctx (sigctx, &mctx);
mono_handle_exception (&mctx, obj);
mono_monoctx_to_sigctx (&mctx, sigctx);
/*
 * mono_arch_ip_from_context:
 *
 * Extract the instruction pointer from a platform signal/exception context:
 * ucontext_t on sigaction platforms, CONTEXT on Win32, MonoContext otherwise.
 */
mono_arch_ip_from_context (void *sigctx)
#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
return ((CONTEXT*)sigctx)->Rip;
MonoContext *ctx = sigctx;
return (gpointer)ctx->gregs [AMD64_RIP];
/*
 * restore_soft_guard_pages:
 *
 * Re-protect the soft stack-overflow guard area (made accessible during
 * stack-overflow handling) so future overflows fault again.
 */
restore_soft_guard_pages (void)
MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
if (jit_tls->stack_ovf_guard_base)
mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
* this function modifies mctx so that when it is restored, it
* won't execute starting at mctx.eip, but in a function that
* will restore the protection on the soft-guard pages and return back to
* continue at mctx.eip.
prepare_for_guard_pages (MonoContext *mctx)
sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
/* the return addr */
sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
/* Detour through restore_soft_guard_pages (); it returns to the saved IP. */
mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
mctx->gregs [AMD64_RSP] = (guint64)sp;
/*
 * altstack_handle_and_restore:
 *
 * Runs on the normal stack after a fault handled on the signal altstack:
 * if the fault IP is not managed code, report a native crash; otherwise
 * handle @obj as a managed exception, re-arm the guard pages when this was
 * a stack overflow, and resume from the saved context.
 */
altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
mono_handle_native_crash ("SIGSEGV", NULL, NULL);
mono_handle_exception (&mctx, obj);
prepare_for_guard_pages (&mctx);
mono_restore_context (&mctx);
/*
 * mono_arch_handle_altstack_exception:
 *
 * Invoked from the SIGSEGV handler running on the signal altstack. Builds a
 * call frame on the faulting thread's real stack and redirects the signal
 * context so that, on return from the handler, execution continues in
 * altstack_handle_and_restore () with the copied context as argument.
 */
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
#if defined(MONO_ARCH_USE_SIGACTION)
MonoException *exc = NULL;
MonoContext *copied_ctx;
exc = mono_domain_get ()->stack_overflow_ex;
/* setup a call frame on the real stack so that control is returned there
* and exception handling can continue.
* The frame looks like:
* 128 is the size of the red zone
*/
frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
/* Align down to 16 bytes, then carve out the frame below the red zone. */
sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
sp = (gpointer *)((char*)sp - frame_size);
copied_ctx = (MonoContext*)(sp + 4);
/* the arguments must be aligned */
sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
mono_sigctx_to_monoctx (sigctx, copied_ctx);
/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
/* SysV argument registers: ctx, exception object, stack_ovf flag. */
UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
UCONTEXT_REG_RDX (sigctx) = stack_ovf;
/*
 * mono_amd64_get_original_ip:
 *
 * Return the original IP recorded in the current LMF and clear the tag bit
 * that marked previous_lmf as carrying an rip (bit 0).
 */
mono_amd64_get_original_ip (void)
MonoLMF *lmf = mono_get_lmf ();
/* Reset the change to previous_lmf */
lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * mono_amd64_get_exception_trampolines:
 *
 * Build the LLVM-specific exception trampolines (corlib throw, absolute-PC
 * corlib throw, resume-unwind) and return their MonoTrampInfo list.
 */
mono_amd64_get_exception_trampolines (gboolean aot)
GSList *tramps = NULL;
/* LLVM needs different throw trampolines */
get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
tramps = g_slist_prepend (tramps, info);
get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
tramps = g_slist_prepend (tramps, info);
get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
tramps = g_slist_prepend (tramps, info);
872 #endif /* !DISABLE_JIT */
/*
 * mono_arch_exceptions_init:
 *
 * Register the exception trampolines as JIT icalls. In AOT mode (visible
 * first branch) they are loaded from the AOT image; otherwise they are
 * generated eagerly here to avoid initialization races.
 */
mono_arch_exceptions_init (void)
tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
/* Call this to avoid initialization races */
tramps = mono_amd64_get_exception_trampolines (FALSE);
for (l = tramps; l; l = l->next) {
MonoTrampInfo *info = (MonoTrampInfo *)l->data;
mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
mono_tramp_info_register (info, NULL);
g_slist_free (tramps);
900 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
/*
 * mono_arch_unwindinfo_create:
 *
 * Allocate a zeroed Win64 UNWIND_INFO record (version 1) and store it in
 * *monoui. Caller owns the allocation.
 */
mono_arch_unwindinfo_create (gpointer* monoui)
PUNWIND_INFO newunwindinfo;
*monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
newunwindinfo->Version = 1;
/*
 * mono_arch_unwindinfo_add_push_nonvol:
 *
 * Append a UWOP_PUSH_NONVOL unwind code for the register push described by
 * @unwind_op. Codes are filled from the end of the UnwindCode array so they
 * end up sorted by descending CodeOffset, as the OS unwinder requires.
 */
mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
PUNWIND_CODE unwindcode;
g_assert (unwindinfo != NULL);
if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
g_error ("Larger allocation needed for the unwind information.");
codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
unwindcode = &unwindinfo->UnwindCode [codeindex];
unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
unwindcode->CodeOffset = (guchar)unwind_op->when;
unwindcode->OpInfo = unwind_op->reg;
/* Prolog offsets must be added in increasing order. */
if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
g_error ("Adding unwind info in wrong order.");
unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * mono_arch_unwindinfo_add_set_fpreg:
 *
 * Append a UWOP_SET_FPREG unwind code establishing @unwind_op->reg as the
 * frame register at offset @unwind_op->val (must be 16-byte aligned, stored
 * scaled by 16 per the Win64 unwind format).
 */
mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
PUNWIND_CODE unwindcode;
g_assert (unwindinfo != NULL);
if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
g_error ("Larger allocation needed for the unwind information.");
codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
unwindcode = &unwindinfo->UnwindCode [codeindex];
unwindcode->UnwindOp = UWOP_SET_FPREG;
unwindcode->CodeOffset = (guchar)unwind_op->when;
g_assert (unwind_op->val % 16 == 0);
unwindinfo->FrameRegister = unwind_op->reg;
unwindinfo->FrameOffset = unwind_op->val / 16;
/* Prolog offsets must be added in increasing order. */
if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
g_error ("Adding unwind info in wrong order.");
unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * mono_arch_unwindinfo_add_alloc_stack:
 *
 * Append unwind codes for a stack allocation of @unwind_op->val bytes.
 * Encoding depends on size: UWOP_ALLOC_SMALL (1 slot, <= 128 bytes),
 * UWOP_ALLOC_LARGE/OpInfo=0 (2 slots, size/8 in next slot, <= 0x7FFF8), or
 * UWOP_ALLOC_LARGE/OpInfo=1 (3 slots, unscaled 32-bit size in next two slots).
 */
mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
PUNWIND_CODE unwindcode;
g_assert (unwindinfo != NULL);
size = unwind_op->val;
g_error ("Stack allocation must be equal to or greater than 0x8.");
else if (size <= 0x7FFF8)
if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
g_error ("Larger allocation needed for the unwind information.");
codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
unwindcode = &unwindinfo->UnwindCode [codeindex];
unwindcode->CodeOffset = (guchar)unwind_op->when;
if (codesneeded == 1) {
/*The size of the allocation is
(the number in the OpInfo member) times 8 plus 8*/
unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
unwindcode->OpInfo = (size - 8)/8;
if (codesneeded == 3) {
/*the unscaled size of the allocation is recorded
in the next two slots in little-endian format.
NOTE, unwind codes are allocated from end to beginning of list so
unwind code will have right execution order. List is sorted on CodeOffset
using descending sort order.*/
unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
unwindcode->OpInfo = 1;
*((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
/*the size of the allocation divided by 8
is recorded in the next slot.
NOTE, unwind codes are allocated from end to beginning of list so
unwind code will have right execution order. List is sorted on CodeOffset
using descending sort order.*/
unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
unwindcode->OpInfo = 0;
(unwindcode + 1)->FrameOffset = (gushort)(size/8);
/* Prolog offsets must be added in increasing order. */
if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
g_error ("Adding unwind info in wrong order.");
unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1024 static gboolean g_dyn_func_table_inited;
1026 // Dynamic function table used when registering unwind info for OS unwind support.
1027 static GList *g_dynamic_function_table_begin;
1028 static GList *g_dynamic_function_table_end;
1030 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1031 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1033 // Module handle used when explicit loading ntdll.
1034 static HMODULE g_ntdll;
1036 // If Win8 or Win2012Server or later, use growable function tables instead
1037 // of callbacks. Callback solution will still be fallback on older systems.
1038 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1039 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1040 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1042 // When using function table callback solution an out of proc module is needed by
1043 // debuggers in order to read unwind info from debug target.
1045 #define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
1047 #define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
1050 #define MONO_DAC_MODULE_MAX_PATH 1024
/*
 * init_table_no_lock:
 *
 * One-time initialization of the dynamic function table state. Caller must
 * hold g_dynamic_function_table_lock exclusively. Resolves the growable
 * function table APIs from ntdll when available (Win8/Win2012+); on older
 * systems the lookups fail and the callback fallback is used instead.
 */
init_table_no_lock (void)
if (g_dyn_func_table_inited == FALSE) {
g_assert_checked (g_dynamic_function_table_begin == NULL);
g_assert_checked (g_dynamic_function_table_end == NULL);
g_assert_checked (g_rtl_add_growable_function_table == NULL);
g_assert_checked (g_rtl_grow_function_table == NULL);
g_assert_checked (g_rtl_delete_growable_function_table == NULL);
g_assert_checked (g_ntdll == NULL);
// Load functions available on Win8/Win2012Server or later. If running on earlier
// systems the below GetProcAddress will fail, this is expected behavior.
if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
g_dyn_func_table_inited = TRUE;
/*
 * mono_arch_unwindinfo_init_table:
 *
 * Thread-safe wrapper around init_table_no_lock (): double-checked init
 * under the exclusive SRW lock.
 */
mono_arch_unwindinfo_init_table (void)
if (g_dyn_func_table_inited == FALSE) {
AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
init_table_no_lock ();
ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/*
 * terminate_table_no_lock:
 *
 * Tear down the dynamic function table state: free all entries and the list,
 * drop the cached ntdll function pointers, and release the ntdll module
 * handle. Caller must hold g_dynamic_function_table_lock exclusively.
 */
terminate_table_no_lock (void)
if (g_dyn_func_table_inited == TRUE) {
if (g_dynamic_function_table_begin != NULL) {
// Free all list elements.
for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
g_list_free (g_dynamic_function_table_begin);
g_dynamic_function_table_begin = NULL;
g_dynamic_function_table_end = NULL;
g_rtl_delete_growable_function_table = NULL;
g_rtl_grow_function_table = NULL;
g_rtl_add_growable_function_table = NULL;
if (g_ntdll != NULL) {
FreeLibrary (g_ntdll);
g_dyn_func_table_inited = FALSE;
/*
 * mono_arch_unwindinfo_terminate_table:
 *
 * Thread-safe wrapper around terminate_table_no_lock (): double-checked
 * teardown under the exclusive SRW lock.
 */
mono_arch_unwindinfo_terminate_table (void)
if (g_dyn_func_table_inited == TRUE) {
AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
terminate_table_no_lock ();
ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/*
 * fast_find_range_in_table_no_lock_ex:
 *
 * O(1) boundary check on the (descending-by-begin_range) table: returns the
 * first list node when it covers [begin_range, end_range], otherwise sets
 * *continue_search when the range could lie between the first and last
 * entries, meaning a full scan is warranted. Caller must hold the lock.
 */
fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
GList *found_entry = NULL;
// Fast path, look at boundaries.
if (g_dynamic_function_table_begin != NULL) {
DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? g_dynamic_function_table_end->data : first_entry;
// Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
// Entry belongs to first entry in list.
found_entry = g_dynamic_function_table_begin;
*continue_search = FALSE;
if (first_entry != NULL && first_entry->begin_range >= begin_range) {
if (last_entry != NULL && last_entry->begin_range <= begin_range) {
// Entry has a range that could exist in table, continue search.
*continue_search = TRUE;
1161 static inline DynamicFunctionTableEntry *
1162 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1164 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1165 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1169 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1171 GList *found_entry = NULL;
1172 gboolean continue_search = FALSE;
1174 gsize begin_range = (gsize)code_block;
1175 gsize end_range = begin_range + block_size;
1177 // Fast path, check table boundaries.
1178 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
1179 if (found_entry || continue_search == FALSE)
1182 // Scan table for an entry including range.
1183 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1184 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1185 g_assert_checked (current_entry != NULL);
1187 // Do we have a match?
1188 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
1197 static inline DynamicFunctionTableEntry *
1198 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1200 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1201 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1205 find_pc_in_table_no_lock_ex (const gpointer pc)
1207 GList *found_entry = NULL;
1208 gboolean continue_search = FALSE;
1210 gsize begin_range = (gsize)pc;
1211 gsize end_range = begin_range;
1213 // Fast path, check table boundaries.
1214 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1215 if (found_entry || continue_search == FALSE)
1218 // Scan table for a entry including range.
1219 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1220 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1221 g_assert_checked (current_entry != NULL);
1223 // Do we have a match?
1224 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
1233 static inline DynamicFunctionTableEntry *
1234 find_pc_in_table_no_lock (const gpointer pc)
1236 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1237 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Validation method checking that table is sorted as expected and doesn't
// include overlapped regions. Asserts on failure to explicitly indicate
// which check failed. Caller must hold the table lock.
// Fix: local variable renamed from misspelled "prevoious_entry".
static void
validate_table_no_lock (void)
{
	if (g_dynamic_function_table_begin != NULL) {
		g_assert_checked (g_dynamic_function_table_end != NULL);

		DynamicFunctionTableEntry *previous_entry = NULL;
		DynamicFunctionTableEntry *current_entry = NULL;
		for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
			current_entry = (DynamicFunctionTableEntry *)node->data;

			g_assert_checked (current_entry != NULL);
			g_assert_checked (current_entry->end_range > current_entry->begin_range);

			if (previous_entry != NULL) {
				// List should be sorted in descending order on begin_range.
				g_assert_checked (previous_entry->begin_range > current_entry->begin_range);

				// Check for overlapped regions.
				g_assert_checked (previous_entry->begin_range >= current_entry->end_range);
			}

			previous_entry = current_entry;
		}
	}
}

#else

// Checked-build-only validation; no-op in regular builds.
static void
validate_table_no_lock (void)
{
}

#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1280 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
// mono_arch_unwindinfo_insert_range_in_table:
//
// Ensure a DynamicFunctionTableEntry exists covering
// [code_block, code_block + block_size) and register it with the OS,
// keeping the table sorted descending on begin_range.
// NOTE(review): this excerpt carries stray original line numbers and several
// interior lines (braces, error paths, DAC registration details) are missing;
// restore from upstream before compiling.
1282 DynamicFunctionTableEntry *
1283 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1285 DynamicFunctionTableEntry *new_entry = NULL;
1287 gsize begin_range = (gsize)code_block;
1288 gsize end_range = begin_range + block_size;
// Exclusive table lock held for the whole insert; table is lazily initialized.
1290 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1291 init_table_no_lock ();
// Idempotent: reuse an existing entry with the exact same range if present.
1292 new_entry = find_range_in_table_no_lock (code_block, block_size);
1293 if (new_entry == NULL) {
1294 // Allocate new entry.
1295 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1296 if (new_entry != NULL) {
1298 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1299 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1300 InitializeSRWLock (&new_entry->lock);
1301 new_entry->handle = NULL;
1302 new_entry->begin_range = begin_range;
1303 new_entry->end_range = end_range;
1304 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1305 new_entry->rt_funcs_current_count = 0;
1306 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1308 if (new_entry->rt_funcs != NULL) {
1309 // Check insert on boundaries. List is sorted descending on begin_range.
1310 if (g_dynamic_function_table_begin == NULL) {
// Empty table: the single node is both head and tail.
1311 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1312 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1313 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1314 // Insert at the head.
1315 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1316 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
// Insert at the tail; append to the tail node and advance the cached tail pointer.
1318 g_list_append (g_dynamic_function_table_end, new_entry);
1319 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1321 //Search and insert at correct position.
1322 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1323 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1324 g_assert_checked (current_entry != NULL);
// First node with a smaller begin_range marks the insertion point.
1326 if (current_entry->begin_range < new_entry->begin_range) {
1327 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1333 // Register dynamic function table entry with OS.
1334 if (g_rtl_add_growable_function_table != NULL) {
1335 // Allocate new growable handle table for entry.
1336 g_assert_checked (new_entry->handle == NULL);
1337 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1338 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1339 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
// Fallback path: growable tables unavailable, use a callback-based
// function table plus an out-of-process DAC module path for debuggers.
1342 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1343 WCHAR *path = buffer;
1345 // DAC module should be in the same directory as the
1347 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1348 path = wcsrchr (buffer, TEXT('\\'));
1354 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1357 // Register function table callback + out of proc module.
// Low 2 bits set in the handle identifier per RtlInstallFunctionTableCallback contract.
1358 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
1359 BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
1360 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1361 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1365 // Only included in checked builds. Validates the structure of table after insert.
1366 validate_table_no_lock ();
1374 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1380 remove_range_in_table_no_lock (GList *entry)
1382 if (entry != NULL) {
1383 if (entry == g_dynamic_function_table_end)
1384 g_dynamic_function_table_end = entry->prev;
1386 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1387 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1389 g_assert_checked (removed_entry != NULL);
1390 g_assert_checked (removed_entry->rt_funcs != NULL);
1392 // Remove function table from OS.
1393 if (removed_entry->handle != NULL) {
1394 if (g_rtl_delete_growable_function_table != NULL) {
1395 g_rtl_delete_growable_function_table (removed_entry->handle);
1397 RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
1401 g_free (removed_entry->rt_funcs);
1402 g_free (removed_entry);
1404 g_list_free_1 (entry);
1407 // Only included in checked builds. Validates the structure of table after remove.
1408 validate_table_no_lock ();
1412 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1414 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1416 GList *found_entry = find_pc_in_table_no_lock_ex (code);
1418 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1419 remove_range_in_table_no_lock (found_entry);
1421 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1425 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1427 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1429 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1431 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1432 remove_range_in_table_no_lock (found_entry);
1434 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1438 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1440 PRUNTIME_FUNCTION found_rt_func = NULL;
1442 gsize begin_range = (gsize)code;
1443 gsize end_range = begin_range + code_size;
1445 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1447 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1449 if (found_entry != NULL) {
1451 AcquireSRWLockShared (&found_entry->lock);
1453 g_assert_checked (found_entry->begin_range <= begin_range);
1454 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1455 g_assert_checked (found_entry->rt_funcs != NULL);
1457 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1458 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1460 // Is this our RT function entry?
1461 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1462 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1463 found_rt_func = current_rt_func;
1468 ReleaseSRWLockShared (&found_entry->lock);
1471 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1473 return found_rt_func;
1476 inline PRUNTIME_FUNCTION
1477 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1479 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1482 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1484 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1486 // Validation method checking that runtime function table is sorted as expected and don't include overlapped regions.
1487 // Method will assert on failure to explicitly indicate what check failed.
1488 g_assert_checked (entry != NULL);
1489 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1490 g_assert_checked (entry->rt_funcs != NULL);
1492 PRUNTIME_FUNCTION current_rt_func = NULL;
1493 PRUNTIME_FUNCTION previous_rt_func = NULL;
1494 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1495 current_rt_func = &(entry->rt_funcs [i]);
1497 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
1498 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1500 if (previous_rt_func != NULL) {
1501 // List should be sorted in ascending order based on BeginAddress.
1502 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1504 // Check for overlapped regions.
1505 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1508 previous_rt_func = current_rt_func;
1515 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1519 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// mono_arch_unwindinfo_insert_rt_func_in_table:
//
// Insert a RUNTIME_FUNCTION for [code, code + code_size) into the table
// entry that contains code, keeping the rt func array sorted ascending on
// BeginAddress and re-registering with the OS when the array is reallocated.
// NOTE(review): this excerpt carries stray original line numbers and several
// interior lines (braces, else arms, counter updates) are missing; restore
// from upstream before compiling.
1522 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1524 PRUNTIME_FUNCTION new_rt_func = NULL;
1526 gsize begin_range = (gsize)code;
1527 gsize end_range = begin_range + code_size;
// Table lock shared (entry set not mutated); per-entry lock exclusive below.
1529 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1531 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1533 if (found_entry != NULL) {
1535 AcquireSRWLockExclusive (&found_entry->lock);
1537 g_assert_checked (found_entry->begin_range <= begin_range);
1538 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1539 g_assert_checked (found_entry->rt_funcs != NULL);
1540 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
// Addresses in RUNTIME_FUNCTION are relative to the entry's begin_range.
1542 gsize code_offset = (gsize)code - found_entry->begin_range;
1543 gsize entry_count = found_entry->rt_funcs_current_count;
1544 gsize max_entry_count = found_entry->rt_funcs_max_count;
1545 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1547 RUNTIME_FUNCTION new_rt_func_data;
1548 new_rt_func_data.BeginAddress = code_offset;
1549 new_rt_func_data.EndAddress = code_offset + code_size;
// Unwind info is stored directly after the method code, aligned to mgreg_t.
1551 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
1552 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1554 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));
1556 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1558 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list
1559 // going to be directly reused in OS func tables. Check if we can append to end of existing table without realloc.
1560 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1561 new_rt_func = &(current_rt_funcs [entry_count]);
1562 *new_rt_func = new_rt_func_data;
// Slow path: grow the array (double, or keep current max if already larger)
// and re-copy preserving sort order.
1565 // No easy way out, need to realloc, grow to double size (or current max, if to small).
1566 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1567 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1569 if (new_rt_funcs != NULL) {
1570 gsize from_index = 0;
1573 // Copy from old table into new table. Make sure new rt func gets inserted
1574 // into correct location based on sort order.
1575 for (; from_index < entry_count; ++from_index) {
1576 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1577 new_rt_func = &(new_rt_funcs [to_index++]);
1578 *new_rt_func = new_rt_func_data;
// Skip stale zeroed slots when compacting into the new array.
1581 if (current_rt_funcs [from_index].UnwindData != 0)
1582 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1585 // If we didn't insert by now, put it last in the list.
1586 if (new_rt_func == NULL) {
1587 new_rt_func = &(new_rt_funcs [to_index]);
1588 *new_rt_func = new_rt_func_data;
1595 // Update the stats for current entry.
1596 found_entry->rt_funcs_current_count = entry_count;
1597 found_entry->rt_funcs_max_count = max_entry_count;
// Four OS-registration cases: grow in place, swap growable table,
// callback-table swap, or callback-table no-op.
1599 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1600 // No new table just report increase in use.
1601 g_assert_checked (found_entry->handle != NULL);
1602 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1603 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1604 // New table, delete old table and rt funcs, and register a new one.
1605 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1606 g_rtl_delete_growable_function_table (found_entry->handle);
1607 found_entry->handle = NULL;
1608 g_free (found_entry->rt_funcs);
1609 found_entry->rt_funcs = new_rt_funcs;
1610 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1611 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1612 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1614 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1615 // No table registered with OS, callback solution in use. Switch tables.
1616 g_free (found_entry->rt_funcs);
1617 found_entry->rt_funcs = new_rt_funcs;
1618 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1619 // No table registered with OS, callback solution in use, nothing to do.
// Any other combination is an invariant violation.
1621 g_assert_not_reached ();
1624 // Only included in checked builds. Validates the structure of table after insert.
1625 validate_rt_funcs_in_table_no_lock (found_entry);
1627 ReleaseSRWLockExclusive (&found_entry->lock);
1630 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1635 static PRUNTIME_FUNCTION
1636 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1638 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
1642 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1644 if (unwind_ops != NULL && unwindinfo != NULL) {
1645 MonoUnwindOp *unwind_op_data;
1646 gboolean sp_alloced = FALSE;
1647 gboolean fp_alloced = FALSE;
1649 // Replay collected unwind info and setup Windows format.
1650 for (GSList *l = unwind_ops; l; l = l->next) {
1651 unwind_op_data = (MonoUnwindOp *)l->data;
1652 switch (unwind_op_data->op) {
1653 case DW_CFA_offset : {
1654 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1655 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
1656 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1657 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1660 case DW_CFA_mono_sp_alloc_info_win64 : {
1661 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
1665 case DW_CFA_mono_fp_alloc_info_win64 : {
1666 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
1678 initialize_unwind_info_internal (GSList *unwind_ops)
1680 PUNWIND_INFO unwindinfo;
1682 mono_arch_unwindinfo_create (&unwindinfo);
1683 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
1689 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1691 UNWIND_INFO unwindinfo = {0};
1692 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1693 return unwindinfo.CountOfCodes;
1697 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1699 MonoCompile * current_cfg = (MonoCompile *)cfg;
1700 g_assert (current_cfg->arch.unwindinfo == NULL);
1701 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1702 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
// mono_arch_unwindinfo_install_method_unwind_info:
//
// Copy the temporary UNWIND_INFO (*monoui) to its final, mgreg_t-aligned
// location directly after the method code, free the temporary, and register
// the range in the dynamic function table.
// NOTE(review): this excerpt carries stray original line numbers and several
// interior lines are missing; restore from upstream before compiling.
1706 mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
1708 PUNWIND_INFO unwindinfo, targetinfo;
1710 guint64 targetlocation;
1714 unwindinfo = (PUNWIND_INFO)*monoui;
// Final location: first mgreg_t-aligned address past the method code.
1715 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1716 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));
// Copy the fixed header without the (mostly unused) code array.
1718 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1720 codecount = unwindinfo->CountOfCodes;
// Codes are built from the end of the temporary array; compact them to the front.
1722 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1723 sizeof (UNWIND_CODE) * codecount);
1726 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1728 // Validate the order of unwind op codes in checked builds. Offset should be in descending order.
1729 // In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
1731 for (int current = 0; current < codecount; current++) {
1732 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
// UWOP_ALLOC_LARGE occupies extra slots depending on OpInfo; skip them.
1734 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1735 if (targetinfo->UnwindCode [current].OpInfo == 0) {
1743 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Temporary buffer ownership ends here.
1745 g_free (unwindinfo);
1748 // Register unwind info in table.
1749 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
1753 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1755 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1756 if (unwindinfo != NULL) {
1757 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
// Code manager hook: a new code chunk was allocated, pre-register its
// address range in the dynamic function table.
void
mono_arch_code_chunk_new (void *chunk, int size)
{
	mono_arch_unwindinfo_insert_range_in_table (chunk, size);
}
// Code manager hook: a code chunk is being destroyed, drop its entry from
// the dynamic function table.
void
mono_arch_code_chunk_destroy (void *chunk)
{
	mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
}
1771 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1773 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
// mono_tasklets_arch_restore:
//
// Lazily emit (once, cached in 'saved') a small native stub that restores a
// MonoContinuation: copies the saved stack back, restores RBP/RSP from the
// saved LMF, and jumps to the saved return address with the state argument
// in RAX.
// NOTE(review): this excerpt carries stray original line numbers and several
// interior lines (saved-stub check, rep movsq, platform-specific register
// restores, caching of 'saved') are missing; restore from upstream before
// compiling.
1774 MonoContinuationRestore
1775 mono_tasklets_arch_restore (void)
1777 static guint8* saved = NULL;
1778 guint8 *code, *start;
1779 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1780 const guint kMaxCodeSize = 64;
// Return the cached stub when it was already emitted.
1784 return (MonoContinuationRestore)saved;
1785 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1786 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1787 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1788 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1789 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1790 * We move cont to cont_reg since we need both rcx and rdi for the copy
1791 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1793 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1794 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1795 /* setup the copy of the stack */
1796 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
// Byte count -> qword count for the rep movsq copy.
1797 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1799 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1800 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1801 amd64_prefix (code, X86_REP_PREFIX);
1804 /* now restore the registers from the LMF */
1805 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1806 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1807 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
// Stash lmf_addr in a callee-saved register (platform-dependent choice).
1810 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1812 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1815 /* state is already in rax */
1816 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1817 g_assert ((code - start) <= kMaxCodeSize);
1819 mono_arch_flush_icache (start, code - start);
1820 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
1823 return (MonoContinuationRestore)saved;
1825 #endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
1828 * mono_arch_setup_resume_sighandler_ctx:
1830 * Setup CTX so execution continues at FUNC.
1833 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1836 * When resuming from a signal handler, the stack should be misaligned, just like right after
1839 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1840 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1841 MONO_CONTEXT_SET_IP (ctx, func);
1846 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1848 g_assert_not_reached ();
1853 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1855 g_assert_not_reached ();
1860 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1862 g_assert_not_reached ();
1867 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1869 g_assert_not_reached ();
1874 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1876 g_assert_not_reached ();
1881 mono_amd64_get_exception_trampolines (gboolean aot)
1883 g_assert_not_reached ();
1886 #endif /* DISABLE_JIT */
1888 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
1889 MonoContinuationRestore
1890 mono_tasklets_arch_restore (void)
1892 g_assert_not_reached ();
1895 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */