3 * exception support for AMD64
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
22 #ifdef HAVE_UCONTEXT_H
26 #include <mono/arch/amd64/amd64-codegen.h>
27 #include <mono/metadata/abi-details.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/tabledefs.h>
30 #include <mono/metadata/threads.h>
31 #include <mono/metadata/threads-types.h>
32 #include <mono/metadata/debug-helpers.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/gc-internals.h>
35 #include <mono/metadata/mono-debug.h>
36 #include <mono/utils/mono-mmap.h>
39 #include "mini-amd64.h"
/* Round VAL up to the next multiple of ALIGN; ALIGN must be a power of two. */
42 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Per-kind Win32 SEH handlers installed by the runtime (see win32_seh_set_handler). */
45 static MonoW32ExceptionHandler fpe_handler;
46 static MonoW32ExceptionHandler ill_handler;
47 static MonoW32ExceptionHandler segv_handler;
/* Previous top-level filter, saved so it can be chained to and restored on cleanup. */
49 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
/* Handle returned by AddVectoredExceptionHandler, used for later removal. */
50 void *mono_win_vectored_exception_handle;
/* Invoke the registered handler for exception kind _ex (fpe/ill/segv), if one is set. */
52 #define W32_SEH_HANDLE_EX(_ex) \
53 if (_ex##_handler) _ex##_handler(0, ep, ctx)
/*
 * Last-chance unhandled-exception filter: chain to the previously installed
 * top-level filter when one exists, otherwise report a fatal native crash.
 * NOTE(review): several physical lines appear elided in this extract, so the
 * visible control flow may be incomplete.
 */
55 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
57 #ifndef MONO_CROSS_COMPILE
58 if (mono_old_win_toplevel_exception_filter) {
59 return (*mono_old_win_toplevel_exception_filter)(ep);
/* No previous filter to chain to: treat this as a native SIGSEGV-style crash. */
63 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
65 return EXCEPTION_CONTINUE_SEARCH;
69 * Unhandled Exception Filter
70 * Top-level per-process exception handler.
/*
 * Vectored exception handler: dispatches hardware exceptions (access
 * violation, illegal instruction, FP/integer faults) to the runtime's
 * registered W32 handlers. Returns EXCEPTION_CONTINUE_SEARCH for threads
 * not managed by the runtime, or when the handler asked for chaining.
 */
72 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
77 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
79 /* If the thread is not managed by the runtime return early */
81 return EXCEPTION_CONTINUE_SEARCH;
83 jit_tls->mono_win_chained_exception_needs_run = FALSE;
84 res = EXCEPTION_CONTINUE_EXECUTION;
86 er = ep->ExceptionRecord;
87 ctx = ep->ContextRecord;
/* Route the exception to the matching runtime handler, if registered. */
89 switch (er->ExceptionCode) {
90 case EXCEPTION_ACCESS_VIOLATION:
91 W32_SEH_HANDLE_EX(segv);
93 case EXCEPTION_ILLEGAL_INSTRUCTION:
94 W32_SEH_HANDLE_EX(ill);
/* All FP and integer arithmetic faults funnel into the fpe handler. */
96 case EXCEPTION_INT_DIVIDE_BY_ZERO:
97 case EXCEPTION_INT_OVERFLOW:
98 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
99 case EXCEPTION_FLT_OVERFLOW:
100 case EXCEPTION_FLT_UNDERFLOW:
101 case EXCEPTION_FLT_INEXACT_RESULT:
102 W32_SEH_HANDLE_EX(fpe);
105 jit_tls->mono_win_chained_exception_needs_run = TRUE;
109 if (jit_tls->mono_win_chained_exception_needs_run) {
110 /* Don't copy context back if we chained exception
111 * as the handler may have modified the EXCEPTION_POINTERS
112 * directly. We don't pass sigcontext to chained handlers.
113 * Return continue search so the UnhandledExceptionFilter
114 * can correctly chain the exception.
116 res = EXCEPTION_CONTINUE_SEARCH;
/* Install the runtime's SEH machinery: save the previous top-level filter and
 * register our vectored handler first in the chain (the '1' argument). */
122 void win32_seh_init()
124 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
125 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
/* Undo win32_seh_init: restore the saved top-level filter and remove our
 * vectored handler. */
128 void win32_seh_cleanup()
132 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
134 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
/* Register HANDLER for exception kind TYPE. NOTE(review): the switch/case
 * lines selecting on 'type' appear elided in this extract. */
138 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
142 fpe_handler = handler;
145 ill_handler = handler;
148 segv_handler = handler;
155 #endif /* TARGET_WIN32 */
159 * mono_arch_get_restore_context:
161 * Returns a pointer to a method which restores a previously saved sigcontext.
/* Emits a small trampoline: restore_context (MonoContext *ctx) reloads all
 * GP registers from ctx->gregs, then RSP, and finally jumps to the saved RIP. */
164 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
166 guint8 *start = NULL;
168 MonoJumpInfo *ji = NULL;
169 GSList *unwind_ops = NULL;
172 /* restore_contect (MonoContext *ctx) */
174 start = code = (guint8 *)mono_global_codeman_reserve (256);
/* Keep the ctx pointer in R11 while the other registers get clobbered. */
176 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
178 /* Restore all registers except %rip and %r11 */
179 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
180 for (i = 0; i < AMD64_NREG; ++i) {
181 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
182 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
186 * The context resides on the stack, in the stack frame of the
187 * caller of this function. The stack pointer that we need to
188 * restore is potentially many stack frames higher up, so the
189 * distance between them can easily be more than the red zone
190 * size. Hence the stack pointer can be restored only after
191 * we have finished loading everything from the context.
/* Load target RSP into R8 and target RIP into R11, then switch stacks. */
193 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
194 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
195 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
197 /* jump to the saved IP */
198 amd64_jump_reg (code, AMD64_R11);
200 mono_arch_flush_icache (start, code - start);
201 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
204 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
210 * mono_arch_get_call_filter:
212 * Returns a pointer to a method which calls an exception filter. We
213 * also use this function to call finally handlers (we pass NULL as
214 * @exc object in this case).
217 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
223 MonoJumpInfo *ji = NULL;
224 GSList *unwind_ops = NULL;
225 const guint kMaxCodeSize = 128;
227 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
229 /* call_filter (MonoContext *ctx, unsigned long eip) */
232 /* Alloc new frame */
233 amd64_push_reg (code, AMD64_RBP);
234 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
236 /* Save callee saved regs */
238 for (i = 0; i < AMD64_NREG; ++i)
239 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
240 amd64_push_reg (code, i);
/* Save our own RBP so the filter can run with the throwing frame's RBP. */
246 amd64_push_reg (code, AMD64_RBP);
248 /* Make stack misaligned, the call will make it aligned again */
250 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
252 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Load RBP from the context so the filter sees the faulting frame's locals. */
255 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
256 /* load callee saved regs */
257 for (i = 0; i < AMD64_NREG; ++i) {
258 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
259 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
261 /* load exc register */
262 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
264 /* call the handler */
265 amd64_call_reg (code, AMD64_ARG_REG2);
/* Pop the alignment slot and our saved RBP back off. */
268 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
271 amd64_pop_reg (code, AMD64_RBP);
273 /* Restore callee saved regs */
274 for (i = AMD64_NREG; i >= 0; --i)
275 if (AMD64_IS_CALLEE_SAVED_REG (i))
276 amd64_pop_reg (code, i);
/* Tear down the frame and return to the caller of call_filter. */
279 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
280 amd64_pop_reg (code, AMD64_RBP);
286 g_assert ((code - start) < kMaxCodeSize);
288 mono_arch_flush_icache (start, code - start);
289 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
292 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
296 #endif /* !DISABLE_JIT */
299 * The first few arguments are dummy, to force the other arguments to be passed on
300 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/* Raises EXC using the machine state captured in MCTX; never returns.
 * Called from the throw trampolines generated by get_throw_trampoline(). */
303 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
304 guint64 dummy5, guint64 dummy6,
305 MonoContext *mctx, MonoObject *exc, gboolean rethrow)
310 /* mctx is on the caller's stack */
311 memcpy (&ctx, mctx, sizeof (MonoContext));
/* For a first throw, clear any stale trace state on the exception object. */
313 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
314 MonoException *mono_ex = (MonoException*)exc;
316 mono_ex->stack_trace = NULL;
317 mono_ex->trace_ips = NULL;
320 mono_error_assert_ok (&error);
322 /* adjust eip so that it point into the call instruction */
323 ctx.gregs [AMD64_RIP] --;
325 mono_handle_exception (&ctx, exc);
326 mono_restore_context (&ctx);
327 g_assert_not_reached ();
/* Raises the corlib exception identified by EX_TOKEN_INDEX. PC_OFFSET is the
 * distance from the caller's return IP back to the throw site; never returns. */
331 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
332 guint64 dummy5, guint64 dummy6,
333 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
335 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
338 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
/* Rewind the captured IP from the return address to the throw location. */
340 mctx->gregs [AMD64_RIP] -= pc_offset;
342 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
343 mctx->gregs [AMD64_RIP] += 1;
345 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
/* Resume unwinding from the state captured in MCTX (used by the LLVM
 * resume-unwind trampoline); dummy args keep real args off the registers. */
349 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
350 guint64 dummy5, guint64 dummy6,
351 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
353 /* Only the register parameters are valid */
356 /* mctx is on the caller's stack */
357 memcpy (&ctx, mctx, sizeof (MonoContext));
359 mono_resume_unwind (&ctx);
364 * get_throw_trampoline:
366 * Generate a call to mono_amd64_throw_exception/
367 * mono_amd64_throw_corlib_exception.
/* Builds one of the throw/rethrow/corlib-throw/resume-unwind trampolines.
 * The trampoline captures the caller's full register state into a MonoContext
 * on its own stack frame, then calls the selected C throw helper with every
 * real argument passed on the stack (see the dummy-args comment above). */
370 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
374 MonoJumpInfo *ji = NULL;
375 GSList *unwind_ops = NULL;
376 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
377 const guint kMaxCodeSize = 256;
380 dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
382 dummy_stack_space = 0;
386 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
388 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
390 /* The stack is unaligned on entry */
391 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
396 unwind_ops = mono_arch_get_cie_program ();
/* Allocate the frame and record unwind info for it. */
399 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
401 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
402 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
406 * To hide linux/windows calling convention differences, we pass all arguments on
407 * the stack by passing 6 dummy values in registers.
/* Stack layout of the outgoing call: args 0-2, then the MonoContext. */
410 arg_offsets [0] = dummy_stack_space + 0;
411 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
412 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
413 ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
414 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Capture all GP registers into the in-frame MonoContext. */
417 for (i = 0; i < AMD64_NREG; ++i)
419 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Record the caller's RSP (past our frame + return address) and RIP. */
421 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
422 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
424 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
425 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
426 /* Set arg1 == ctx */
427 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
428 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
429 /* Set arg2 == exc/ex_token_index */
431 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
433 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
434 /* Set arg3 == rethrow/pc offset */
436 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
440 * The caller doesn't pass in a pc/pc offset, instead we simply use the
441 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
443 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
445 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
447 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
/* Select the C helper: AOT loads it via a patch-info slot, JIT uses the
 * direct address. */
451 const char *icall_name;
454 icall_name = "mono_amd64_resume_unwind";
456 icall_name = "mono_amd64_throw_corlib_exception";
458 icall_name = "mono_amd64_throw_exception";
459 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
460 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
462 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
464 amd64_call_reg (code, AMD64_R11);
/* The helper never returns; trap if it somehow does. */
465 amd64_breakpoint (code);
467 mono_arch_flush_icache (start, code - start);
469 g_assert ((code - start) < kMaxCodeSize);
470 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
472 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
475 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
481 * mono_arch_get_throw_exception:
482 * \returns a function pointer which can be used to raise
483 * exceptions. The returned function has the following
484 * signature: void (*func) (MonoException *exc);
487 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
489 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
/* Same as mono_arch_get_throw_exception, but the generated trampoline marks
 * the throw as a rethrow (preserving the original stack trace). */
493 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
495 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
499 * mono_arch_get_throw_corlib_exception:
501 * Returns a function pointer which can be used to raise
502 * corlib exceptions. The returned function has the following
503 * signature: void (*func) (guint32 ex_token, guint32 offset);
504 * Here, offset is the offset which needs to be subtracted from the caller IP
505 * to get the IP of the throw. Passing the offset has the advantage that it
506 * needs no relocations in the caller.
509 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
511 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
513 #endif /* !DISABLE_JIT */
516 * mono_arch_unwind_frame:
518 * This function is used to gather information from @ctx, and store it in @frame_info.
519 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
520 * is modified if needed.
521 * Returns TRUE on success, FALSE otherwise.
/* NOTE(review): a number of physical lines (branches, braces, returns) are
 * elided in this extract; the comments below describe only what is visible. */
524 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
525 MonoJitInfo *ji, MonoContext *ctx,
526 MonoContext *new_ctx, MonoLMF **lmf,
527 mgreg_t **save_locations,
528 StackFrameInfo *frame)
530 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
533 memset (frame, 0, sizeof (StackFrameInfo));
/* Case 1 (visible below): we have JIT info for IP - unwind via DWARF-style
 * unwind info attached to the method. */
539 mgreg_t regs [MONO_MAX_IREGS + 1];
541 guint32 unwind_info_len;
543 guint8 *epilog = NULL;
545 if (ji->is_trampoline)
546 frame->type = FRAME_TYPE_TRAMPOLINE;
548 frame->type = FRAME_TYPE_MANAGED;
550 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
552 frame->unwind_info = unwind_info;
553 frame->unwind_info_len = unwind_info_len;
556 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
557 mono_print_unwind_info (unwind_info, unwind_info_len);
559 /* LLVM compiled code doesn't have this info */
560 if (ji->has_arch_eh_info)
561 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
563 for (i = 0; i < AMD64_NREG; ++i)
564 regs [i] = new_ctx->gregs [i];
566 mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
567 (guint8*)ji->code_start + ji->code_size,
568 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
569 save_locations, MONO_MAX_IREGS, &cfa);
571 for (i = 0; i < AMD64_NREG; ++i)
572 new_ctx->gregs [i] = regs [i];
574 /* The CFA becomes the new SP value */
575 new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Point the IP inside the call instruction of the caller frame. */
578 new_ctx->gregs [AMD64_RIP] --;
/* Case 2: no JIT info - unwind through the LMF (last managed frame) chain.
 * The low bits of previous_lmf encode the entry kind (see masks below). */
584 if (((guint64)(*lmf)->previous_lmf) & 2) {
586 * This LMF entry is created by the soft debug code to mark transitions to
587 * managed code done during invokes.
589 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
591 g_assert (ext->debugger_invoke);
593 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
595 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
597 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
/* Determine the return IP for this LMF entry, depending on its kind. */
602 if (((guint64)(*lmf)->previous_lmf) & 4) {
603 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
605 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
606 } else if (((guint64)(*lmf)->previous_lmf) & 1) {
607 /* This LMF has the rip field set */
609 } else if ((*lmf)->rsp == 0) {
614 * The rsp field is set just before the call which transitioned to native
615 * code. Obtain the rip from the stack.
617 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
620 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
622 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
623 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
631 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
633 if (((guint64)(*lmf)->previous_lmf) & 4) {
634 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
636 /* Trampoline frame */
637 for (i = 0; i < AMD64_NREG; ++i)
638 new_ctx->gregs [i] = ext->ctx->gregs [i];
/* Adjust IP to fall inside the call instruction. */
640 new_ctx->gregs [AMD64_RIP] --;
643 * The registers saved in the LMF will be restored using the normal unwind info,
644 * when the wrapper frame is processed.
/* Non-trampoline LMF: rebuild the minimal context from the LMF fields and
 * clear callee-saved registers we cannot recover here. */
648 new_ctx->gregs [AMD64_RIP] = rip;
649 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
650 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
651 for (i = 0; i < AMD64_NREG; ++i) {
652 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
653 new_ctx->gregs [i] = 0;
/* Pop this LMF entry, masking out the kind bits. */
657 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
668 * Called by resuming from a signal handler.
/* Runs on the normal stack (set up by mono_arch_setup_async_callback) with the
 * faulting context stashed in TLS; hands OBJ to the exception machinery. */
671 handle_signal_exception (gpointer obj)
673 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
676 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
678 mono_handle_exception (&ctx, (MonoObject *)obj);
680 mono_restore_context (&ctx);
/* Rewrite CTX so that, when resumed, execution continues in ASYNC_CB with
 * USER_DATA as its single (RDI) argument, on a frame below the red zone. */
684 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
686 guint64 sp = ctx->gregs [AMD64_RSP];
688 ctx->gregs [AMD64_RDI] = (guint64)user_data;
690 /* Allocate a stack frame below the red zone */
692 /* The stack should be unaligned */
696 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
697 *(guint64*)sp = ctx->gregs [AMD64_RIP];
699 ctx->gregs [AMD64_RSP] = sp;
700 ctx->gregs [AMD64_RIP] = (guint64)async_cb;
704 * mono_arch_handle_exception:
705 * \param ctx saved processor state
706 * \param obj the exception object
/* Entry point from the signal handler: on sigaction platforms, defer the real
 * work to handle_signal_exception on the normal stack; otherwise handle the
 * exception directly in place. */
709 mono_arch_handle_exception (void *sigctx, gpointer obj)
711 #if defined(MONO_ARCH_USE_SIGACTION)
715 * Handling the exception in the signal handler is problematic, since the original
716 * signal is disabled, and we could run arbitrary code though the debugger. So
717 * resume into the normal stack and do most work there if possible.
719 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
721 /* Pass the ctx parameter in TLS */
722 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
/* Redirect the signal context to run handle_signal_exception(obj) on return. */
724 mctx = jit_tls->ex_ctx;
725 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
726 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Fallback path: handle the exception synchronously inside the handler. */
732 mono_sigctx_to_monoctx (sigctx, &mctx);
734 mono_handle_exception (&mctx, obj);
736 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Extract the instruction pointer from a platform signal/exception context. */
743 mono_arch_ip_from_context (void *sigctx)
745 #if defined(MONO_ARCH_USE_SIGACTION)
746 ucontext_t *ctx = (ucontext_t*)sigctx;
748 return (gpointer)UCONTEXT_REG_RIP (ctx);
749 #elif defined(HOST_WIN32)
750 return ((CONTEXT*)sigctx)->Rip;
752 MonoContext *ctx = sigctx;
753 return (gpointer)ctx->gregs [AMD64_RIP];
/* Re-protect the soft stack-overflow guard pages after a stack-overflow
 * exception has been handled. */
758 restore_soft_guard_pages (void)
760 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
761 if (jit_tls->stack_ovf_guard_base)
762 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
766 * this function modifies mctx so that when it is restored, it
767 * won't execute starting at mctx.eip, but in a function that
768 * will restore the protection on the soft-guard pages and return back to
769 * continue at mctx.eip.
772 prepare_for_guard_pages (MonoContext *mctx)
775 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
/* Push the original IP as the return address, then redirect execution
 * to restore_soft_guard_pages. */
777 /* the return addr */
778 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
779 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
780 mctx->gregs [AMD64_RSP] = (guint64)sp;
/* Runs on the real stack after an altstack SIGSEGV: handle OBJ as an
 * exception; if this was a stack overflow, re-arm the guard pages before
 * restoring CTX. Aborts as a native crash when the IP has no JIT info. */
784 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
787 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
790 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
794 mono_handle_exception (&mctx, obj);
796 prepare_for_guard_pages (&mctx);
797 mono_restore_context (&mctx);
/* SIGSEGV handler running on the alternate signal stack: builds a call frame
 * on the faulting thread's real stack and redirects the signal context so
 * that altstack_handle_and_restore() runs there on return from the handler. */
801 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
803 #if defined(MONO_ARCH_USE_SIGACTION)
804 MonoException *exc = NULL;
807 MonoContext *copied_ctx;
810 exc = mono_domain_get ()->stack_overflow_ex;
812 /* setup a call frame on the real stack so that control is returned there
813 * and exception handling can continue.
814 * The frame looks like:
818 * 128 is the size of the red zone
820 frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
/* Carve an aligned frame below the faulting RSP and copy the context there. */
823 sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
824 sp = (gpointer *)((char*)sp - frame_size);
825 copied_ctx = (MonoContext*)(sp + 4);
826 /* the arguments must be aligned */
827 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
828 mono_sigctx_to_monoctx (sigctx, copied_ctx);
829 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
830 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
831 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
/* Arguments for altstack_handle_and_restore (ctx, exc, stack_ovf) in the
 * SysV argument registers RDI/RSI/RDX. */
832 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
833 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
834 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
/* Return the original IP stashed in the LMF and clear the marker bit set in
 * previous_lmf. NOTE(review): the return statement is elided in this extract. */
839 mono_amd64_get_original_ip (void)
841 MonoLMF *lmf = mono_get_lmf ();
845 /* Reset the change to previous_lmf */
846 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Build the set of exception trampolines needed by LLVM-compiled code and
 * return them as a list of MonoTrampInfo*. */
853 mono_amd64_get_exception_trampolines (gboolean aot)
856 GSList *tramps = NULL;
858 /* LLVM needs different throw trampolines */
859 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
860 tramps = g_slist_prepend (tramps, info);
/* Variant taking an absolute PC instead of a PC offset (llvm_abs == TRUE). */
862 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
863 tramps = g_slist_prepend (tramps, info);
865 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
866 tramps = g_slist_prepend (tramps, info);
870 #endif /* !DISABLE_JIT */
/* Register the LLVM exception trampolines as JIT icalls: from AOT images when
 * available (visible below), otherwise by generating them eagerly here. */
873 mono_arch_exceptions_init (void)
879 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
880 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
881 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
882 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
883 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
884 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
886 /* Call this to avoid initialization races */
887 tramps = mono_amd64_get_exception_trampolines (FALSE);
888 for (l = tramps; l; l = l->next) {
889 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
891 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
892 mono_tramp_info_register (info, NULL);
894 g_slist_free (tramps);
898 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
/* Allocate a zeroed Win64 UNWIND_INFO (Version 1) and hand it back through
 * MONOUI. Caller owns the allocation. */
901 mono_arch_unwindinfo_create (gpointer* monoui)
903 PUNWIND_INFO newunwindinfo;
904 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
905 newunwindinfo->Version = 1;
/* Append a UWOP_PUSH_NONVOL unwind code for UNWIND_OP (a register push in the
 * prolog). Codes are filled from the end of the UnwindCode array so they end
 * up ordered by descending CodeOffset, as the OS unwinder requires. */
909 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
911 PUNWIND_CODE unwindcode;
914 g_assert (unwindinfo != NULL);
916 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
917 g_error ("Larger allocation needed for the unwind information.");
919 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
920 unwindcode = &unwindinfo->UnwindCode [codeindex];
921 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
922 unwindcode->CodeOffset = (guchar)unwind_op->when;
923 unwindcode->OpInfo = unwind_op->reg;
/* Prolog offsets must strictly increase as codes are added. */
925 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
926 g_error ("Adding unwind info in wrong order.");
928 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* Append a UWOP_SET_FPREG unwind code establishing UNWIND_OP->reg as the
 * frame pointer at offset UNWIND_OP->val from RSP (must be 16-byte scaled). */
932 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
934 PUNWIND_CODE unwindcode;
937 g_assert (unwindinfo != NULL);
939 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
940 g_error ("Larger allocation needed for the unwind information.");
942 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
943 unwindcode = &unwindinfo->UnwindCode [codeindex];
944 unwindcode->UnwindOp = UWOP_SET_FPREG;
945 unwindcode->CodeOffset = (guchar)unwind_op->when;
/* FrameOffset is stored scaled by 16, per the x64 unwind-info format. */
947 g_assert (unwind_op->val % 16 == 0);
948 unwindinfo->FrameRegister = unwind_op->reg;
949 unwindinfo->FrameOffset = unwind_op->val / 16;
951 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
952 g_error ("Adding unwind info in wrong order.");
954 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* Append an alloc-stack unwind code for a prolog 'sub rsp, size'. Encodes as
 * UWOP_ALLOC_SMALL (1 slot, size <= 128), UWOP_ALLOC_LARGE with a scaled
 * 16-bit size (2 slots, size <= 0x7FFF8) or UWOP_ALLOC_LARGE with an
 * unscaled 32-bit size (3 slots). NOTE(review): the lines computing
 * 'codesneeded' for the small/large cases are elided in this extract. */
958 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
960 PUNWIND_CODE unwindcode;
965 g_assert (unwindinfo != NULL);
967 size = unwind_op->val;
970 g_error ("Stack allocation must be equal to or greater than 0x8.");
974 else if (size <= 0x7FFF8)
979 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
980 g_error ("Larger allocation needed for the unwind information.");
982 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
983 unwindcode = &unwindinfo->UnwindCode [codeindex];
985 unwindcode->CodeOffset = (guchar)unwind_op->when;
987 if (codesneeded == 1) {
988 /*The size of the allocation is
989 (the number in the OpInfo member) times 8 plus 8*/
990 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
991 unwindcode->OpInfo = (size - 8)/8;
994 if (codesneeded == 3) {
995 /*the unscaled size of the allocation is recorded
996 in the next two slots in little-endian format.
997 NOTE, unwind codes are allocated from end to beginning of list so
998 unwind code will have right execution order. List is sorted on CodeOffset
999 using descending sort order.*/
1000 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1001 unwindcode->OpInfo = 1;
1002 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1005 /*the size of the allocation divided by 8
1006 is recorded in the next slot.
1007 NOTE, unwind codes are allocated from end to beginning of list so
1008 unwind code will have right execution order. List is sorted on CodeOffset
1009 using descending sort order.*/
1010 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1011 unwindcode->OpInfo = 0;
1012 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
1016 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1017 g_error ("Adding unwind info in wrong order.");
1019 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* One-time-init flag for the dynamic function table state below; guarded by
 * g_dynamic_function_table_lock. */
1022 static gboolean g_dyn_func_table_inited;
1024 // Dynamic function table used when registering unwind info for OS unwind support.
1025 static GList *g_dynamic_function_table_begin;
1026 static GList *g_dynamic_function_table_end;
1028 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1029 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1031 // Module handle used when explicit loading ntdll.
1032 static HMODULE g_ntdll;
1034 // If Win8 or Win2012Server or later, use growable function tables instead
1035 // of callbacks. Callback solution will still be fallback on older systems.
1036 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1037 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1038 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1040 // When using function table callback solution an out of proc module is needed by
1041 // debuggers in order to read unwind info from debug target.
1043 #define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
1045 #define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
1048 #define MONO_DAC_MODULE_MAX_PATH 1024
/* Initialize the dynamic function table state. Caller must hold
 * g_dynamic_function_table_lock exclusively; idempotent via the inited flag. */
1051 init_table_no_lock (void)
1053 if (g_dyn_func_table_inited == FALSE) {
1054 g_assert_checked (g_dynamic_function_table_begin == NULL);
1055 g_assert_checked (g_dynamic_function_table_end == NULL);
1056 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1057 g_assert_checked (g_rtl_grow_function_table == NULL);
1058 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1059 g_assert_checked (g_ntdll == NULL);
1061 // Load functions available on Win8/Win2012Server or later. If running on earlier
1062 // systems the below GetProcAddress will fail, this is expected behavior.
1063 if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
1064 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
1065 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
1066 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
1069 g_dyn_func_table_inited = TRUE;
/* Public entry: initialize the dynamic function table under the SRW lock.
 * The outer unlocked check is a fast path; init_table_no_lock re-checks. */
1074 mono_arch_unwindinfo_init_table (void)
1076 if (g_dyn_func_table_inited == FALSE) {
1078 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1080 init_table_no_lock ();
1082 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/* Tear down the dynamic function table: free all entries, drop the ntdll
 * function pointers and module handle. Caller must hold the SRW lock
 * exclusively. NOTE(review): the per-entry cleanup inside the loop is elided
 * in this extract. */
1087 terminate_table_no_lock (void)
1089 if (g_dyn_func_table_inited == TRUE) {
1090 if (g_dynamic_function_table_begin != NULL) {
1091 // Free all list elements.
1092 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1100 g_list_free (g_dynamic_function_table_begin);
1101 g_dynamic_function_table_begin = NULL;
1102 g_dynamic_function_table_end = NULL;
1105 g_rtl_delete_growable_function_table = NULL;
1106 g_rtl_grow_function_table = NULL;
1107 g_rtl_add_growable_function_table = NULL;
1109 if (g_ntdll != NULL) {
1110 FreeLibrary (g_ntdll);
1114 g_dyn_func_table_inited = FALSE;
/* Public entry: tear down the dynamic function table under the SRW lock. */
1119 mono_arch_unwindinfo_terminate_table (void)
1121 if (g_dyn_func_table_inited == TRUE) {
1123 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1125 terminate_table_no_lock ();
1127 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/* Fast-path lookup: check only the list boundaries (the table is kept sorted
 * by descending begin_range). Returns the matching GList node or NULL, and
 * sets *continue_search when a full scan might still find the range.
 * Caller must hold the table lock. NOTE(review): the tail of this function
 * (remaining branches and return) is elided in this extract. */
1132 fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
1134 GList *found_entry = NULL;
1136 // Fast path, look at boundaries.
1137 if (g_dynamic_function_table_begin != NULL) {
1138 DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
1139 DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? g_dynamic_function_table_end->data : first_entry;
1141 // Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
1142 if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
1143 // Entry belongs to first entry in list.
1144 found_entry = g_dynamic_function_table_begin;
1145 *continue_search = FALSE;
1147 if (first_entry != NULL && first_entry->begin_range >= begin_range) {
1148 if (last_entry != NULL && last_entry->begin_range <= begin_range) {
1149 // Entry has a range that could exist in table, continue search.
1150 *continue_search = TRUE;
// Convenience wrapper: same as fast_find_range_in_table_no_lock_ex but
// returns the entry payload instead of the GList node (NULL when not found).
1159 static inline DynamicFunctionTableEntry *
1160 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1162 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1163 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Find the GList node whose entry matches [code_block, code_block+block_size]
// EXACTLY (both begin and end must be equal). Tries the boundary fast path
// first, then falls back to a linear scan. Caller must hold
// g_dynamic_function_table_lock.
1167 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1169 GList *found_entry = NULL;
1170 gboolean continue_search = FALSE;
1172 gsize begin_range = (gsize)code_block;
1173 gsize end_range = begin_range + block_size;
1175 // Fast path, check table boundaries.
1176 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
// Early out: either found at a boundary, or the range cannot be in the table.
1177 if (found_entry || continue_search == FALSE)
1180 // Scan table for an entry including range.
1181 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1182 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1183 g_assert_checked (current_entry != NULL);
1185 // Do we have a match?
1186 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
// Convenience wrapper over find_range_in_table_no_lock_ex returning the entry
// payload instead of the GList node (NULL when not found).
1195 static inline DynamicFunctionTableEntry *
1196 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1198 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1199 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Find the GList node whose entry range CONTAINS pc (inclusive containment,
// unlike find_range_in_table_no_lock_ex which requires exact equality).
// Caller must hold g_dynamic_function_table_lock.
1203 find_pc_in_table_no_lock_ex (const gpointer pc)
1205 GList *found_entry = NULL;
1206 gboolean continue_search = FALSE;
// A single pc is treated as a zero-length range.
1208 gsize begin_range = (gsize)pc;
1209 gsize end_range = begin_range;
1211 // Fast path, check table boundaries.
1212 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1213 if (found_entry || continue_search == FALSE)
1216 // Scan table for an entry including range.
1217 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1218 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1219 g_assert_checked (current_entry != NULL);
1221 // Do we have a match?
1222 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
// Convenience wrapper over find_pc_in_table_no_lock_ex returning the entry
// payload instead of the GList node (NULL when pc is not in any range).
1231 static inline DynamicFunctionTableEntry *
1232 find_pc_in_table_no_lock (const gpointer pc)
1234 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1235 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1238 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build invariant check: list is sorted descending on begin_range,
// every entry's range is non-empty, and no two ranges overlap. Caller must
// hold g_dynamic_function_table_lock.
// NOTE(review): local "prevoious_entry" is a misspelling of "previous_entry";
// harmless but worth renaming when this code is next touched.
1240 validate_table_no_lock (void)
1242 // Validation method checking that table is sorted as expected and doesn't include overlapped regions.
1243 // Method will assert on failure to explicitly indicate what check failed.
1244 if (g_dynamic_function_table_begin != NULL) {
1245 g_assert_checked (g_dynamic_function_table_end != NULL);
1247 DynamicFunctionTableEntry *prevoious_entry = NULL;
1248 DynamicFunctionTableEntry *current_entry = NULL;
1249 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1250 current_entry = (DynamicFunctionTableEntry *)node->data;
1252 g_assert_checked (current_entry != NULL);
1253 g_assert_checked (current_entry->end_range > current_entry->begin_range);
1255 if (prevoious_entry != NULL) {
1256 // List should be sorted in descending order on begin_range.
1257 g_assert_checked (prevoious_entry->begin_range > current_entry->begin_range);
1259 // Check for overlapped regions.
1260 g_assert_checked (prevoious_entry->begin_range >= current_entry->end_range);
1263 prevoious_entry = current_entry;
// Non-checked builds get an empty stub so call sites need no #ifdefs.
1271 validate_table_no_lock (void)
1275 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1278 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
// Insert a new code range [code_block, code_block+block_size] into the
// dynamic function table (or return the existing entry for an identical
// range). Allocates the entry and its RUNTIME_FUNCTION array, links it into
// the descending-sorted list, and registers it with the OS — either via the
// growable function table API (RtlAddGrowableFunctionTable, when available)
// or via an RtlInstallFunctionTableCallback + out-of-process DAC module.
// Takes g_dynamic_function_table_lock exclusively for the whole operation.
1280 DynamicFunctionTableEntry *
1281 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1283 DynamicFunctionTableEntry *new_entry = NULL;
1285 gsize begin_range = (gsize)code_block;
1286 gsize end_range = begin_range + block_size;
1288 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
// Lazy init; safe to call repeatedly under the lock.
1289 init_table_no_lock ();
// Reuse an existing entry when the exact range is already registered.
1290 new_entry = find_range_in_table_no_lock (code_block, block_size);
1291 if (new_entry == NULL) {
1292 // Allocate new entry.
1293 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1294 if (new_entry != NULL) {
1296 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1297 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1298 InitializeSRWLock (&new_entry->lock);
1299 new_entry->handle = NULL;
1300 new_entry->begin_range = begin_range;
1301 new_entry->end_range = end_range;
1302 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1303 new_entry->rt_funcs_current_count = 0;
1304 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1306 if (new_entry->rt_funcs != NULL) {
1307 // Check insert on boundaries. List is sorted descending on begin_range.
1308 if (g_dynamic_function_table_begin == NULL) {
// Empty list: new entry is both head and tail.
1309 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1310 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1311 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1312 // Insert at the head.
1313 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1314 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
// Insert at the tail; g_list_append on the tail node is O(1) here.
1316 g_list_append (g_dynamic_function_table_end, new_entry);
1317 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1319 // Search and insert at correct position.
1320 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1321 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1322 g_assert_checked (current_entry != NULL);
1324 if (current_entry->begin_range < new_entry->begin_range) {
1325 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1331 // Register dynamic function table entry with OS.
1332 if (g_rtl_add_growable_function_table != NULL) {
1333 // Allocate new growable handle table for entry.
1334 g_assert_checked (new_entry->handle == NULL);
1335 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1336 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1337 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
// Fallback: no growable-table API (pre-Win8.1 ntdll); use a callback table.
1340 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1341 WCHAR *path = buffer;
1343 // DAC module should be in the same directory as the
// Build "<exe dir>\\<MONO_DAC_MODULE>" for out-of-process unwinding support.
1345 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1346 path = wcsrchr (buffer, TEXT('\\'));
1352 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1355 // Register function table callback + out of proc module.
// Identifier must have the low 2 bits set per RtlInstallFunctionTableCallback contract.
1356 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
1357 BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
1358 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1359 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1363 // Only included in checked builds. Validates the structure of table after insert.
1364 validate_table_no_lock ();
1372 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Unlink and destroy one table entry: deregister its OS function table
// (growable handle when available, otherwise the installed callback table),
// free its RUNTIME_FUNCTION array and the entry itself, then free the list
// node. Caller must hold g_dynamic_function_table_lock exclusively.
1378 remove_range_in_table_no_lock (GList *entry)
1380 if (entry != NULL) {
// Keep the tail pointer valid when removing the last node.
1381 if (entry == g_dynamic_function_table_end)
1382 g_dynamic_function_table_end = entry->prev;
1384 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1385 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1387 g_assert_checked (removed_entry != NULL);
1388 g_assert_checked (removed_entry->rt_funcs != NULL);
1390 // Remove function table from OS.
1391 if (removed_entry->handle != NULL) {
1392 if (g_rtl_delete_growable_function_table != NULL) {
1393 g_rtl_delete_growable_function_table (removed_entry->handle);
// Callback-registered table: handle doubles as the RtlDeleteFunctionTable identifier.
1395 RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
1399 g_free (removed_entry->rt_funcs);
1400 g_free (removed_entry);
// Free just the detached node (list head was already relinked above).
1402 g_list_free_1 (entry);
1405 // Only included in checked builds. Validates the structure of table after remove.
1406 validate_table_no_lock ();
// Remove the table entry whose range contains `code`, under the exclusive lock.
1410 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1412 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1414 GList *found_entry = find_pc_in_table_no_lock_ex (code);
// NOTE(review): BUG — the '||' short-circuits the wrong way: when found_entry
// IS NULL the right-hand side dereferences found_entry->data (NULL deref in
// checked builds). The intended assert is almost certainly
// (found_entry != NULL && ...->begin_range == (gsize)code). Same pattern in
// mono_arch_unwindinfo_remove_range_in_table below.
1416 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1417 remove_range_in_table_no_lock (found_entry);
1419 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Remove the table entry exactly matching [code_block, code_block+block_size],
// under the exclusive lock.
1423 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1425 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1427 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
// NOTE(review): BUG — as in mono_arch_unwindinfo_remove_pc_range_in_table,
// '||' should be '&&'; with '||', a NULL found_entry makes the assert
// dereference NULL instead of failing cleanly.
1429 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1430 remove_range_in_table_no_lock (found_entry);
1432 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Look up the RUNTIME_FUNCTION covering [code, code+code_size]. Takes the
// global table lock shared, then the owning entry's lock shared, and scans
// that entry's (BeginAddress-sorted) rt_funcs array for a record whose
// relative range contains the request. Returns NULL when no entry or no
// record matches.
// NOTE(review): the returned pointer aliases the entry's rt_funcs array and
// is returned after both locks are released — presumably safe only because
// JIT code ranges are not removed while in use; confirm lifetime contract.
1436 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1438 PRUNTIME_FUNCTION found_rt_func = NULL;
1440 gsize begin_range = (gsize)code;
1441 gsize end_range = begin_range + code_size;
1443 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1445 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1447 if (found_entry != NULL) {
1449 AcquireSRWLockShared (&found_entry->lock);
1451 g_assert_checked (found_entry->begin_range <= begin_range);
1452 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1453 g_assert_checked (found_entry->rt_funcs != NULL);
1455 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1456 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1458 // Is this our RT function entry?
// RT func addresses are relative to the entry's begin_range.
1459 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1460 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1461 found_rt_func = current_rt_func;
1466 ReleaseSRWLockShared (&found_entry->lock);
1469 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1471 return found_rt_func;
// Single-pc lookup: find the RUNTIME_FUNCTION covering pc (zero-length range).
1474 inline PRUNTIME_FUNCTION
1475 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1477 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1480 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build invariant check for one entry's RUNTIME_FUNCTION array:
// counts are consistent, each record's range is non-empty, UnwindData sits at
// or after EndAddress, and records are sorted ascending on BeginAddress with
// no overlaps. Caller must hold the entry's lock.
1482 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1484 // Validation method checking that runtime function table is sorted as expected and doesn't include overlapped regions.
1485 // Method will assert on failure to explicitly indicate what check failed.
1486 g_assert_checked (entry != NULL);
1487 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1488 g_assert_checked (entry->rt_funcs != NULL);
1490 PRUNTIME_FUNCTION current_rt_func = NULL;
1491 PRUNTIME_FUNCTION previous_rt_func = NULL;
1492 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1493 current_rt_func = &(entry->rt_funcs [i]);
1495 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
1496 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1498 if (previous_rt_func != NULL) {
1499 // List should be sorted in ascending order based on BeginAddress.
1500 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1502 // Check for overlapped regions.
1503 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1506 previous_rt_func = current_rt_func;
// Non-checked builds get an empty stub so call sites need no #ifdefs.
1513 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1517 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Register one method's RUNTIME_FUNCTION record inside the table entry that
// owns [code, code+code_size]. The record stores addresses relative to the
// entry's begin_range; UnwindData points just past the method's code,
// aligned to mgreg_t (the UNWIND_INFO blob is copied there by
// mono_arch_unwindinfo_install_method_unwind_info). Appends in place when
// the array has room and sort order allows, otherwise reallocates a larger
// sorted array. Finally syncs the change with the OS: grow the growable
// table, re-register a new growable table, or do nothing for the
// callback-based fallback. Takes the global lock shared and the entry lock
// exclusive.
1520 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1522 PRUNTIME_FUNCTION new_rt_func = NULL;
1524 gsize begin_range = (gsize)code;
1525 gsize end_range = begin_range + code_size;
1527 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1529 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1531 if (found_entry != NULL) {
// Per-entry exclusive lock is enough; the global lock stays shared.
1533 AcquireSRWLockExclusive (&found_entry->lock);
1535 g_assert_checked (found_entry->begin_range <= begin_range);
1536 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1537 g_assert_checked (found_entry->rt_funcs != NULL);
1538 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
1540 gsize code_offset = (gsize)code - found_entry->begin_range;
1541 gsize entry_count = found_entry->rt_funcs_current_count;
1542 gsize max_entry_count = found_entry->rt_funcs_max_count;
1543 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1545 RUNTIME_FUNCTION new_rt_func_data;
1546 new_rt_func_data.BeginAddress = code_offset;
1547 new_rt_func_data.EndAddress = code_offset + code_size;
// Unwind info blob lives right after the method code, mgreg_t-aligned.
1549 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
1550 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1552 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));
1554 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1556 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list
1557 // going to be directly reused in OS func tables. Check if we can append to end of existing table without realloc.
// NOTE: '&&' binds tighter than '||' here, which matches the intent:
// empty table, OR (room left AND new record sorts after the current last).
1558 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1559 new_rt_func = &(current_rt_funcs [entry_count]);
1560 *new_rt_func = new_rt_func_data;
1563 // No easy way out, need to realloc, grow to double size (or current max, if too small).
1564 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1565 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1567 if (new_rt_funcs != NULL) {
1568 gsize from_index = 0;
1571 // Copy from old table into new table. Make sure new rt func gets inserted
1572 // into correct location based on sort order.
1573 for (; from_index < entry_count; ++from_index) {
1574 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1575 new_rt_func = &(new_rt_funcs [to_index++]);
1576 *new_rt_func = new_rt_func_data;
// Skip zero (unused) records while compacting into the new array.
1579 if (current_rt_funcs [from_index].UnwindData != 0)
1580 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1583 // If we didn't insert by now, put it last in the list.
1584 if (new_rt_func == NULL) {
1585 new_rt_func = &(new_rt_funcs [to_index]);
1586 *new_rt_func = new_rt_func_data;
1593 // Update the stats for current entry.
1594 found_entry->rt_funcs_current_count = entry_count;
1595 found_entry->rt_funcs_max_count = max_entry_count;
// Propagate the change to the OS-registered function table.
1597 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1598 // No new table just report increase in use.
1599 g_assert_checked (found_entry->handle != NULL);
1600 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1601 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1602 // New table, delete old table and rt funcs, and register a new one.
1603 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1604 g_rtl_delete_growable_function_table (found_entry->handle);
1605 found_entry->handle = NULL;
1606 g_free (found_entry->rt_funcs);
1607 found_entry->rt_funcs = new_rt_funcs;
1608 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1609 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1610 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1612 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1613 // No table registered with OS, callback solution in use. Switch tables.
1614 g_free (found_entry->rt_funcs);
1615 found_entry->rt_funcs = new_rt_funcs;
1616 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1617 // No table registered with OS, callback solution in use, nothing to do.
// Any other combination indicates an inconsistent API state.
1619 g_assert_not_reached ();
1622 // Only included in checked builds. Validates the structure of table after insert.
1623 validate_rt_funcs_in_table_no_lock (found_entry);
1625 ReleaseSRWLockExclusive (&found_entry->lock);
1628 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
// OS callback installed via RtlInstallFunctionTableCallback: resolve the
// RUNTIME_FUNCTION for a faulting/unwinding pc. Context (the table entry) is
// unused because the lookup re-walks the global table.
1633 static PRUNTIME_FUNCTION
1634 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1636 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
// Translate Mono's collected DWARF-style unwind ops into a Windows x64
// UNWIND_INFO structure. Only register pushes (DW_CFA_offset) and the two
// Mono-specific Win64 alloc/frame ops are replayed; everything else is
// ignored here.
1640 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1642 if (unwind_ops != NULL && unwindinfo != NULL) {
1643 MonoUnwindOp *unwind_op_data;
// Track whether SP/FP allocation already happened; Win64 ABI requires
// nonvolatile pushes to precede frame allocation.
1644 gboolean sp_alloced = FALSE;
1645 gboolean fp_alloced = FALSE;
1647 // Replay collected unwind info and setup Windows format.
1648 for (GSList *l = unwind_ops; l; l = l->next) {
1649 unwind_op_data = (MonoUnwindOp *)l->data;
1650 switch (unwind_op_data->op) {
1651 case DW_CFA_offset : {
1652 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1653 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
1654 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1655 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1658 case DW_CFA_mono_sp_alloc_info_win64 : {
1659 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
1663 case DW_CFA_mono_fp_alloc_info_win64 : {
1664 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
// Allocate a fresh UNWIND_INFO and populate it from unwind_ops.
// Ownership of the returned buffer transfers to the caller (freed later by
// mono_arch_unwindinfo_install_method_unwind_info).
// NOTE(review): the return statement is outside this excerpt; presumably
// "return unwindinfo;" follows — confirm against full source.
1676 initialize_unwind_info_internal (GSList *unwind_ops)
1678 PUNWIND_INFO unwindinfo;
1680 mono_arch_unwindinfo_create (&unwindinfo);
1681 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
// Count the Windows unwind codes a given unwind-op list would produce, by
// replaying it into a stack-local UNWIND_INFO and reading CountOfCodes.
1687 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1689 UNWIND_INFO unwindinfo = {0};
1690 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1691 return unwindinfo.CountOfCodes;
// Build the UNWIND_INFO for a method being compiled, stash it on the cfg's
// arch-specific state, and return the size the serialized info will occupy
// (so the JIT can reserve space after the method's code).
1695 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1697 MonoCompile * current_cfg = (MonoCompile *)cfg;
1698 g_assert (current_cfg->arch.unwindinfo == NULL);
1699 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1700 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
// Copy the prepared UNWIND_INFO (*monoui) to its final location just past
// the method's code (mgreg_t-aligned), compacting the UnwindCode array,
// free the temporary buffer, and register the method's RUNTIME_FUNCTION in
// the dynamic table. *monoui is consumed; the caller must not use it after.
1704 mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
1706 PUNWIND_INFO unwindinfo, targetinfo;
1708 guint64 targetlocation;
1712 unwindinfo = (PUNWIND_INFO)*monoui;
1713 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1714 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));
// Copy header only; the UnwindCode array is copied separately below.
1716 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1718 codecount = unwindinfo->CountOfCodes;
// Codes were emitted from the END of the fixed-size array backwards, so the
// live codes start at index MONO_MAX_UNWIND_CODES - codecount.
1720 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1721 sizeof (UNWIND_CODE) * codecount);
1724 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1726 // Validate the order of unwind op codes in checked builds. Offset should be in descending order.
1727 // In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
1729 for (int current = 0; current < codecount; current++) {
1730 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
1732 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1733 if (targetinfo->UnwindCode [current].OpInfo == 0) {
1741 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1743 g_free (unwindinfo);
1746 // Register unwind info in table.
1747 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
// Trampoline variant: build UNWIND_INFO directly from an unwind-op list and
// install it after the trampoline's code (buffer is consumed by the install).
1751 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1753 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1754 if (unwindinfo != NULL) {
1755 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
// Code-manager hook: a new executable chunk was allocated — register its
// range in the dynamic function table.
1760 mono_arch_code_chunk_new (void *chunk, int size)
1762 mono_arch_unwindinfo_insert_range_in_table (chunk, size);
// Code-manager hook: a chunk is being destroyed — remove the table entry
// containing its base address.
1765 void mono_arch_code_chunk_destroy (void *chunk)
1767 mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
1769 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1771 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
// Emit (once, cached in `saved`) the native stub that restores a tasklet
// continuation: copies the saved stack back into place, restores registers
// from the LMF, and jumps to the continuation's saved return address with
// `state` in RAX as the return value.
1772 MonoContinuationRestore
1773 mono_tasklets_arch_restore (void)
1775 static guint8* saved = NULL;
1776 guint8 *code, *start;
1777 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1778 const guint kMaxCodeSize = 64;
// Return the cached stub when it was already generated.
1782 return (MonoContinuationRestore)saved;
1783 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1784 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1785 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1786 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1787 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1788 * We move cont to cont_reg since we need both rcx and rdi for the copy
1789 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1791 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1792 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1793 /* setup the copy of the stack */
1794 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
// Byte count -> qword count for the rep movsq copy below.
1795 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1797 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1798 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1799 amd64_prefix (code, X86_REP_PREFIX);
1802 /* now restore the registers from the LMF */
1803 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1804 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1805 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
// Stash lmf_addr in a callee-saved register (which one depends on build
// configuration — the selecting #if lines are outside this excerpt).
1808 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1810 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1813 /* state is already in rax */
1814 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1815 g_assert ((code - start) <= kMaxCodeSize);
1817 mono_arch_flush_icache (start, code - start);
1818 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
1821 return (MonoContinuationRestore)saved;
1826 * mono_arch_setup_resume_sighandler_ctx:
1828 * Setup CTX so execution continues at FUNC.
1831 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1834 * When resuming from a signal handler, the stack should be misaligned, just like right after
// A call pushes an 8-byte return address, so on function entry RSP % 16 == 8;
// force that misalignment when the saved SP happens to be 16-aligned.
1837 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1838 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1839 MONO_CONTEXT_SET_IP (ctx, func);
// Stub implementations of the exception trampoline factories; each aborts if
// reached. Presumably compiled only for DISABLE_JIT builds (the matching
// #endif comment below says DISABLE_JIT) — the guarding #if is outside this
// excerpt.
1844 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1846 g_assert_not_reached ();
1851 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1853 g_assert_not_reached ();
1858 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1860 g_assert_not_reached ();
1865 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1867 g_assert_not_reached ();
1872 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1874 g_assert_not_reached ();
1879 mono_amd64_get_exception_trampolines (gboolean aot)
1881 g_assert_not_reached ();
1884 #endif /* DISABLE_JIT */
1886 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
// Stub for builds without tasklet support or without the JIT: the
// continuation-restore stub can never be requested, so abort if reached.
1887 MonoContinuationRestore
1888 mono_tasklets_arch_restore (void)
1890 g_assert_not_reached ();
1893 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */