3 * exception support for AMD64
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
22 #ifdef HAVE_UCONTEXT_H
26 #include <mono/arch/amd64/amd64-codegen.h>
27 #include <mono/metadata/abi-details.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/tabledefs.h>
30 #include <mono/metadata/threads.h>
31 #include <mono/metadata/threads-types.h>
32 #include <mono/metadata/debug-helpers.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/gc-internals.h>
35 #include <mono/metadata/mono-debug.h>
36 #include <mono/utils/mono-mmap.h>
39 #include "mini-amd64.h"
/* Round VAL up to the nearest multiple of ALIGN (ALIGN must be a power of two). */
42 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Win32 SEH state: per-signal handlers installed via win32_seh_set_handler(). */
45 static MonoW32ExceptionHandler fpe_handler;
46 static MonoW32ExceptionHandler ill_handler;
47 static MonoW32ExceptionHandler segv_handler;
/* Previously installed top-level filter (restored by win32_seh_cleanup) and the
 * handle returned by AddVectoredExceptionHandler, needed for removal. */
49 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
50 void *mono_win_vectored_exception_handle;
/* Invoke the registered handler for exception kind _ex (fpe/ill/segv), if any. */
52 #define W32_SEH_HANDLE_EX(_ex) \
53 if (_ex##_handler) _ex##_handler(0, ep, ctx)
/*
 * Last-chance unhandled-exception filter: chain to the filter that was installed
 * before Mono, if there was one; otherwise report the fault as a native crash
 * and let the OS continue its search.
 */
55 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
57 #ifndef MONO_CROSS_COMPILE
58 if (mono_old_win_toplevel_exception_filter) {
59 return (*mono_old_win_toplevel_exception_filter)(ep);
63 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
65 return EXCEPTION_CONTINUE_SEARCH;
69 * Unhandled Exception Filter
70 * Top-level per-process exception handler.
/*
 * Vectored exception handler: translates hardware exceptions (AV, illegal
 * instruction, FP/integer faults) on runtime-managed threads into the
 * corresponding Mono signal handlers via W32_SEH_HANDLE_EX.
 */
72 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
77 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
79 /* If the thread is not managed by the runtime return early */
81 return EXCEPTION_CONTINUE_SEARCH;
83 jit_tls->mono_win_chained_exception_needs_run = FALSE;
84 res = EXCEPTION_CONTINUE_EXECUTION;
86 er = ep->ExceptionRecord;
87 ctx = ep->ContextRecord;
89 switch (er->ExceptionCode) {
90 case EXCEPTION_ACCESS_VIOLATION:
91 W32_SEH_HANDLE_EX(segv);
93 case EXCEPTION_ILLEGAL_INSTRUCTION:
94 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults map onto the single fpe handler. */
96 case EXCEPTION_INT_DIVIDE_BY_ZERO:
97 case EXCEPTION_INT_OVERFLOW:
98 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
99 case EXCEPTION_FLT_OVERFLOW:
100 case EXCEPTION_FLT_UNDERFLOW:
101 case EXCEPTION_FLT_INEXACT_RESULT:
102 W32_SEH_HANDLE_EX(fpe);
105 jit_tls->mono_win_chained_exception_needs_run = TRUE;
109 if (jit_tls->mono_win_chained_exception_needs_run) {
110 /* Don't copy context back if we chained exception
111 * as the handler may have modified the EXCEPTION_POINTERS
112 * directly. We don't pass sigcontext to chained handlers.
113 * Return continue search so the UnhandledExceptionFilter
114 * can correctly chain the exception.
116 res = EXCEPTION_CONTINUE_SEARCH;
/* Install both the top-level unhandled filter and the (first-position)
 * vectored exception handler, remembering the previous filter for chaining. */
122 void win32_seh_init()
124 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
125 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
/* Undo win32_seh_init: restore the saved filter and remove our vectored handler. */
128 void win32_seh_cleanup()
132 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
134 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
/* Register the runtime callback for a given exception type (SIGFPE/SIGILL/SIGSEGV). */
138 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
142 fpe_handler = handler;
145 ill_handler = handler;
148 segv_handler = handler;
155 #endif /* TARGET_WIN32 */
159 * mono_arch_get_restore_context:
161 * Returns a pointer to a method which restores a previously saved sigcontext.
164 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
166 guint8 *start = NULL;
168 MonoJumpInfo *ji = NULL;
169 GSList *unwind_ops = NULL;
172 /* restore_context (MonoContext *ctx) */
174 start = code = (guint8 *)mono_global_codeman_reserve (256);
/* R11 holds the MonoContext pointer for the rest of the trampoline. */
176 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
178 /* Restore all registers except %rip and %r11 */
179 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
/* NOTE: RSP and the scratch regs R8-R11 are also skipped here; RSP/RIP are
 * restored last (below) using R8/R11 as temporaries. */
180 for (i = 0; i < AMD64_NREG; ++i) {
181 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
182 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
186 * The context resides on the stack, in the stack frame of the
187 * caller of this function. The stack pointer that we need to
188 * restore is potentially many stack frames higher up, so the
189 * distance between them can easily be more than the red zone
190 * size. Hence the stack pointer can be restored only after
191 * we have finished loading everything from the context.
193 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
194 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
195 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
197 /* jump to the saved IP */
198 amd64_jump_reg (code, AMD64_R11);
200 mono_arch_flush_icache (start, code - start);
201 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
204 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
210 * mono_arch_get_call_filter:
212 * Returns a pointer to a method which calls an exception filter. We
213 * also use this function to call finally handlers (we pass NULL as
214 * @exc object in this case).
217 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
223 MonoJumpInfo *ji = NULL;
224 GSList *unwind_ops = NULL;
225 const guint kMaxCodeSize = 128;
227 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
229 /* call_filter (MonoContext *ctx, unsigned long eip) */
232 /* Alloc new frame */
233 amd64_push_reg (code, AMD64_RBP);
234 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
236 /* Save callee saved regs */
238 for (i = 0; i < AMD64_NREG; ++i)
239 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
240 amd64_push_reg (code, i);
246 amd64_push_reg (code, AMD64_RBP);
248 /* Make stack misaligned, the call will make it aligned again */
250 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
252 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Load the handler's frame pointer from the context. */
255 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
256 /* load callee saved regs */
257 for (i = 0; i < AMD64_NREG; ++i) {
258 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
259 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
261 /* load exc register */
262 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
264 /* call the handler */
265 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment made before the call. */
268 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
271 amd64_pop_reg (code, AMD64_RBP);
273 /* Restore callee saved regs */
274 for (i = AMD64_NREG; i >= 0; --i)
275 if (AMD64_IS_CALLEE_SAVED_REG (i))
276 amd64_pop_reg (code, i);
/* Tear down the frame and return. */
279 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
280 amd64_pop_reg (code, AMD64_RBP);
286 g_assert ((code - start) < kMaxCodeSize);
288 mono_arch_flush_icache (start, code - start);
289 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
292 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
299 * The first few arguments are dummy, to force the other arguments to be passed on
300 * the stack, this avoids overwriting the argument registers in the throw trampoline.
303 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
304 guint64 dummy5, guint64 dummy6,
305 MonoContext *mctx, MonoObject *exc, gboolean rethrow)
310 /* mctx is on the caller's stack */
311 memcpy (&ctx, mctx, sizeof (MonoContext));
313 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
314 MonoException *mono_ex = (MonoException*)exc;
/* Clear any stale trace from a prior throw so a fresh one is captured. */
316 mono_ex->stack_trace = NULL;
317 mono_ex->trace_ips = NULL;
320 mono_error_assert_ok (&error);
322 /* adjust eip so that it point into the call instruction */
323 ctx.gregs [AMD64_RIP] --;
325 mono_handle_exception (&ctx, exc);
/* Resume execution in the (possibly modified) context; never returns. */
326 mono_restore_context (&ctx);
327 g_assert_not_reached ();
/*
 * Raise a corlib exception identified by its TypeDef token index; pc_offset is
 * subtracted from the caller IP to locate the actual throw site.
 */
331 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
332 guint64 dummy5, guint64 dummy6,
333 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
335 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
338 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
340 mctx->gregs [AMD64_RIP] -= pc_offset;
342 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
343 mctx->gregs [AMD64_RIP] += 1;
345 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
/* Continue a previously-started unwind from the given context (used by the
 * llvm_resume_unwind trampoline); dummy args keep real args on the stack. */
349 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
350 guint64 dummy5, guint64 dummy6,
351 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
353 /* Only the register parameters are valid */
356 /* mctx is on the caller's stack */
357 memcpy (&ctx, mctx, sizeof (MonoContext));
359 mono_resume_unwind (&ctx);
364 * get_throw_trampoline:
366 * Generate a call to mono_amd64_throw_exception/
367 * mono_amd64_throw_corlib_exception.
370 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
374 MonoJumpInfo *ji = NULL;
375 GSList *unwind_ops = NULL;
376 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
377 const guint kMaxCodeSize = 256;
380 dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
382 dummy_stack_space = 0;
386 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
388 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
390 /* The stack is unaligned on entry */
391 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
396 unwind_ops = mono_arch_get_cie_program ();
/* Allocate the frame and describe it to the unwinder. */
399 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
401 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
402 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
406 * To hide linux/windows calling convention differences, we pass all arguments on
407 * the stack by passing 6 dummy values in registers.
/* Stack layout inside the frame: arg slots first, then the MonoContext. */
410 arg_offsets [0] = dummy_stack_space + 0;
411 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
412 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
413 ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
414 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Save all general registers into the context. */
417 for (i = 0; i < AMD64_NREG; ++i)
419 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Record the caller's SP (frame size plus the return address slot). */
421 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
422 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
/* Record the caller IP (return address) as the context RIP. */
424 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
425 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
426 /* Set arg1 == ctx */
427 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
428 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
429 /* Set arg2 == exc/ex_token_index */
431 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
433 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
434 /* Set arg3 == rethrow/pc offset */
436 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
440 * The caller doesn't pass in a pc/pc offset, instead we simply use the
441 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
443 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
445 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
447 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
/* Resolve the target C function: patched indirection for AOT, direct imm otherwise. */
451 const char *icall_name;
454 icall_name = "mono_amd64_resume_unwind";
456 icall_name = "mono_amd64_throw_corlib_exception";
458 icall_name = "mono_amd64_throw_exception";
459 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
460 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
462 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
464 amd64_call_reg (code, AMD64_R11);
/* The icall never returns; trap if it somehow does. */
465 amd64_breakpoint (code);
467 mono_arch_flush_icache (start, code - start);
469 g_assert ((code - start) < kMaxCodeSize);
470 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
472 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
475 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
481 * mono_arch_get_throw_exception:
482 * \returns a function pointer which can be used to raise
483 * exceptions. The returned function has the following
484 * signature: void (*func) (MonoException *exc);
487 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
489 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
/* Same as above but with rethrow == TRUE, preserving the original stack trace. */
493 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
495 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
499 * mono_arch_get_throw_corlib_exception:
501 * Returns a function pointer which can be used to raise
502 * corlib exceptions. The returned function has the following
503 * signature: void (*func) (guint32 ex_token, guint32 offset);
504 * Here, offset is the offset which needs to be subtracted from the caller IP
505 * to get the IP of the throw. Passing the offset has the advantage that it
506 * needs no relocations in the caller.
509 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
511 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
513 #endif /* !DISABLE_JIT */
516 * mono_arch_unwind_frame:
518 * This function is used to gather information from @ctx, and store it in @frame_info.
519 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
520 * is modified if needed.
521 * Returns TRUE on success, FALSE otherwise.
524 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
525 MonoJitInfo *ji, MonoContext *ctx,
526 MonoContext *new_ctx, MonoLMF **lmf,
527 mgreg_t **save_locations,
528 StackFrameInfo *frame)
530 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
533 memset (frame, 0, sizeof (StackFrameInfo));
/* Case 1: the IP is inside JITted code (ji != NULL, presumably — the guard is
 * not visible in this chunk). Use DWARF unwind info to step one frame. */
539 mgreg_t regs [MONO_MAX_IREGS + 1];
541 guint32 unwind_info_len;
543 guint8 *epilog = NULL;
545 if (ji->is_trampoline)
546 frame->type = FRAME_TYPE_TRAMPOLINE;
548 frame->type = FRAME_TYPE_MANAGED;
550 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
552 frame->unwind_info = unwind_info;
553 frame->unwind_info_len = unwind_info_len;
/* Debug aid (normally compiled out / disabled). */
556 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
557 mono_print_unwind_info (unwind_info, unwind_info_len);
559 /* LLVM compiled code doesn't have this info */
560 if (ji->has_arch_eh_info)
561 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
563 for (i = 0; i < AMD64_NREG; ++i)
564 regs [i] = new_ctx->gregs [i];
566 mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
567 (guint8*)ji->code_start + ji->code_size,
568 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
569 save_locations, MONO_MAX_IREGS, &cfa);
571 for (i = 0; i < AMD64_NREG; ++i)
572 new_ctx->gregs [i] = regs [i];
574 /* The CFA becomes the new SP value */
575 new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Point the IP into the call instruction so lookups hit the caller's line. */
578 new_ctx->gregs [AMD64_RIP] --;
/* Case 2: use the LMF chain. Tag bit 2 on previous_lmf marks a MonoLMFExt
 * entry (debugger invoke or interpreter exit transition). */
584 if (((guint64)(*lmf)->previous_lmf) & 2) {
585 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
587 if (ext->debugger_invoke) {
589 * This LMF entry is created by the soft debug code to mark transitions to
590 * managed code done during invokes.
592 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
593 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
594 } else if (ext->interp_exit) {
595 frame->type = FRAME_TYPE_INTERP_TO_MANAGED;
596 frame->interp_exit_data = ext->interp_exit_data;
598 g_assert_not_reached ();
/* Low 3 bits of previous_lmf are tag bits; mask them off to follow the chain. */
601 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
/* Recover the return IP: tag bit 4 = trampoline LMF with a full saved context,
 * tag bit 1 = rip field already valid, otherwise read it from the saved rsp. */
606 if (((guint64)(*lmf)->previous_lmf) & 4) {
607 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
609 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
610 } else if (((guint64)(*lmf)->previous_lmf) & 1) {
611 /* This LMF has the rip field set */
613 } else if ((*lmf)->rsp == 0) {
618 * The rsp field is set just before the call which transitioned to native
619 * code. Obtain the rip from the stack.
621 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
624 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
626 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
627 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
635 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
637 if (((guint64)(*lmf)->previous_lmf) & 4) {
638 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
640 /* Trampoline frame */
641 for (i = 0; i < AMD64_NREG; ++i)
642 new_ctx->gregs [i] = ext->ctx->gregs [i];
/* Adjust IP into the call instruction, as in the JIT-unwind path above. */
644 new_ctx->gregs [AMD64_RIP] --;
647 * The registers saved in the LMF will be restored using the normal unwind info,
648 * when the wrapper frame is processed.
652 new_ctx->gregs [AMD64_RIP] = rip;
653 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
654 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
/* Callee-saved registers (except RBP) are unknown here; zero them. */
655 for (i = 0; i < AMD64_NREG; ++i) {
656 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
657 new_ctx->gregs [i] = 0;
661 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
672 * Called by resuming from a signal handler.
/* Runs on the normal stack: picks up the context stashed in TLS by
 * mono_arch_handle_exception, handles the exception, then resumes. */
675 handle_signal_exception (gpointer obj)
677 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
680 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
682 mono_handle_exception (&ctx, (MonoObject *)obj);
684 mono_restore_context (&ctx);
/*
 * Rewrite @ctx so that, when restored, execution continues in async_cb(user_data)
 * on a fresh stack frame below the interrupted frame's red zone.
 */
688 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
690 guint64 sp = ctx->gregs [AMD64_RSP];
/* First (and only) argument goes in RDI per the SysV AMD64 ABI. */
692 ctx->gregs [AMD64_RDI] = (guint64)user_data;
694 /* Allocate a stack frame below the red zone */
696 /* The stack should be unaligned */
700 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
701 *(guint64*)sp = ctx->gregs [AMD64_RIP];
703 ctx->gregs [AMD64_RSP] = sp;
704 ctx->gregs [AMD64_RIP] = (guint64)async_cb;
708 * mono_arch_handle_exception:
709 * \param ctx saved processor state
710 * \param obj the exception object
713 mono_arch_handle_exception (void *sigctx, gpointer obj)
715 #if defined(MONO_ARCH_USE_SIGACTION)
719 * Handling the exception in the signal handler is problematic, since the original
720 * signal is disabled, and we could run arbitrary code though the debugger. So
721 * resume into the normal stack and do most work there if possible.
723 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
725 /* Pass the ctx parameter in TLS */
726 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
/* Redirect the signal context so handle_signal_exception runs after we return. */
728 mctx = jit_tls->ex_ctx;
729 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
730 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Fallback (no sigaction): handle the exception directly in the signal handler. */
736 mono_sigctx_to_monoctx (sigctx, &mctx);
738 mono_handle_exception (&mctx, obj);
740 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Extract the instruction pointer from a platform signal/exception context. */
747 mono_arch_ip_from_context (void *sigctx)
749 #if defined(MONO_ARCH_USE_SIGACTION)
750 ucontext_t *ctx = (ucontext_t*)sigctx;
752 return (gpointer)UCONTEXT_REG_RIP (ctx);
753 #elif defined(HOST_WIN32)
754 return ((CONTEXT*)sigctx)->Rip;
/* Otherwise sigctx is already a MonoContext. */
756 MonoContext *ctx = sigctx;
757 return (gpointer)ctx->gregs [AMD64_RIP];
/* Re-protect the soft stack-overflow guard pages after an overflow was handled. */
762 restore_soft_guard_pages (void)
764 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
765 if (jit_tls->stack_ovf_guard_base)
766 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
770 * this function modifies mctx so that when it is restored, it
771 * won't execute starting at mctx.eip, but in a function that
772 * will restore the protection on the soft-guard pages and return back to
773 * continue at mctx.eip.
776 prepare_for_guard_pages (MonoContext *mctx)
779 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
781 /* the return addr */
782 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
783 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
784 mctx->gregs [AMD64_RSP] = (guint64)sp;
/* Runs on the normal stack after an altstack SIGSEGV: handle the exception,
 * re-arm guard pages on stack overflow, then resume execution. */
788 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
791 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
/* Fault outside managed code: treat it as a native crash. */
794 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
798 mono_handle_exception (&mctx, obj);
800 prepare_for_guard_pages (&mctx);
801 mono_restore_context (&mctx);
/*
 * Handle a SIGSEGV delivered on the signal alternate stack: build a call frame
 * on the faulting thread's real stack and redirect execution there so handling
 * can proceed with a usable stack.
 */
805 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
807 #if defined(MONO_ARCH_USE_SIGACTION)
808 MonoException *exc = NULL;
811 MonoContext *copied_ctx;
/* Stack overflows use the pre-allocated exception object (no allocation possible now). */
814 exc = mono_domain_get ()->stack_overflow_ex;
816 /* setup a call frame on the real stack so that control is returned there
817 * and exception handling can continue.
818 * The frame looks like:
822 * 128 is the size of the red zone
824 frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
/* Align down to 16 bytes, then reserve the frame below the faulting SP. */
827 sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
828 sp = (gpointer *)((char*)sp - frame_size);
829 copied_ctx = (MonoContext*)(sp + 4);
830 /* the arguments must be aligned */
831 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
832 mono_sigctx_to_monoctx (sigctx, copied_ctx);
833 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
834 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
835 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
/* Arguments for altstack_handle_and_restore per the SysV AMD64 ABI: RDI, RSI, RDX. */
836 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
837 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
838 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
/* Return the original IP saved in the current LMF, clearing the tag bit that
 * marked the rip field as valid. */
843 mono_amd64_get_original_ip (void)
845 MonoLMF *lmf = mono_get_lmf ();
849 /* Reset the change to previous_lmf */
850 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Build the list of LLVM-specific exception trampolines (throw corlib,
 * throw corlib abs, resume unwind); returned list is consumed by the caller. */
857 mono_amd64_get_exception_trampolines (gboolean aot)
860 GSList *tramps = NULL;
862 /* LLVM needs different throw trampolines */
863 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
864 tramps = g_slist_prepend (tramps, info);
866 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
867 tramps = g_slist_prepend (tramps, info);
869 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
870 tramps = g_slist_prepend (tramps, info);
/* Register the exception trampolines as JIT icalls — from AOT images when
 * available, otherwise generated via mono_amd64_get_exception_trampolines. */
877 mono_arch_exceptions_init (void)
883 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
884 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
885 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
886 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
887 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
888 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
890 /* Call this to avoid initialization races */
891 tramps = mono_amd64_get_exception_trampolines (FALSE);
892 for (l = tramps; l; l = l->next) {
893 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
895 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
896 mono_tramp_info_register (info, NULL);
898 g_slist_free (tramps);
902 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
/* Allocate a zeroed Windows x64 UNWIND_INFO record (version 1) and return it
 * through *monoui. Caller owns the allocation. */
905 mono_arch_unwindinfo_create (gpointer* monoui)
907 PUNWIND_INFO newunwindinfo;
908 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
909 newunwindinfo->Version = 1;
/*
 * Append a UWOP_PUSH_NONVOL code for unwind_op (a nonvolatile register push).
 * Codes are filled from the end of the UnwindCode array backwards so they end
 * up sorted by descending CodeOffset, as required by the Windows x64 unwinder.
 */
913 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
915 PUNWIND_CODE unwindcode;
918 g_assert (unwindinfo != NULL);
920 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
921 g_error ("Larger allocation needed for the unwind information.");
923 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
924 unwindcode = &unwindinfo->UnwindCode [codeindex];
925 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
926 unwindcode->CodeOffset = (guchar)unwind_op->when;
927 unwindcode->OpInfo = unwind_op->reg;
/* Ops must be added in increasing prolog-offset order. */
929 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
930 g_error ("Adding unwind info in wrong order.");
932 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append a UWOP_SET_FPREG code establishing unwind_op->reg as the frame pointer
 * at offset unwind_op->val from RSP (must be a multiple of 16, encoded /16).
 */
936 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
938 PUNWIND_CODE unwindcode;
941 g_assert (unwindinfo != NULL);
943 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
944 g_error ("Larger allocation needed for the unwind information.");
946 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
947 unwindcode = &unwindinfo->UnwindCode [codeindex];
948 unwindcode->UnwindOp = UWOP_SET_FPREG;
949 unwindcode->CodeOffset = (guchar)unwind_op->when;
951 g_assert (unwind_op->val % 16 == 0);
952 unwindinfo->FrameRegister = unwind_op->reg;
953 unwindinfo->FrameOffset = unwind_op->val / 16;
/* Ops must be added in increasing prolog-offset order. */
955 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
956 g_error ("Adding unwind info in wrong order.");
958 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append an allocation code for unwind_op->val bytes of stack:
 * UWOP_ALLOC_SMALL (1 slot, 8..128 bytes), UWOP_ALLOC_LARGE OpInfo=0
 * (2 slots, size/8 up to 512K-8), or UWOP_ALLOC_LARGE OpInfo=1
 * (3 slots, unscaled 32-bit size).
 */
962 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
964 PUNWIND_CODE unwindcode;
969 g_assert (unwindinfo != NULL);
971 size = unwind_op->val;
974 g_error ("Stack allocation must be equal to or greater than 0x8.");
978 else if (size <= 0x7FFF8)
983 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
984 g_error ("Larger allocation needed for the unwind information.");
/* Reserve all slots at once; first slot holds the op, the rest hold the size. */
986 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
987 unwindcode = &unwindinfo->UnwindCode [codeindex];
989 unwindcode->CodeOffset = (guchar)unwind_op->when;
991 if (codesneeded == 1) {
992 /*The size of the allocation is
993 (the number in the OpInfo member) times 8 plus 8*/
994 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
995 unwindcode->OpInfo = (size - 8)/8;
998 if (codesneeded == 3) {
999 /*the unscaled size of the allocation is recorded
1000 in the next two slots in little-endian format.
1001 NOTE, unwind codes are allocated from end to beginning of list so
1002 unwind code will have right execution order. List is sorted on CodeOffset
1003 using descending sort order.*/
1004 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1005 unwindcode->OpInfo = 1;
1006 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1009 /*the size of the allocation divided by 8
1010 is recorded in the next slot.
1011 NOTE, unwind codes are allocated from end to beginning of list so
1012 unwind code will have right execution order. List is sorted on CodeOffset
1013 using descending sort order.*/
1014 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1015 unwindcode->OpInfo = 0;
1016 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
/* Ops must be added in increasing prolog-offset order. */
1020 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1021 g_error ("Adding unwind info in wrong order.");
1023 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* One-time-init flag for the dynamic function table machinery below. */
1026 static gboolean g_dyn_func_table_inited;
1028 // Dynamic function table used when registering unwind info for OS unwind support.
/* List is kept sorted in descending order of begin_range (see fast_find below). */
1029 static GList *g_dynamic_function_table_begin;
1030 static GList *g_dynamic_function_table_end;
1032 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1033 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1035 // Module handle used when explicit loading ntdll.
1036 static HMODULE g_ntdll;
1038 // If Win8 or Win2012Server or later, use growable function tables instead
1039 // of callbacks. Callback solution will still be fallback on older systems.
1040 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1041 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1042 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1044 // When using function table callback solution an out of proc module is needed by
1045 // debuggers in order to read unwind info from debug target.
1047 #define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
1049 #define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
1052 #define MONO_DAC_MODULE_MAX_PATH 1024
/* One-time initialization of the dynamic function table state; caller must
 * hold g_dynamic_function_table_lock exclusively. */
1055 init_table_no_lock (void)
1057 if (g_dyn_func_table_inited == FALSE) {
1058 g_assert_checked (g_dynamic_function_table_begin == NULL);
1059 g_assert_checked (g_dynamic_function_table_end == NULL);
1060 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1061 g_assert_checked (g_rtl_grow_function_table == NULL);
1062 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1063 g_assert_checked (g_ntdll == NULL);
1065 // Load functions available on Win8/Win2012Server or later. If running on earlier
1066 // systems the below GetProcAddress will fail, this is expected behavior.
1067 if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
1068 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
1069 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
1070 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
1073 g_dyn_func_table_inited = TRUE;
/* Public entry point: initialize the table under the SRW lock.
 * Uses the double-checked pattern: cheap unlocked flag test, then recheck in
 * init_table_no_lock while holding the lock. */
1078 mono_arch_unwindinfo_init_table (void)
1080 if (g_dyn_func_table_inited == FALSE) {
1082 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1084 init_table_no_lock ();
1086 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/* Tear down all table state; caller must hold the lock exclusively. */
1091 terminate_table_no_lock (void)
1093 if (g_dyn_func_table_inited == TRUE) {
1094 if (g_dynamic_function_table_begin != NULL) {
1095 // Free all list elements.
1096 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1104 g_list_free (g_dynamic_function_table_begin);
1105 g_dynamic_function_table_begin = NULL;
1106 g_dynamic_function_table_end = NULL;
1109 g_rtl_delete_growable_function_table = NULL;
1110 g_rtl_grow_function_table = NULL;
1111 g_rtl_add_growable_function_table = NULL;
1113 if (g_ntdll != NULL) {
1114 FreeLibrary (g_ntdll);
1118 g_dyn_func_table_inited = FALSE;
/* Public entry point: terminate the table under the SRW lock. */
1123 mono_arch_unwindinfo_terminate_table (void)
1125 if (g_dyn_func_table_inited == TRUE) {
1127 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1129 terminate_table_no_lock ();
1131 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/*
 * fast_find_range_in_table_no_lock_ex:
 *
 * O(1) boundary check before a full list scan. The list is kept sorted in
 * descending order on begin_range, so the head holds the highest range.
 * Returns the matching GList node, or NULL; *continue_search tells the
 * caller whether a linear scan could still find the range.
 * Caller must hold g_dynamic_function_table_lock (shared or exclusive).
 */
fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
	GList *found_entry = NULL;

	// Fast path, look at boundaries.
	if (g_dynamic_function_table_begin != NULL) {
		DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
		DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? g_dynamic_function_table_end->data : first_entry;

		// Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
		if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
			// Entry belongs to first entry in list.
			found_entry = g_dynamic_function_table_begin;
			*continue_search = FALSE;
		// Requested range falls between the head's and tail's begin_range:
		// it may exist somewhere in the middle, so tell the caller to scan.
		if (first_entry != NULL && first_entry->begin_range >= begin_range) {
			if (last_entry != NULL && last_entry->begin_range <= begin_range) {
				// Entry has a range that could exist in table, continue search.
				*continue_search = TRUE;
// Convenience wrapper: same boundary check, but unwraps the GList node to
// the DynamicFunctionTableEntry payload (NULL when not found).
static inline DynamicFunctionTableEntry *
fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
	GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
/*
 * find_range_in_table_no_lock_ex:
 *
 * Look up the GList node whose entry covers EXACTLY [code_block,
 * code_block + block_size). Boundary fast path first; falls back to a
 * linear scan only when the fast path says the range could be present.
 * Caller must hold g_dynamic_function_table_lock.
 */
find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
	GList *found_entry = NULL;
	gboolean continue_search = FALSE;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	// Fast path, check table boundaries.
	found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
	if (found_entry || continue_search == FALSE)

	// Scan table for an entry including range.
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
		g_assert_checked (current_entry != NULL);

		// Do we have a match? Note: exact begin/end equality, not containment.
		if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
// Convenience wrapper over find_range_in_table_no_lock_ex returning the
// entry payload instead of the GList node (NULL when not found).
static inline DynamicFunctionTableEntry *
find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
	GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
/*
 * find_pc_in_table_no_lock_ex:
 *
 * Look up the GList node whose entry range CONTAINS the address PC
 * (begin_range <= pc <= end_range). Unlike the exact-range lookup above,
 * this is a containment search. Caller must hold the table lock.
 */
find_pc_in_table_no_lock_ex (const gpointer pc)
	GList *found_entry = NULL;
	gboolean continue_search = FALSE;

	gsize begin_range = (gsize)pc;
	gsize end_range = begin_range;

	// Fast path, check table boundaries. begin_range is passed twice since
	// for a single pc begin_range == end_range.
	found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
	if (found_entry || continue_search == FALSE)

	// Scan table for a entry including range.
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
		g_assert_checked (current_entry != NULL);

		// Do we have a match? (containment, not exact equality)
		if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
// Convenience wrapper over find_pc_in_table_no_lock_ex returning the entry
// payload instead of the GList node (NULL when not found).
static inline DynamicFunctionTableEntry *
find_pc_in_table_no_lock (const gpointer pc)
	GList *found_entry = find_pc_in_table_no_lock_ex (pc);
	return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
/*
 * validate_table_no_lock (checked builds only):
 *
 * Walk the table asserting the invariants the lookup helpers rely on:
 * non-empty ranges, strictly descending sort on begin_range, and no
 * overlapping regions. Caller must hold the table lock.
 * NOTE(review): local name "prevoious_entry" is a typo for "previous_entry".
 */
validate_table_no_lock (void)
	// Validation method checking that table is sorted as expected and don't include overlapped regions.
	// Method will assert on failure to explicitly indicate what check failed.
	if (g_dynamic_function_table_begin != NULL) {
		g_assert_checked (g_dynamic_function_table_end != NULL);

		DynamicFunctionTableEntry *prevoious_entry = NULL;
		DynamicFunctionTableEntry *current_entry = NULL;
		for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
			current_entry = (DynamicFunctionTableEntry *)node->data;

			g_assert_checked (current_entry != NULL);
			g_assert_checked (current_entry->end_range > current_entry->begin_range);

			if (prevoious_entry != NULL) {
				// List should be sorted in descending order on begin_range.
				g_assert_checked (prevoious_entry->begin_range > current_entry->begin_range);

				// Check for overlapped regions.
				g_assert_checked (prevoious_entry->begin_range >= current_entry->end_range);

			prevoious_entry = current_entry;

// Non-checked builds: validation is a no-op stub.
validate_table_no_lock (void)

#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1282 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
/*
 * mono_arch_unwindinfo_insert_range_in_table:
 *
 * Register the code region [code_block, code_block + block_size) in the
 * dynamic function table, allocating a new entry (with a pre-sized
 * RUNTIME_FUNCTION array) unless one already covers the exact range.
 * The entry is then registered with the OS unwinder — either via the
 * growable function table API (Win8.1+) or, as fallback, via
 * RtlInstallFunctionTableCallback with an out-of-process DAC module path.
 * Returns the (new or existing) entry. Thread-safe: takes the exclusive
 * table lock for the whole operation.
 */
DynamicFunctionTableEntry *
mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
	DynamicFunctionTableEntry *new_entry = NULL;

	gsize begin_range = (gsize)code_block;
	gsize end_range = begin_range + block_size;

	AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
	init_table_no_lock ();
	new_entry = find_range_in_table_no_lock (code_block, block_size);
	if (new_entry == NULL) {
		// Allocate new entry.
		new_entry = g_new0 (DynamicFunctionTableEntry, 1);
		if (new_entry != NULL) {

			// Pre-allocate RUNTIME_FUNCTION array, assume average method size of
			// MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
			InitializeSRWLock (&new_entry->lock);
			new_entry->handle = NULL;
			new_entry->begin_range = begin_range;
			new_entry->end_range = end_range;
			new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
			new_entry->rt_funcs_current_count = 0;
			new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);

			if (new_entry->rt_funcs != NULL) {
				// Check insert on boundaries. List is sorted descending on begin_range.
				if (g_dynamic_function_table_begin == NULL) {
					g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_begin;
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
					// Insert at the head.
					g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
				} else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
					// Insert at the tail; advance the cached end pointer to the new node.
					g_list_append (g_dynamic_function_table_end, new_entry);
					g_dynamic_function_table_end = g_dynamic_function_table_end->next;
				//Search and insert at correct position.
				for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
					DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
					g_assert_checked (current_entry != NULL);

					if (current_entry->begin_range < new_entry->begin_range) {
						g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);

			// Register dynamic function table entry with OS.
			if (g_rtl_add_growable_function_table != NULL) {
				// Allocate new growable handle table for entry.
				g_assert_checked (new_entry->handle == NULL);
				DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
							new_entry->rt_funcs, new_entry->rt_funcs_current_count,
							new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
				// Fallback path: growable API unavailable, use a callback +
				// out-of-process DAC module so debuggers can still unwind.
				WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
				WCHAR *path = buffer;

				// DAC module should be in the same directory as the
				GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
				path = wcsrchr (buffer, TEXT('\\'));

				wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);

				// Register function table callback + out of proc module.
				// Low 2 bits of the "handle" must be set per RtlInstallFunctionTableCallback contract.
				new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
				BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
							(DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
							MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);

			// Only included in checked builds. Validates the structure of table after insert.
			validate_table_no_lock ();

	ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/*
 * remove_range_in_table_no_lock:
 *
 * Unlink ENTRY from the table, unregister its function table from the OS
 * (growable API when available, RtlDeleteFunctionTable otherwise), and free
 * the entry plus its RUNTIME_FUNCTION array. NULL entry is a no-op aside
 * from validation. Caller must hold the table lock exclusively.
 */
remove_range_in_table_no_lock (GList *entry)
	if (entry != NULL) {
		// Keep the cached tail pointer consistent when removing the tail node.
		if (entry == g_dynamic_function_table_end)
			g_dynamic_function_table_end = entry->prev;

		g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
		DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;

		g_assert_checked (removed_entry != NULL);
		g_assert_checked (removed_entry->rt_funcs != NULL);

		// Remove function table from OS.
		if (removed_entry->handle != NULL) {
			if (g_rtl_delete_growable_function_table != NULL) {
				g_rtl_delete_growable_function_table (removed_entry->handle);
				// Callback registration path: delete by the tagged handle.
				RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);

		g_free (removed_entry->rt_funcs);
		g_free (removed_entry);

		// Free only the detached node, not the rest of the list.
		g_list_free_1 (entry);

	// Only included in checked builds. Validates the structure of table after remove.
	validate_table_no_lock ();
1414 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1416 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1418 GList *found_entry = find_pc_in_table_no_lock_ex (code);
1420 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1421 remove_range_in_table_no_lock (found_entry);
1423 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1427 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1429 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1431 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1433 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1434 remove_range_in_table_no_lock (found_entry);
1436 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/*
 * mono_arch_unwindinfo_find_rt_func_in_table:
 *
 * Find the RUNTIME_FUNCTION record covering [code, code + code_size)
 * inside the table entry that contains CODE. Returns NULL when no entry or
 * no record matches. Takes shared (reader) locks on both the global table
 * and the matched entry.
 */
mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
	PRUNTIME_FUNCTION found_rt_func = NULL;

	gsize begin_range = (gsize)code;
	gsize end_range = begin_range + code_size;

	AcquireSRWLockShared (&g_dynamic_function_table_lock);

	DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);

	if (found_entry != NULL) {

		AcquireSRWLockShared (&found_entry->lock);

		g_assert_checked (found_entry->begin_range <= begin_range);
		g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
		g_assert_checked (found_entry->rt_funcs != NULL);

		// Linear scan; RT function addresses are entry-relative offsets.
		for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
			PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);

			// Is this our RT function entry?
			if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
				found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
				found_rt_func = current_rt_func;

		ReleaseSRWLockShared (&found_entry->lock);

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);

	return found_rt_func;
// Single-address variant: find the RUNTIME_FUNCTION covering PC
// (a zero-size range degenerates to a containment check on PC).
inline PRUNTIME_FUNCTION
mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
	return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
/*
 * validate_rt_funcs_in_table_no_lock (checked builds only):
 *
 * Assert the per-entry RUNTIME_FUNCTION array invariants the OS unwinder
 * requires: count within capacity, non-empty functions, unwind data placed
 * at/after the function end, ascending sort on BeginAddress, and no
 * overlapping regions. Caller must hold the entry's lock.
 */
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
	// Validation method checking that runtime function table is sorted as expected and don't include overlapped regions.
	// Method will assert on failure to explicitly indicate what check failed.
	g_assert_checked (entry != NULL);
	g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
	g_assert_checked (entry->rt_funcs != NULL);

	PRUNTIME_FUNCTION current_rt_func = NULL;
	PRUNTIME_FUNCTION previous_rt_func = NULL;
	for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
		current_rt_func = &(entry->rt_funcs [i]);

		g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
		g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);

		if (previous_rt_func != NULL) {
			// List should be sorted in ascending order based on BeginAddress.
			g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);

			// Check for overlapped regions.
			g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);

		previous_rt_func = current_rt_func;

// Non-checked builds: validation is a no-op stub.
validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)

#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
/*
 * mono_arch_unwindinfo_insert_rt_func_in_table:
 *
 * Insert a RUNTIME_FUNCTION record for the method at [code, code + code_size)
 * into the table entry containing CODE, keeping the array sorted ascending on
 * BeginAddress (an OS requirement for directly-registered tables). Fast path
 * appends in place; otherwise the array is reallocated at double capacity and
 * re-registered with the OS (growable table API) or simply swapped (callback
 * registration). Takes the global lock shared and the entry lock exclusive.
 * The record's UnwindData is assumed to live right after the method code,
 * aligned to mgreg_t (see mono_arch_unwindinfo_install_method_unwind_info).
 */
mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
	PRUNTIME_FUNCTION new_rt_func = NULL;

	gsize begin_range = (gsize)code;
	gsize end_range = begin_range + code_size;

	AcquireSRWLockShared (&g_dynamic_function_table_lock);

	DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);

	if (found_entry != NULL) {

		AcquireSRWLockExclusive (&found_entry->lock);

		g_assert_checked (found_entry->begin_range <= begin_range);
		g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
		g_assert_checked (found_entry->rt_funcs != NULL);
		g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);

		gsize code_offset = (gsize)code - found_entry->begin_range;
		gsize entry_count = found_entry->rt_funcs_current_count;
		gsize max_entry_count = found_entry->rt_funcs_max_count;
		PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;

		// All RUNTIME_FUNCTION addresses are offsets relative to the entry's begin_range.
		RUNTIME_FUNCTION new_rt_func_data;
		new_rt_func_data.BeginAddress = code_offset;
		new_rt_func_data.EndAddress = code_offset + code_size;

		// Unwind info is stored just past the method, mgreg_t-aligned.
		gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
		new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;

		g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));

		PRUNTIME_FUNCTION new_rt_funcs = NULL;

		// List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list
		// going to be directly reused in OS func tables. Check if we can append to end of existing table without realloc.
		// NOTE(review): relies on && binding tighter than ||; intended reading is
		// (entry_count == 0) || ((entry_count < max_entry_count) && last.BeginAddress < code_offset).
		// Explicit parentheses would silence -Wparentheses and aid readers.
		if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
			new_rt_func = &(current_rt_funcs [entry_count]);
			*new_rt_func = new_rt_func_data;
			// No easy way out, need to realloc, grow to double size (or current max, if to small).
			max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
			new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);

			if (new_rt_funcs != NULL) {
				gsize from_index = 0;

				// Copy from old table into new table. Make sure new rt func gets inserted
				// into correct location based on sort order.
				for (; from_index < entry_count; ++from_index) {
					if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
						new_rt_func = &(new_rt_funcs [to_index++]);
						*new_rt_func = new_rt_func_data;

					// Skip zeroed (never-filled) records while compacting.
					if (current_rt_funcs [from_index].UnwindData != 0)
						new_rt_funcs [to_index++] = current_rt_funcs [from_index];

				// If we didn't insert by now, put it last in the list.
				if (new_rt_func == NULL) {
					new_rt_func = &(new_rt_funcs [to_index]);
					*new_rt_func = new_rt_func_data;

		// Update the stats for current entry.
		// NOTE(review): entry_count is presumably incremented after a successful
		// insert in lines not visible in this excerpt — confirm against full source.
		found_entry->rt_funcs_current_count = entry_count;
		found_entry->rt_funcs_max_count = max_entry_count;

		if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
			// No new table just report increase in use.
			g_assert_checked (found_entry->handle != NULL);
			g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
			// New table, delete old table and rt funcs, and register a new one.
			g_assert_checked (g_rtl_delete_growable_function_table != NULL);
			g_rtl_delete_growable_function_table (found_entry->handle);
			found_entry->handle = NULL;
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
			DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
						found_entry->rt_funcs, found_entry->rt_funcs_current_count,
						found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
		} else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
			// No table registered with OS, callback solution in use. Switch tables.
			g_free (found_entry->rt_funcs);
			found_entry->rt_funcs = new_rt_funcs;
		} else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
			// No table registered with OS, callback solution in use, nothing to do.
			// Any other combination of (new_rt_funcs, API availability) is impossible.
			g_assert_not_reached ();

		// Only included in checked builds. Validates the structure of table after insert.
		validate_rt_funcs_in_table_no_lock (found_entry);

		ReleaseSRWLockExclusive (&found_entry->lock);

	ReleaseSRWLockShared (&g_dynamic_function_table_lock);
// OS unwinder callback (installed via RtlInstallFunctionTableCallback):
// resolve ControlPc to its RUNTIME_FUNCTION record via the dynamic table.
// Context (the table entry) is unused; lookup goes through the global table.
static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
	return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
/*
 * initialize_unwind_info_internal_ex:
 *
 * Translate Mono's collected DWARF-style unwind ops into Windows x64
 * UNWIND_INFO format, replaying them in order into UNWINDINFO.
 * No-op when either argument is NULL.
 */
initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
	if (unwind_ops != NULL && unwindinfo != NULL) {
		MonoUnwindOp *unwind_op_data;
		gboolean sp_alloced = FALSE;
		gboolean fp_alloced = FALSE;

		// Replay collected unwind info and setup Windows format.
		for (GSList *l = unwind_ops; l; l = l->next) {
			unwind_op_data = (MonoUnwindOp *)l->data;
			switch (unwind_op_data->op) {
			case DW_CFA_offset : {
				// Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
				// TODO: DW_CFA_offset can also be used to move saved regs into frame.
				if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
					mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
			// Mono-specific pseudo-op carrying Win64 stack-allocation info.
			case DW_CFA_mono_sp_alloc_info_win64 : {
				mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
			// Mono-specific pseudo-op carrying Win64 frame-pointer setup info.
			case DW_CFA_mono_fp_alloc_info_win64 : {
				mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
// Allocate a fresh UNWIND_INFO (via mono_arch_unwindinfo_create) and fill it
// from UNWIND_OPS. Ownership of the returned buffer passes to the caller
// (freed later by mono_arch_unwindinfo_install_method_unwind_info).
initialize_unwind_info_internal (GSList *unwind_ops)
	PUNWIND_INFO unwindinfo;

	mono_arch_unwindinfo_create (&unwindinfo);
	initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
// Count how many UNWIND_CODE slots the given unwind ops translate to,
// using a throwaway stack-allocated UNWIND_INFO.
mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
	UNWIND_INFO unwindinfo = {0};
	initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
	return unwindinfo.CountOfCodes;
// Build the per-method UNWIND_INFO from the compile's unwind ops, stash it in
// cfg->arch.unwindinfo (must not already be set), and return the number of
// bytes needed to serialize it after the method's code.
mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
	MonoCompile * current_cfg = (MonoCompile *)cfg;
	g_assert (current_cfg->arch.unwindinfo == NULL);
	current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
	return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
/*
 * mono_arch_unwindinfo_install_method_unwind_info:
 *
 * Serialize the heap UNWIND_INFO in *monoui to its final location directly
 * after the method's code (mgreg_t-aligned), compacting the unwind codes
 * (they were built downward from the end of the scratch array), then free
 * the scratch buffer and register the method's RUNTIME_FUNCTION in the
 * dynamic table. Checked builds additionally validate code ordering.
 */
mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
	PUNWIND_INFO unwindinfo, targetinfo;

	guint64 targetlocation;

	unwindinfo = (PUNWIND_INFO)*monoui;
	// Final resting place: right past the last code byte, aligned up.
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));

	// Copy the fixed header, excluding the full scratch UnwindCode array.
	memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->CountOfCodes;

	// Codes were written from the tail of the scratch array; compact them
	// to the front of the target array.
	memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
			sizeof (UNWIND_CODE) * codecount);

#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO

	// Validate the order of unwind op codes in checked builds. Offset should be in descending order.
	// In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
	for (int current = 0; current < codecount; current++) {
		g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);

		// UWOP_ALLOC_LARGE consumes extra slots; OpInfo selects the encoding size.
		if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
			if (targetinfo->UnwindCode [current].OpInfo == 0) {

#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */

	// Scratch buffer ownership ends here.
	g_free (unwindinfo);

	// Register unwind info in table.
	mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
// Trampoline variant: build unwind info straight from UNWIND_OPS and install
// it after the trampoline's code. The scratch buffer is consumed (freed) by
// mono_arch_unwindinfo_install_method_unwind_info.
mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
	PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
	if (unwindinfo != NULL) {
		mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
// Code-manager hook: a new code chunk was allocated; cover its address range
// in the dynamic function table so methods placed in it can register unwind info.
mono_arch_code_chunk_new (void *chunk, int size)
	mono_arch_unwindinfo_insert_range_in_table (chunk, size);
// Code-manager hook: a code chunk is being freed; drop its range (and OS
// unwind registration) from the dynamic function table.
void mono_arch_code_chunk_destroy (void *chunk)
	mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
1773 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
#if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
/*
 * mono_tasklets_arch_restore:
 *
 * Emit (once, cached in `saved`) a small native stub implementing
 * restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr):
 * it copies the continuation's saved stack back into place with rep movs,
 * restores RBP/RSP from the continuation's LMF, and jumps to the saved
 * return_ip with `state` already in RAX as the return value.
 * NOTE(review): emitted instruction order is load-bearing; registers are
 * clobbered deliberately (see inline comments).
 */
MonoContinuationRestore
mono_tasklets_arch_restore (void)
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = 64;

	// Already generated: return the cached stub.
	return (MonoContinuationRestore)saved;
	code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy
	 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	// Byte count -> qword count for rep movsq.
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

	// Preserve lmf_addr in a callee-saved register (platform-dependent choice).
	amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
	amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	return (MonoContinuationRestore)saved;
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 * Setup CTX so execution continues at FUNC.
 */
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call (x64 ABI: RSP % 16 == 8 at function entry, since the call pushed the return address).
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
// DISABLE_JIT stub: restore-context trampoline cannot exist without the JIT.
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
	g_assert_not_reached ();
// DISABLE_JIT stub: call-filter trampoline cannot exist without the JIT.
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
	g_assert_not_reached ();
// DISABLE_JIT stub: throw-exception trampoline cannot exist without the JIT.
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
	g_assert_not_reached ();
// DISABLE_JIT stub: rethrow-exception trampoline cannot exist without the JIT.
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
	g_assert_not_reached ();
// DISABLE_JIT stub: corlib-exception trampoline cannot exist without the JIT.
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
	g_assert_not_reached ();
// DISABLE_JIT stub: no exception trampolines can be enumerated without the JIT.
mono_amd64_get_exception_trampolines (gboolean aot)
	g_assert_not_reached ();

#endif /* DISABLE_JIT */
#if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
// Stub for builds without tasklet support or without the JIT: the restore
// thunk cannot be emitted, so reaching this is a configuration bug.
MonoContinuationRestore
mono_tasklets_arch_restore (void)
	g_assert_not_reached ();
#endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */