3 * exception support for AMD64
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
22 #ifdef HAVE_UCONTEXT_H
26 #include <mono/arch/amd64/amd64-codegen.h>
27 #include <mono/metadata/abi-details.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/tabledefs.h>
30 #include <mono/metadata/threads.h>
31 #include <mono/metadata/threads-types.h>
32 #include <mono/metadata/debug-helpers.h>
33 #include <mono/metadata/exception.h>
34 #include <mono/metadata/gc-internals.h>
35 #include <mono/metadata/mono-debug.h>
36 #include <mono/utils/mono-mmap.h>
39 #include "mini-amd64.h"
42 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
45 static MonoW32ExceptionHandler fpe_handler;
46 static MonoW32ExceptionHandler ill_handler;
47 static MonoW32ExceptionHandler segv_handler;
49 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
50 void *mono_win_vectored_exception_handle;
52 #define W32_SEH_HANDLE_EX(_ex) \
53 if (_ex##_handler) _ex##_handler(0, ep, ctx)
/*
 * Per-process unhandled-exception filter, installed in win32_seh_init () via
 * SetUnhandledExceptionFilter. Delegates to the previously installed filter
 * when one was saved; otherwise reports a native crash.
 * NOTE(review): this excerpt is missing lines; the exact control flow between
 * the statements below is not fully visible here.
 */
55 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
57 #ifndef MONO_CROSS_COMPILE
58 if (mono_old_win_toplevel_exception_filter) {
59 return (*mono_old_win_toplevel_exception_filter)(ep);
63 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
65 return EXCEPTION_CONTINUE_SEARCH;
69 * Unhandled Exception Filter
70 * Top-level per-process exception handler.
/*
 * Vectored exception handler, installed first-in-chain (AddVectoredExceptionHandler
 * with FirstHandler == 1 in win32_seh_init). Dispatches hardware exceptions to the
 * runtime's registered MonoW32ExceptionHandler callbacks (segv/ill/fpe) via the
 * W32_SEH_HANDLE_EX macro. Returns EXCEPTION_CONTINUE_EXECUTION when the runtime
 * handled the exception, EXCEPTION_CONTINUE_SEARCH to let the OS keep chaining.
 */
72 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
77 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
79 /* If the thread is not managed by the runtime return early */
81 return EXCEPTION_CONTINUE_SEARCH;
83 jit_tls->mono_win_chained_exception_needs_run = FALSE;
84 res = EXCEPTION_CONTINUE_EXECUTION;
86 er = ep->ExceptionRecord;
87 ctx = ep->ContextRecord;
/* Route the hardware exception to the matching runtime handler, if registered. */
89 switch (er->ExceptionCode) {
90 case EXCEPTION_ACCESS_VIOLATION:
91 W32_SEH_HANDLE_EX(segv);
93 case EXCEPTION_ILLEGAL_INSTRUCTION:
94 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults funnel into the single fpe handler. */
96 case EXCEPTION_INT_DIVIDE_BY_ZERO:
97 case EXCEPTION_INT_OVERFLOW:
98 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
99 case EXCEPTION_FLT_OVERFLOW:
100 case EXCEPTION_FLT_UNDERFLOW:
101 case EXCEPTION_FLT_INEXACT_RESULT:
102 W32_SEH_HANDLE_EX(fpe);
/* Unrecognized exception code: let the rest of the handler chain run. */
105 jit_tls->mono_win_chained_exception_needs_run = TRUE;
109 if (jit_tls->mono_win_chained_exception_needs_run) {
110 /* Don't copy context back if we chained exception
111 * as the handler may have modified the EXCEPTION_POINTERS
112 * directly. We don't pass sigcontext to chained handlers.
113 * Return continue search so the UnhandledExceptionFilter
114 * can correctly chain the exception.
116 res = EXCEPTION_CONTINUE_SEARCH;
/*
 * Install the runtime's Win32 exception handling: saves any pre-existing
 * top-level filter in mono_old_win_toplevel_exception_filter so it can be
 * chained/restored later, and registers the vectored handler first-in-chain.
 */
122 void win32_seh_init()
124 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
125 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
/*
 * Undo win32_seh_init: restore the original top-level exception filter (if any)
 * and remove the vectored exception handler registered at init time.
 */
128 void win32_seh_cleanup()
132 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
134 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
/*
 * Register a runtime callback for one class of hardware exception.
 * @type selects which of the fpe/ill/segv handler slots is assigned
 * (the dispatching switch/case lines are not visible in this excerpt).
 */
138 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
142 fpe_handler = handler;
145 ill_handler = handler;
148 segv_handler = handler;
155 #endif /* TARGET_WIN32 */
159 * mono_arch_get_restore_context:
161 * Returns a pointer to a method which restores a previously saved sigcontext.
/*
 * Emit a small trampoline which restores a saved MonoContext: loads every
 * general register from ctx->gregs, then restores RSP last and jumps to the
 * saved RIP. R8-R11 are used as scratch and are deliberately not restored.
 */
164 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
166 guint8 *start = NULL;
168 MonoJumpInfo *ji = NULL;
169 GSList *unwind_ops = NULL;
172 /* restore_context (MonoContext *ctx) */
174 start = code = (guint8 *)mono_global_codeman_reserve (256);
/* Keep the ctx pointer in R11 while the argument registers get clobbered. */
176 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
178 /* Restore all registers except %rip and %r11 */
179 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
180 for (i = 0; i < AMD64_NREG; ++i) {
181 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
182 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
186 * The context resides on the stack, in the stack frame of the
187 * caller of this function. The stack pointer that we need to
188 * restore is potentially many stack frames higher up, so the
189 * distance between them can easily be more than the red zone
190 * size. Hence the stack pointer can be restored only after
191 * we have finished loading everything from the context.
/* Load target RSP into R8 and target RIP into R11 before switching stacks. */
193 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
194 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
195 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
197 /* jump to the saved IP */
198 amd64_jump_reg (code, AMD64_R11);
200 mono_arch_flush_icache (start, code - start);
201 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
204 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
210 * mono_arch_get_call_filter:
212 * Returns a pointer to a method which calls an exception filter. We
213 * also use this function to call finally handlers (we pass NULL as
214 * @exc object in this case).
/*
 * Emit the call_filter trampoline: given a MonoContext and a handler address,
 * it sets up a frame, loads the callee-saved registers (and RAX, the exception
 * register) from the context, calls the filter/finally handler, then restores
 * the caller's callee-saved registers and returns.
 */
217 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
223 MonoJumpInfo *ji = NULL;
224 GSList *unwind_ops = NULL;
225 const guint kMaxCodeSize = 128;
227 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
229 /* call_filter (MonoContext *ctx, unsigned long eip) */
232 /* Alloc new frame */
233 amd64_push_reg (code, AMD64_RBP);
234 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
236 /* Save callee saved regs */
238 for (i = 0; i < AMD64_NREG; ++i)
239 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
240 amd64_push_reg (code, i);
246 amd64_push_reg (code, AMD64_RBP);
248 /* Make stack misaligned, the call will make it aligned again */
250 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
252 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Load RBP from the context first, then the remaining callee-saved regs. */
255 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
256 /* load callee saved regs */
257 for (i = 0; i < AMD64_NREG; ++i) {
258 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
259 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
261 /* load exc register */
262 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
264 /* call the handler */
265 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment and tear the frame down in reverse order. */
268 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
271 amd64_pop_reg (code, AMD64_RBP);
273 /* Restore callee saved regs */
274 for (i = AMD64_NREG; i >= 0; --i)
275 if (AMD64_IS_CALLEE_SAVED_REG (i))
276 amd64_pop_reg (code, i);
279 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
280 amd64_pop_reg (code, AMD64_RBP);
286 g_assert ((code - start) < kMaxCodeSize);
288 mono_arch_flush_icache (start, code - start);
289 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
292 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
296 #endif /* !DISABLE_JIT */
299 * The first few arguments are dummy, to force the other arguments to be passed on
300 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/*
 * C side of the throw trampoline. Copies the caller-stack MonoContext to a
 * local, clears stale stack-trace state on the exception object (unless
 * rethrowing -- that branch is not visible in this excerpt), adjusts RIP to
 * point inside the call instruction, and hands off to mono_handle_exception.
 * Never returns: mono_restore_context resumes execution elsewhere.
 */
303 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
304 guint64 dummy5, guint64 dummy6,
305 MonoContext *mctx, MonoObject *exc, gboolean rethrow)
310 /* mctx is on the caller's stack */
311 memcpy (&ctx, mctx, sizeof (MonoContext));
313 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
314 MonoException *mono_ex = (MonoException*)exc;
/* Reset any stack trace captured by a previous throw of this object. */
316 mono_ex->stack_trace = NULL;
317 mono_ex->trace_ips = NULL;
320 mono_error_assert_ok (&error);
322 /* adjust eip so that it point into the call instruction */
323 ctx.gregs [AMD64_RIP] --;
325 mono_handle_exception (&ctx, exc);
326 mono_restore_context (&ctx);
327 g_assert_not_reached ();
/*
 * Throw a corlib exception identified by token index. The pc_offset is
 * subtracted from the saved RIP to recover the IP of the faulting call site;
 * +1 then cancels the "--" adjustment mono_amd64_throw_exception will apply.
 * Tail-delegates to mono_amd64_throw_exception and therefore never returns.
 */
331 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
332 guint64 dummy5, guint64 dummy6,
333 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
335 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
338 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
340 mctx->gregs [AMD64_RIP] -= pc_offset;
342 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
343 mctx->gregs [AMD64_RIP] += 1;
345 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
/*
 * C side of the resume_unwind trampoline: copies the caller-stack context to
 * a local and resumes stack unwinding from it. The dummy arguments exist only
 * to force the real arguments onto the stack (see get_throw_trampoline).
 */
349 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
350 guint64 dummy5, guint64 dummy6,
351 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
353 /* Only the register parameters are valid */
356 /* mctx is on the caller's stack */
357 memcpy (&ctx, mctx, sizeof (MonoContext))
359 mono_resume_unwind (&ctx);
364 * get_throw_trampoline:
366 * Generate a call to mono_amd64_throw_exception/
367 * mono_amd64_throw_corlib_exception.
/*
 * Generate a throw trampoline. The emitted code captures the full register
 * state into a MonoContext on its own stack frame, marshals the arguments
 * (ctx, exc-or-token, rethrow-or-pc-offset) into stack slots, then calls one
 * of mono_amd64_resume_unwind / mono_amd64_throw_corlib_exception /
 * mono_amd64_throw_exception, selected by the @resume_unwind / @corlib flags.
 * @llvm_abs changes the pc-offset handling for LLVM-compiled callers.
 * In AOT mode the callee address is loaded via a patch-info RIP-relative slot
 * instead of an immediate.
 */
370 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
374 MonoJumpInfo *ji = NULL;
375 GSList *unwind_ops = NULL;
376 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
377 const guint kMaxCodeSize = 256;
380 dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
382 dummy_stack_space = 0;
386 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
388 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
390 /* The stack is unaligned on entry */
391 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
396 unwind_ops = mono_arch_get_cie_program ();
399 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
401 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
402 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
406 * To hide linux/windows calling convention differences, we pass all arguments on
407 * the stack by passing 6 dummy values in registers.
/* Stack layout of the frame: argument slots first, then the MonoContext. */
410 arg_offsets [0] = dummy_stack_space + 0;
411 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
412 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
413 ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
414 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Snapshot all general registers into ctx.gregs. */
417 for (i = 0; i < AMD64_NREG; ++i)
419 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Record the caller's RSP (frame + return address) ... */
421 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
422 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
/* ... and the caller's RIP (the return address on the stack). */
424 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
425 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
426 /* Set arg1 == ctx */
427 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
428 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
429 /* Set arg2 == exc/ex_token_index */
431 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
433 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
434 /* Set arg3 == rethrow/pc offset */
436 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
440 * The caller doesn't pass in a pc/pc offset, instead we simply use the
441 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
443 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
445 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
447 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
/* AOT: resolve the icall address through a RIP-relative patch slot. */
451 const char *icall_name;
454 icall_name = "mono_amd64_resume_unwind";
456 icall_name = "mono_amd64_throw_corlib_exception";
458 icall_name = "mono_amd64_throw_exception";
459 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
460 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
/* JIT: embed the callee address directly as an immediate. */
462 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
464 amd64_call_reg (code, AMD64_R11);
/* The callee never returns; trap if it somehow does. */
465 amd64_breakpoint (code);
467 mono_arch_flush_icache (start, code - start);
469 g_assert ((code - start) < kMaxCodeSize);
470 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
472 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
475 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
481 * mono_arch_get_throw_exception:
483 * Returns a function pointer which can be used to raise
484 * exceptions. The returned function has the following
485 * signature: void (*func) (MonoException *exc);
/* Build the standard throw trampoline: void (*) (MonoException *exc). */
489 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
491 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
/* Same as mono_arch_get_throw_exception but with rethrow == TRUE, so the
 * existing stack trace on the exception object is preserved. */
495 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
497 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
501 * mono_arch_get_throw_corlib_exception:
503 * Returns a function pointer which can be used to raise
504 * corlib exceptions. The returned function has the following
505 * signature: void (*func) (guint32 ex_token, guint32 offset);
506 * Here, offset is the offset which needs to be substracted from the caller IP
507 * to get the IP of the throw. Passing the offset has the advantage that it
508 * needs no relocations in the caller.
/* Build the corlib-exception throw trampoline (corlib == TRUE); see the
 * comment block above for the generated function's signature. */
511 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
513 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
515 #endif /* !DISABLE_JIT */
518 * mono_arch_unwind_frame:
520 * This function is used to gather information from @ctx, and store it in @frame_info.
521 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
522 * is modified if needed.
523 * Returns TRUE on success, FALSE otherwise.
/*
 * Unwind a single stack frame. Two paths are visible here:
 *  - a JIT-info (managed/trampoline) frame, unwound with DWARF-style unwind
 *    info via mono_unwind_frame ();
 *  - an LMF (Last Managed Frame) transition, where registers are recovered
 *    from the MonoLMF / MonoLMFExt / MonoLMFTramp structures. The low bits of
 *    lmf->previous_lmf are used as tag bits (1 = rip set, 2 = debugger invoke,
 *    4 = trampoline frame) and masked off with ~7 when popping the LMF.
 */
526 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
527 MonoJitInfo *ji, MonoContext *ctx,
528 MonoContext *new_ctx, MonoLMF **lmf,
529 mgreg_t **save_locations,
530 StackFrameInfo *frame)
532 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
535 memset (frame, 0, sizeof (StackFrameInfo));
541 mgreg_t regs [MONO_MAX_IREGS + 1];
543 guint32 unwind_info_len;
545 guint8 *epilog = NULL;
547 if (ji->is_trampoline)
548 frame->type = FRAME_TYPE_TRAMPOLINE;
550 frame->type = FRAME_TYPE_MANAGED;
552 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
554 frame->unwind_info = unwind_info;
555 frame->unwind_info_len = unwind_info_len;
/* Debug dump of the frame being unwound (normally compiled out/guarded). */
558 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
559 mono_print_unwind_info (unwind_info, unwind_info_len);
561 /* LLVM compiled code doesn't have this info */
562 if (ji->has_arch_eh_info)
563 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
565 for (i = 0; i < AMD64_NREG; ++i)
566 regs [i] = new_ctx->gregs [i];
568 mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
569 (guint8*)ji->code_start + ji->code_size,
570 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
571 save_locations, MONO_MAX_IREGS, &cfa);
573 for (i = 0; i < AMD64_NREG; ++i)
574 new_ctx->gregs [i] = regs [i];
576 /* The CFA becomes the new SP value */
577 new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Point RIP inside the call instruction so lookups hit the right method. */
580 new_ctx->gregs [AMD64_RIP] --;
/* Tag bit 2: LMF created by the soft debugger around managed invokes. */
586 if (((guint64)(*lmf)->previous_lmf) & 2) {
588 * This LMF entry is created by the soft debug code to mark transitions to
589 * managed code done during invokes.
591 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
593 g_assert (ext->debugger_invoke);
595 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
597 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
599 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
/* Recover the return IP: from the trampoline ctx (bit 4), the rip field
 * (bit 1), or off the stack just below the saved rsp. */
604 if (((guint64)(*lmf)->previous_lmf) & 4) {
605 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
607 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
608 } else if (((guint64)(*lmf)->previous_lmf) & 1) {
609 /* This LMF has the rip field set */
611 } else if ((*lmf)->rsp == 0) {
616 * The rsp field is set just before the call which transitioned to native
617 * code. Obtain the rip from the stack.
619 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
622 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
624 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
625 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
633 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
635 if (((guint64)(*lmf)->previous_lmf) & 4) {
636 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
638 /* Trampoline frame */
639 for (i = 0; i < AMD64_NREG; ++i)
640 new_ctx->gregs [i] = ext->ctx->gregs [i];
/* Adjust RIP into the call instruction, as for the JIT-info path above. */
642 new_ctx->gregs [AMD64_RIP] --;
645 * The registers saved in the LMF will be restored using the normal unwind info,
646 * when the wrapper frame is processed.
650 new_ctx->gregs [AMD64_RIP] = rip;
651 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
652 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
/* Other callee-saved regs are unknown here; zero them defensively. */
653 for (i = 0; i < AMD64_NREG; ++i) {
654 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
655 new_ctx->gregs [i] = 0;
/* Pop the LMF, masking off the tag bits in the low 3 bits. */
659 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
670 * Called by resuming from a signal handler.
/*
 * Executed on the normal stack after returning from a signal handler (see
 * mono_arch_handle_exception). Retrieves the signal-time context stashed in
 * TLS, runs full exception handling on it, then restores that context.
 */
673 handle_signal_exception (gpointer obj)
675 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
678 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
680 mono_handle_exception (&ctx, (MonoObject *)obj);
682 mono_restore_context (&ctx);
/*
 * Rewrite @ctx so that, when resumed, execution continues in @async_cb with
 * @user_data as its first argument (passed in RDI per the SysV AMD64 ABI).
 * A fake frame is pushed below the red zone with the original RIP stored as
 * the return address, to keep native unwinders (libgcc) happy.
 */
686 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
688 guint64 sp = ctx->gregs [AMD64_RSP];
690 ctx->gregs [AMD64_RDI] = (guint64)user_data;
692 /* Allocate a stack frame below the red zone */
694 /* The stack should be unaligned */
698 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
699 *(guint64*)sp = ctx->gregs [AMD64_RIP];
701 ctx->gregs [AMD64_RSP] = sp;
702 ctx->gregs [AMD64_RIP] = (guint64)async_cb;
706 * mono_arch_handle_exception:
708 * @ctx: saved processor state
709 * @obj: the exception object
/*
 * Signal-handler entry point for exception handling.
 * With sigaction support: avoid doing real work inside the signal handler --
 * stash the context in TLS and redirect the signal context so that
 * handle_signal_exception runs on the normal stack after the handler returns.
 * Otherwise: handle the exception in place and write the context back.
 */
712 mono_arch_handle_exception (void *sigctx, gpointer obj)
714 #if defined(MONO_ARCH_USE_SIGACTION)
718 * Handling the exception in the signal handler is problematic, since the original
719 * signal is disabled, and we could run arbitrary code though the debugger. So
720 * resume into the normal stack and do most work there if possible.
722 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
724 /* Pass the ctx parameter in TLS */
725 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
727 mctx = jit_tls->ex_ctx;
728 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
729 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Fallback path: no sigaction redirection available, handle in place. */
735 mono_sigctx_to_monoctx (sigctx, &mctx);
737 mono_handle_exception (&mctx, obj);
739 mono_monoctx_to_sigctx (&mctx, sigctx);
/*
 * Extract the instruction pointer from a platform signal/exception context:
 * ucontext_t RIP on sigaction platforms, CONTEXT.Rip on Windows, or the
 * MonoContext gregs entry otherwise.
 */
746 mono_arch_ip_from_context (void *sigctx)
748 #if defined(MONO_ARCH_USE_SIGACTION)
749 ucontext_t *ctx = (ucontext_t*)sigctx;
751 return (gpointer)UCONTEXT_REG_RIP (ctx);
752 #elif defined(HOST_WIN32)
753 return ((CONTEXT*)sigctx)->Rip;
755 MonoContext *ctx = sigctx;
756 return (gpointer)ctx->gregs [AMD64_RIP];
/*
 * Re-protect the soft stack-overflow guard pages (made accessible while the
 * StackOverflowException was being handled). Installed as a fake return
 * target by prepare_for_guard_pages.
 */
761 restore_soft_guard_pages (void)
763 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
764 if (jit_tls->stack_ovf_guard_base)
765 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
769 * this function modifies mctx so that when it is restored, it
770 * won't execcute starting at mctx.eip, but in a function that
771 * will restore the protection on the soft-guard pages and return back to
772 * continue at mctx.eip.
/*
 * Splice restore_soft_guard_pages into the resume path: push the current RIP
 * as a return address on the context's stack and redirect RIP to
 * restore_soft_guard_pages, so the guard pages are re-armed before execution
 * continues at the original location (see comment block above).
 */
775 prepare_for_guard_pages (MonoContext *mctx)
778 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
780 /* the return addr */
781 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
782 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
783 mctx->gregs [AMD64_RSP] = (guint64)sp;
/*
 * Runs on the regular stack after mono_arch_handle_altstack_exception has
 * redirected control out of the altstack signal handler. If the faulting IP
 * is not in managed code, reports a native crash; otherwise handles the
 * exception and (on stack overflow) re-arms the guard pages before resuming.
 */
787 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
790 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
793 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
797 mono_handle_exception (&mctx, obj);
799 prepare_for_guard_pages (&mctx);
800 mono_restore_context (&mctx);
/*
 * Handle a SIGSEGV delivered on the alternate signal stack (e.g. a stack
 * overflow). Builds a call frame on the *real* stack -- below the red zone,
 * holding a copy of the MonoContext and an aligned argument area -- and
 * rewrites the signal context registers so that returning from the handler
 * calls altstack_handle_and_restore (ctx, exc, stack_ovf) on that stack.
 */
804 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
806 #if defined(MONO_ARCH_USE_SIGACTION)
807 MonoException *exc = NULL;
810 MonoContext *copied_ctx;
813 exc = mono_domain_get ()->stack_overflow_ex;
815 /* setup a call frame on the real stack so that control is returned there
816 * and exception handling can continue.
817 * The frame looks like:
821 * 128 is the size of the red zone
823 frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
/* Align down to 16 bytes, then carve out the frame below the red zone. */
826 sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
827 sp = (gpointer *)((char*)sp - frame_size);
828 copied_ctx = (MonoContext*)(sp + 4);
829 /* the arguments must be aligned */
830 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
831 mono_sigctx_to_monoctx (sigctx, copied_ctx);
832 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
833 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
834 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
/* SysV AMD64 argument registers: RDI/RSI/RDX = ctx, exc, stack_ovf. */
835 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
836 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
837 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
/*
 * Return the original IP saved in the current LMF and clear the tag bit 1
 * ("rip field set") from previous_lmf. The lines actually reading/returning
 * the ip are not visible in this excerpt.
 */
842 mono_amd64_get_original_ip (void)
844 MonoLMF *lmf = mono_get_lmf ();
848 /* Reset the change to previous_lmf */
849 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * Build the set of throw/resume trampolines needed by LLVM-compiled code and
 * return them as a GSList of MonoTrampInfo* (caller takes ownership).
 */
856 mono_amd64_get_exception_trampolines (gboolean aot)
859 GSList *tramps = NULL;
861 /* LLVM needs different throw trampolines */
862 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
863 tramps = g_slist_prepend (tramps, info);
865 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
866 tramps = g_slist_prepend (tramps, info);
868 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
869 tramps = g_slist_prepend (tramps, info);
873 #endif /* !DISABLE_JIT */
/*
 * Register the architecture's exception trampolines as JIT icalls.
 * AOT path (top): fetch prebuilt trampolines from the AOT image.
 * JIT path (bottom): generate them now -- also done eagerly here to avoid
 * initialization races later -- then register and free the list.
 */
876 mono_arch_exceptions_init (void)
882 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
883 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
884 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
885 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
886 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
887 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
889 /* Call this to avoid initialization races */
890 tramps = mono_amd64_get_exception_trampolines (FALSE);
891 for (l = tramps; l; l = l->next) {
892 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
894 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
895 mono_tramp_info_register (info, NULL);
897 g_slist_free (tramps);
901 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
/*
 * Allocate a zeroed Windows x64 UNWIND_INFO record (Version 1) and return it
 * through @monoui. Caller owns the allocation.
 */
904 mono_arch_unwindinfo_create (gpointer* monoui)
906 PUNWIND_INFO newunwindinfo;
907 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
908 newunwindinfo->Version = 1;
/*
 * Append a UWOP_PUSH_NONVOL unwind code (push of a nonvolatile register) for
 * @unwind_op. Unwind codes are filled from the END of the UnwindCode array
 * toward the beginning so they end up sorted by descending CodeOffset, as the
 * Windows x64 unwind machinery requires. Ops must be added in prolog order.
 */
912 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
914 PUNWIND_CODE unwindcode;
917 g_assert (unwindinfo != NULL);
919 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
920 g_error ("Larger allocation needed for the unwind information.");
922 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
923 unwindcode = &unwindinfo->UnwindCode [codeindex];
924 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
925 unwindcode->CodeOffset = (guchar)unwind_op->when;
926 unwindcode->OpInfo = unwind_op->reg;
928 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
929 g_error ("Adding unwind info in wrong order.");
931 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append a UWOP_SET_FPREG unwind code (establish a frame pointer register).
 * FrameOffset is scaled by 16 per the UNWIND_INFO format, hence the assert
 * that the offset is 16-byte aligned. Same end-to-front, prolog-order
 * bookkeeping as mono_arch_unwindinfo_add_push_nonvol.
 */
935 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
937 PUNWIND_CODE unwindcode;
940 g_assert (unwindinfo != NULL);
942 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
943 g_error ("Larger allocation needed for the unwind information.");
945 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
946 unwindcode = &unwindinfo->UnwindCode [codeindex];
947 unwindcode->UnwindOp = UWOP_SET_FPREG;
948 unwindcode->CodeOffset = (guchar)unwind_op->when;
950 g_assert (unwind_op->val % 16 == 0);
951 unwindinfo->FrameRegister = unwind_op->reg;
952 unwindinfo->FrameOffset = unwind_op->val / 16;
954 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
955 g_error ("Adding unwind info in wrong order.");
957 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/*
 * Append a stack-allocation unwind code. Per the Windows x64 format the
 * number of slots depends on the size: UWOP_ALLOC_SMALL (1 slot, 8..128
 * bytes encoded as (size-8)/8), UWOP_ALLOC_LARGE OpInfo==0 (2 slots,
 * size/8 in the next slot, up to 512K-8), or UWOP_ALLOC_LARGE OpInfo==1
 * (3 slots, unscaled 32-bit size in the next two slots). The branch choosing
 * codesneeded is only partially visible in this excerpt.
 */
961 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
963 PUNWIND_CODE unwindcode;
968 g_assert (unwindinfo != NULL);
970 size = unwind_op->val;
973 g_error ("Stack allocation must be equal to or greater than 0x8.");
977 else if (size <= 0x7FFF8)
982 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
983 g_error ("Larger allocation needed for the unwind information.");
/* Reserve codesneeded slots from the end of the array (descending order). */
985 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
986 unwindcode = &unwindinfo->UnwindCode [codeindex];
988 unwindcode->CodeOffset = (guchar)unwind_op->when;
990 if (codesneeded == 1) {
991 /*The size of the allocation is
992 (the number in the OpInfo member) times 8 plus 8*/
993 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
994 unwindcode->OpInfo = (size - 8)/8;
997 if (codesneeded == 3) {
998 /*the unscaled size of the allocation is recorded
999 in the next two slots in little-endian format.
1000 NOTE, unwind codes are allocated from end to beginning of list so
1001 unwind code will have right execution order. List is sorted on CodeOffset
1002 using descending sort order.*/
1003 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1004 unwindcode->OpInfo = 1;
1005 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1008 /*the size of the allocation divided by 8
1009 is recorded in the next slot.
1010 NOTE, unwind codes are allocated from end to beginning of list so
1011 unwind code will have right execution order. List is sorted on CodeOffset
1012 using descending sort order.*/
1013 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1014 unwindcode->OpInfo = 0;
1015 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
1019 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1020 g_error ("Adding unwind info in wrong order.");
1022 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
1025 static gboolean g_dyn_func_table_inited;
1027 // Dynamic function table used when registering unwind info for OS unwind support.
1028 static GList *g_dynamic_function_table_begin;
1029 static GList *g_dynamic_function_table_end;
1031 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1032 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1034 // Module handle used when explicit loading ntdll.
1035 static HMODULE g_ntdll;
1037 // If Win8 or Win2012Server or later, use growable function tables instead
1038 // of callbacks. Callback solution will still be fallback on older systems.
1039 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1040 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1041 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1043 // When using function table callback solution an out of proc module is needed by
1044 // debuggers in order to read unwind info from debug target.
1046 #define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
1048 #define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
1051 #define MONO_DAC_MODULE_MAX_PATH 1024
/*
 * One-time initialization of the dynamic function table state. Caller must
 * hold g_dynamic_function_table_lock exclusively (hence "_no_lock").
 * Resolves the growable-function-table APIs from ntdll at runtime; on
 * pre-Win8 systems GetProcAddress simply fails and the pointers stay NULL,
 * which selects the callback-based fallback elsewhere.
 */
1054 init_table_no_lock (void)
1056 if (g_dyn_func_table_inited == FALSE) {
1057 g_assert_checked (g_dynamic_function_table_begin == NULL);
1058 g_assert_checked (g_dynamic_function_table_end == NULL);
1059 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1060 g_assert_checked (g_rtl_grow_function_table == NULL);
1061 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1062 g_assert_checked (g_ntdll == NULL);
1064 // Load functions available on Win8/Win2012Server or later. If running on earlier
1065 // systems the below GetProcAddress will fail, this is expected behavior.
1066 if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
1067 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
1068 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
1069 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
1072 g_dyn_func_table_inited = TRUE;
/*
 * Public, thread-safe wrapper: double-checked init of the dynamic function
 * table under the SRW lock (unlocked flag check, then init under the lock,
 * which re-checks the flag).
 */
1077 mono_arch_unwindinfo_init_table (void)
1079 if (g_dyn_func_table_inited == FALSE) {
1081 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1083 init_table_no_lock ();
1085 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/*
 * Tear down the dynamic function table: free all entries and the list,
 * clear the resolved ntdll function pointers, release the ntdll module
 * handle, and reset the inited flag. Caller must hold
 * g_dynamic_function_table_lock exclusively. (Per-element cleanup lines are
 * not visible in this excerpt.)
 */
1090 terminate_table_no_lock (void)
1092 if (g_dyn_func_table_inited == TRUE) {
1093 if (g_dynamic_function_table_begin != NULL) {
1094 // Free all list elements.
1095 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1103 g_list_free (g_dynamic_function_table_begin);
1104 g_dynamic_function_table_begin = NULL;
1105 g_dynamic_function_table_end = NULL;
1108 g_rtl_delete_growable_function_table = NULL;
1109 g_rtl_grow_function_table = NULL;
1110 g_rtl_add_growable_function_table = NULL;
1112 if (g_ntdll != NULL) {
1113 FreeLibrary (g_ntdll);
1117 g_dyn_func_table_inited = FALSE;
/*
 * Public, thread-safe wrapper around terminate_table_no_lock, mirroring
 * mono_arch_unwindinfo_init_table's double-checked locking shape.
 */
1122 mono_arch_unwindinfo_terminate_table (void)
1124 if (g_dyn_func_table_inited == TRUE) {
1126 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1128 terminate_table_no_lock ();
1130 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/*
 * Fast-path lookup of [begin_range, end_range) against the table boundaries.
 * The list is kept sorted by descending begin_range, so the first element has
 * the highest range. Returns the matching GList node or NULL; when NULL,
 * *continue_search tells the caller whether a full scan could still find a
 * match (range lies between first and last entry) or is pointless.
 */
1135 fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
1137 GList *found_entry = NULL;
1139 // Fast path, look at boundaries.
1140 if (g_dynamic_function_table_begin != NULL) {
1141 DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
1142 DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? g_dynamic_function_table_end->data : first_entry;
1144 // Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
1145 if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
1146 // Entry belongs to first entry in list.
1147 found_entry = g_dynamic_function_table_begin;
1148 *continue_search = FALSE;
1150 if (first_entry != NULL && first_entry->begin_range >= begin_range) {
1151 if (last_entry != NULL && last_entry->begin_range <= begin_range) {
1152 // Entry has a range that could exist in table, continue search.
1153 *continue_search = TRUE;
1162 static inline DynamicFunctionTableEntry *
1163 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1165 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1166 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Finds the GList node whose entry covers EXACTLY [code_block, code_block +
// block_size) — note the equality comparison below, unlike the pc lookup.
// Tries the boundary fast path first, then scans the (descending-sorted) list.
// Caller must hold g_dynamic_function_table_lock.
// NOTE(review): the early-return body and the match/return statements inside
// the loop are elided in this extract.
1170 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1172 GList *found_entry = NULL;
1173 gboolean continue_search = FALSE;
1175 gsize begin_range = (gsize)code_block;
1176 gsize end_range = begin_range + block_size;
1178 // Fast path, check table boundaries.
1179 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
1180 if (found_entry || continue_search == FALSE)
1183 // Scan table for an entry including range.
1184 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1185 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1186 g_assert_checked (current_entry != NULL);
1188 // Do we have a match?
1189 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
1198 static inline DynamicFunctionTableEntry *
1199 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1201 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1202 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Finds the GList node whose entry's range CONTAINS pc (inclusive bounds
// check below, unlike the exact-range lookup). Tries the boundary fast path
// first, then scans the (descending-sorted) list.
// Caller must hold g_dynamic_function_table_lock.
// NOTE(review): the early-return body and the match/return statements inside
// the loop are elided in this extract.
1206 find_pc_in_table_no_lock_ex (const gpointer pc)
1208 GList *found_entry = NULL;
1209 gboolean continue_search = FALSE;
// A pc lookup is a zero-length range: begin == end == pc.
1211 gsize begin_range = (gsize)pc;
1212 gsize end_range = begin_range;
1214 // Fast path, check table boundaries.
1215 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
1216 if (found_entry || continue_search == FALSE)
1219 // Scan table for an entry including range.
1220 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1221 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1222 g_assert_checked (current_entry != NULL);
1224 // Do we have a match?
1225 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
1234 static inline DynamicFunctionTableEntry *
1235 find_pc_in_table_no_lock (const gpointer pc)
1237 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1238 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
#ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build validation: the dynamic function table must be sorted in
// descending order on begin_range and must not contain overlapping ranges.
// Each invariant asserts individually so a failure pinpoints what broke.
// Caller must hold g_dynamic_function_table_lock.
static void
validate_table_no_lock (void)
{
	if (g_dynamic_function_table_begin == NULL)
		return;

	// A non-empty list always has a valid tail pointer.
	g_assert_checked (g_dynamic_function_table_end != NULL);

	DynamicFunctionTableEntry *previous = NULL;
	for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
		DynamicFunctionTableEntry *current = (DynamicFunctionTableEntry *)node->data;

		g_assert_checked (current != NULL);
		g_assert_checked (current->end_range > current->begin_range);

		if (previous != NULL) {
			// List should be sorted in descending order on begin_range.
			g_assert_checked (previous->begin_range > current->begin_range);

			// Check for overlapped regions.
			g_assert_checked (previous->begin_range >= current->end_range);
		}

		previous = current;
	}
}
#else

// Release builds: validation compiles away to a no-op.
static void
validate_table_no_lock (void)
{
}
#endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
1281 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
// Registers the code range [code_block, code_block + block_size) in the
// dynamic function table (idempotent: an existing exact-range entry is
// returned as-is). On the growable-table path the entry is registered with
// the OS via RtlAddGrowableFunctionTable; otherwise a per-range callback is
// installed via RtlInstallFunctionTableCallback together with the out-of-proc
// DAC module path. Takes g_dynamic_function_table_lock exclusively.
// NOTE(review): error-handling branches, several closing braces, and parts of
// the DAC-path/return are elided in this extract; comments annotate only the
// visible statements.
1283 DynamicFunctionTableEntry *
1284 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1286 DynamicFunctionTableEntry *new_entry = NULL;
1288 gsize begin_range = (gsize)code_block;
1289 gsize end_range = begin_range + block_size;
1291 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
// Lazy init so callers don't need a separate setup call.
1292 init_table_no_lock ();
// Idempotency check: reuse an existing entry covering this exact range.
1293 new_entry = find_range_in_table_no_lock (code_block, block_size);
1294 if (new_entry == NULL) {
1295 // Allocate new entry.
1296 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1297 if (new_entry != NULL) {
1299 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1300 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1301 InitializeSRWLock (&new_entry->lock);
1302 new_entry->handle = NULL;
1303 new_entry->begin_range = begin_range;
1304 new_entry->end_range = end_range;
1305 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1306 new_entry->rt_funcs_current_count = 0;
1307 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1309 if (new_entry->rt_funcs != NULL) {
1310 // Check insert on boundaries. List is sorted descending on begin_range.
1311 if (g_dynamic_function_table_begin == NULL) {
// First entry: list head and tail are the same node.
1312 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1313 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1314 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1315 // Insert at the head.
1316 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1317 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
// Insert at the tail: append after the current end node and advance the tail pointer.
1319 g_list_append (g_dynamic_function_table_end, new_entry);
1320 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1322 // Search and insert at correct position (first node with a smaller begin_range).
1323 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1324 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1325 g_assert_checked (current_entry != NULL);
1327 if (current_entry->begin_range < new_entry->begin_range) {
1328 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1334 // Register dynamic function table entry with OS.
1335 if (g_rtl_add_growable_function_table != NULL) {
1336 // Allocate new growable handle table for entry.
1337 g_assert_checked (new_entry->handle == NULL);
1338 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1339 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1340 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
// Fallback path: growable tables unavailable, install a callback instead.
1343 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1344 WCHAR *path = buffer;
1346 // DAC module should be in the same directory as the
// host module; derive its path from the current module's path.
1348 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1349 path = wcsrchr (buffer, TEXT('\\'));
1355 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1358 // Register function table callback + out of proc module.
// Handle encoding: begin_range | 3 is the identifier format
// RtlInstallFunctionTableCallback requires for callback tables.
1359 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
1360 BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
1361 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1362 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1366 // Only included in checked builds. Validates the structure of table after insert.
1367 validate_table_no_lock ();
1375 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Unlinks and frees one table entry (given as its GList node): deregisters it
// from the OS (growable table or classic function table), frees its
// RUNTIME_FUNCTION array, the entry itself, and the list node. Tolerates a
// NULL argument (no-op). Caller must hold g_dynamic_function_table_lock
// exclusively.
// NOTE(review): the else branch structure around the two OS-deregistration
// calls and several closing braces are elided in this extract.
1381 remove_range_in_table_no_lock (GList *entry)
1383 if (entry != NULL) {
// Keep the cached tail pointer valid when removing the last node.
1384 if (entry == g_dynamic_function_table_end)
1385 g_dynamic_function_table_end = entry->prev;
1387 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1388 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1390 g_assert_checked (removed_entry != NULL);
1391 g_assert_checked (removed_entry->rt_funcs != NULL);
1393 // Remove function table from OS.
1394 if (removed_entry->handle != NULL) {
1395 if (g_rtl_delete_growable_function_table != NULL) {
1396 g_rtl_delete_growable_function_table (removed_entry->handle);
// Callback-registered tables are removed via the classic API.
1398 RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
// Free entry resources, then the detached list node itself.
1402 g_free (removed_entry->rt_funcs);
1403 g_free (removed_entry);
1405 g_list_free_1 (entry);
1408 // Only included in checked builds. Validates the structure of table after remove.
1409 validate_table_no_lock ();
1413 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1415 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1417 GList *found_entry = find_pc_in_table_no_lock_ex (code);
1419 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1420 remove_range_in_table_no_lock (found_entry);
1422 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1426 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1428 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1430 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1432 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1433 remove_range_in_table_no_lock (found_entry);
1435 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Looks up the RUNTIME_FUNCTION covering [code, code + code_size). First
// finds the table entry containing the pc (table lock held shared), then
// scans that entry's RUNTIME_FUNCTION array (entry lock held shared).
// Returns NULL when no entry or rt func covers the range.
// NOTE(review): the break statement / closing braces inside the scan loop are
// elided in this extract.
1439 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1441 PRUNTIME_FUNCTION found_rt_func = NULL;
1443 gsize begin_range = (gsize)code;
1444 gsize end_range = begin_range + code_size;
1446 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1448 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1450 if (found_entry != NULL) {
// Per-entry lock protects the rt_funcs array against concurrent inserts.
1452 AcquireSRWLockShared (&found_entry->lock);
1454 g_assert_checked (found_entry->begin_range <= begin_range);
1455 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1456 g_assert_checked (found_entry->rt_funcs != NULL);
1458 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1459 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1461 // Is this our RT function entry?
// RUNTIME_FUNCTION addresses are offsets relative to the entry's base.
1462 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1463 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1464 found_rt_func = current_rt_func;
1469 ReleaseSRWLockShared (&found_entry->lock);
1472 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1474 return found_rt_func;
1477 inline PRUNTIME_FUNCTION
1478 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1480 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1483 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build validation of one entry's RUNTIME_FUNCTION array: counts
// consistent, each func well-formed (Begin < End <= UnwindData), array sorted
// ascending on BeginAddress with no overlapping regions.
// Caller must hold the entry's lock (at least shared).
// NOTE(review): return type line, braces, and the #else directive separating
// the release-build stub below are elided in this extract.
1485 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1487 // Validation method checking that runtime function table is sorted as expected and don't include overlapped regions.
1488 // Method will assert on failure to explicitly indicate what check failed.
1489 g_assert_checked (entry != NULL);
1490 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1491 g_assert_checked (entry->rt_funcs != NULL);
1493 PRUNTIME_FUNCTION current_rt_func = NULL;
1494 PRUNTIME_FUNCTION previous_rt_func = NULL;
1495 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1496 current_rt_func = &(entry->rt_funcs [i]);
1498 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
1499 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1501 if (previous_rt_func != NULL) {
1502 // List should be sorted in ascending order based on BeginAddress.
1503 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1505 // Check for overlapped regions.
1506 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1509 previous_rt_func = current_rt_func;
// Release-build stub: validation compiles away to a no-op.
1516 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1520 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Adds a RUNTIME_FUNCTION for [code, code + code_size) into the table entry
// that already covers the pc. Fast path appends into spare capacity; slow
// path reallocates a larger sorted array and re-registers it with the OS.
// Takes the global table lock shared (the entry set doesn't change) and the
// entry's own lock exclusively (its rt_funcs array does).
// NOTE(review): several statements are elided in this extract — notably the
// count increment after the fast-path append, the copy of the remaining tail
// in the realloc path, and various closing braces; comments annotate only
// the visible statements.
1523 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1525 PRUNTIME_FUNCTION new_rt_func = NULL;
1527 gsize begin_range = (gsize)code;
1528 gsize end_range = begin_range + code_size;
1530 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1532 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1534 if (found_entry != NULL) {
1536 AcquireSRWLockExclusive (&found_entry->lock);
1538 g_assert_checked (found_entry->begin_range <= begin_range);
1539 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1540 g_assert_checked (found_entry->rt_funcs != NULL);
1541 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
// RUNTIME_FUNCTION members are offsets relative to the entry's base address.
1543 gsize code_offset = (gsize)code - found_entry->begin_range;
1544 gsize entry_count = found_entry->rt_funcs_current_count;
1545 gsize max_entry_count = found_entry->rt_funcs_max_count;
1546 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1548 RUNTIME_FUNCTION new_rt_func_data;
1549 new_rt_func_data.BeginAddress = code_offset;
1550 new_rt_func_data.EndAddress = code_offset + code_size;
// Unwind info is stored right after the method's code, aligned to mgreg_t.
1552 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
1553 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1555 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));
1557 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1559 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list is
1560 // going to be directly reused in OS func tables). Check if we can append to end of existing table without realloc.
1561 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1562 new_rt_func = &(current_rt_funcs [entry_count]);
1563 *new_rt_func = new_rt_func_data;
1566 // No easy way out, need to realloc, grow to double size (or current max, if too small).
1567 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1568 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1570 if (new_rt_funcs != NULL) {
1571 gsize from_index = 0;
1574 // Copy from old table into new table. Make sure new rt func gets inserted
1575 // into correct location based on sort order.
1576 for (; from_index < entry_count; ++from_index) {
1577 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1578 new_rt_func = &(new_rt_funcs [to_index++]);
1579 *new_rt_func = new_rt_func_data;
// Skip uninitialized slots (UnwindData == 0) while compacting.
1582 if (current_rt_funcs [from_index].UnwindData != 0)
1583 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1586 // If we didn't insert by now, put it last in the list.
1587 if (new_rt_func == NULL) {
1588 new_rt_func = &(new_rt_funcs [to_index]);
1589 *new_rt_func = new_rt_func_data;
1596 // Update the stats for current entry.
1597 found_entry->rt_funcs_current_count = entry_count;
1598 found_entry->rt_funcs_max_count = max_entry_count;
// Four cases: in-place growth with/without OS growable tables, and a
// replacement array with/without OS growable tables (callback mode).
1600 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1601 // No new table just report increase in use.
1602 g_assert_checked (found_entry->handle != NULL);
1603 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1604 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1605 // New table, delete old table and rt funcs, and register a new one.
1606 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1607 g_rtl_delete_growable_function_table (found_entry->handle);
1608 found_entry->handle = NULL;
1609 g_free (found_entry->rt_funcs);
1610 found_entry->rt_funcs = new_rt_funcs;
1611 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1612 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1613 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1615 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1616 // No table registered with OS, callback solution in use. Switch tables.
1617 g_free (found_entry->rt_funcs);
1618 found_entry->rt_funcs = new_rt_funcs;
1619 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1620 // No table registered with OS, callback solution in use, nothing to do.
// Any other combination is a logic error.
1622 g_assert_not_reached ();
1625 // Only included in checked builds. Validates the structure of table after insert.
1626 validate_rt_funcs_in_table_no_lock (found_entry);
1628 ReleaseSRWLockExclusive (&found_entry->lock);
1631 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1636 static PRUNTIME_FUNCTION
1637 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1639 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
// Replays the DWARF-style unwind ops collected by the JIT into Windows x64
// UNWIND_INFO format. Tolerates NULL arguments (no-op). Only register pushes
// that occur before any SP/FP allocation are emitted as UWOP_PUSH_NONVOL,
// per Windows x64 ABI prolog rules.
// NOTE(review): break statements, the default case, and closing braces of the
// switch/loop are elided in this extract.
1643 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1645 if (unwind_ops != NULL && unwindinfo != NULL) {
1646 MonoUnwindOp *unwind_op_data;
// Track whether a stack/frame-pointer allocation has already been replayed.
1647 gboolean sp_alloced = FALSE;
1648 gboolean fp_alloced = FALSE;
1650 // Replay collected unwind info and setup Windows format.
1651 for (GSList *l = unwind_ops; l; l = l->next) {
1652 unwind_op_data = (MonoUnwindOp *)l->data;
1653 switch (unwind_op_data->op) {
1654 case DW_CFA_offset : {
1655 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1656 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
1657 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1658 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1661 case DW_CFA_mono_sp_alloc_info_win64 : {
1662 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
1666 case DW_CFA_mono_fp_alloc_info_win64 : {
1667 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
1679 initialize_unwind_info_internal (GSList *unwind_ops)
1681 PUNWIND_INFO unwindinfo;
1683 mono_arch_unwindinfo_create (&unwindinfo);
1684 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
1690 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1692 UNWIND_INFO unwindinfo = {0};
1693 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1694 return unwindinfo.CountOfCodes;
// Builds the arch-specific unwind info for a method being compiled and stores
// it on cfg->arch.unwindinfo (must not already be set). Returns the byte size
// the serialized unwind info will occupy (per its code count), so the caller
// can reserve space after the method body.
1698 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1700 MonoCompile * current_cfg = (MonoCompile *)cfg;
1701 g_assert (current_cfg->arch.unwindinfo == NULL);
1702 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1703 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
// Copies the heap-allocated unwind info (*monoui) into its final, mgreg_t-
// aligned location directly after the method's code, compacting the unwind
// code array (codes are built from the end of the fixed-size buffer), frees
// the temporary buffer, and registers a RUNTIME_FUNCTION for the method.
// NOTE(review): local declarations (e.g. of `codecount`/`previous`), the
// validation loop's interior, and closing braces are elided in this extract;
// comments annotate only the visible statements.
1707 mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
1709 PUNWIND_INFO unwindinfo, targetinfo;
1711 guint64 targetlocation;
1715 unwindinfo = (PUNWIND_INFO)*monoui;
// Target location: first byte past the method code, aligned to mgreg_t.
1716 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1717 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));
// Copy the UNWIND_INFO header (everything except the code array).
1719 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1721 codecount = unwindinfo->CountOfCodes;
// Codes were accumulated at the tail of the fixed-size source array; pack
// them to the front of the target.
1723 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1724 sizeof (UNWIND_CODE) * codecount);
1727 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1729 // Validate the order of unwind op codes in checked builds. Offset should be in descending order.
1730 // In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
1732 for (int current = 0; current < codecount; current++) {
1733 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
1735 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1736 if (targetinfo->UnwindCode [current].OpInfo == 0) {
1744 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// The temporary buffer allocated by initialize_unwind_info_internal is now
// fully copied; release it. (*monoui is left dangling by design — the
// installed copy after the code is the authoritative one.)
1746 g_free (unwindinfo);
1749 // Register unwind info in table.
1750 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
1754 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1756 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1757 if (unwindinfo != NULL) {
1758 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
// Code-manager hook: a new code chunk was allocated, so cover its whole range
// in the dynamic function table; unwind info for individual methods is
// attached later as they are emitted into the chunk.
void
mono_arch_code_chunk_new (void *chunk, int size)
{
	mono_arch_unwindinfo_insert_range_in_table (chunk, size);
}
// Code-manager hook: a code chunk is being destroyed, so remove its entry
// (looked up by its start address) from the dynamic function table.
void
mono_arch_code_chunk_destroy (void *chunk)
{
	mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
}
1772 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1774 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
// Emits (once, cached in `saved`) a small native stub that restores a saved
// continuation: copies the saved stack back over the live stack with rep movs,
// restores rbp/rsp from the continuation's LMF, and jumps to the saved return
// address with `state` in rax as the stub's return value.
// NOTE(review): several lines are elided in this extract — the `saved` cache
// check, the rep-movs emission, the lmf_addr bookkeeping between the two
// mov-to-R14/R12 variants, and the final `saved = start` store; comments
// annotate only the visible statements.
1775 MonoContinuationRestore
1776 mono_tasklets_arch_restore (void)
1778 static guint8* saved = NULL;
1779 guint8 *code, *start;
1780 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1781 const guint kMaxCodeSize = 64;
// Stub already generated: return the cached entry point.
1785 return (MonoContinuationRestore)saved;
1786 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1787 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1788 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1789 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1790 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1791 * We move cont to cont_reg since we need both rcx and rdi for the copy
1792 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1794 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1795 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1796 /* setup the copy of the stack */
// rcx = stack_used_size / 8: rep movs copies 8-byte words.
1797 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1798 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
// rsi = source (saved stack buffer), rdi = destination (original sp).
1800 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1801 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1802 amd64_prefix (code, X86_REP_PREFIX);
1805 /* now restore the registers from the LMF */
1806 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1807 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1808 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
// Stash lmf_addr in a callee-saved register (which one depends on the
// build configuration guarding these two emissions).
1811 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1813 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1816 /* state is already in rax */
1817 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1818 g_assert ((code - start) <= kMaxCodeSize);
1820 mono_arch_flush_icache (start, code - start);
1821 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
1824 return (MonoContinuationRestore)saved;
1826 #endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
1829 * mono_arch_setup_resume_sighandler_ctx:
1831 * Setup CTX so execution continues at FUNC.
1834 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1837 * When resuming from a signal handler, the stack should be misaligned, just like right after
1840 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1841 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1842 MONO_CONTEXT_SET_IP (ctx, func);
// DISABLE_JIT stubs: in JIT-less builds none of the AMD64 exception
// trampolines can be generated, so each entry point simply asserts if
// reached. NOTE(review): return-type lines, braces, and any trailing
// `return` statements are elided in this extract.
1847 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1849 g_assert_not_reached ();
1854 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1856 g_assert_not_reached ();
1861 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1863 g_assert_not_reached ();
1868 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1870 g_assert_not_reached ();
1875 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1877 g_assert_not_reached ();
1882 mono_amd64_get_exception_trampolines (gboolean aot)
1884 g_assert_not_reached ();
1887 #endif /* DISABLE_JIT */
1889 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
1890 MonoContinuationRestore
1891 mono_tasklets_arch_restore (void)
1893 g_assert_not_reached ();
1896 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */