3 * exception support for AMD64
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Johan Lorensson (lateralusx.github@gmail.com)
9 * (C) 2001 Ximian, Inc.
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 // Secret password to unlock wcscat_s on mxe, must happen before string.h included
18 #define MINGW_HAS_SECURE_API 1
27 #ifdef HAVE_UCONTEXT_H
31 #include <mono/arch/amd64/amd64-codegen.h>
32 #include <mono/metadata/abi-details.h>
33 #include <mono/metadata/appdomain.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/threads.h>
36 #include <mono/metadata/threads-types.h>
37 #include <mono/metadata/debug-helpers.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/gc-internals.h>
40 #include <mono/metadata/mono-debug.h>
41 #include <mono/utils/mono-mmap.h>
44 #include "mini-amd64.h"
47 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Win32 SEH support: per-signal handler slots, filled in by win32_seh_set_handler (). */
50 static MonoW32ExceptionHandler fpe_handler;
51 static MonoW32ExceptionHandler ill_handler;
52 static MonoW32ExceptionHandler segv_handler;
/* Filter that was installed before the runtime; restored in win32_seh_cleanup (). */
54 LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
/* Handle returned by AddVectoredExceptionHandler, used for later removal. */
55 void *mono_win_vectored_exception_handle;
/* Invoke the registered handler for exception kind _ex (fpe/ill/segv), if any. */
57 #define W32_SEH_HANDLE_EX(_ex) \
58 if (_ex##_handler) _ex##_handler(0, ep, ctx)
/*
 * seh_unhandled_exception_filter:
 *
 * Last-chance Win32 top-level exception filter. Chains to whatever filter
 * was installed before the runtime; otherwise reports a native crash and
 * lets the OS continue the handler search.
 */
60 static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
62 #ifndef MONO_CROSS_COMPILE
63 if (mono_old_win_toplevel_exception_filter) {
64 return (*mono_old_win_toplevel_exception_filter)(ep);
/* No previous filter to chain to: treat this as a native crash. */
68 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
70 return EXCEPTION_CONTINUE_SEARCH;
74 * Unhandled Exception Filter
75 * Top-level per-process exception handler.
77 static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
82 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
84 /* If the thread is not managed by the runtime return early */
86 return EXCEPTION_CONTINUE_SEARCH;
/* Handlers set this flag when they want the OS to keep searching. */
88 jit_tls->mono_win_chained_exception_needs_run = FALSE;
89 res = EXCEPTION_CONTINUE_EXECUTION;
91 er = ep->ExceptionRecord;
92 ctx = ep->ContextRecord;
/* Dispatch on the Win32 exception code to the matching Mono handler. */
94 switch (er->ExceptionCode) {
95 case EXCEPTION_ACCESS_VIOLATION:
96 W32_SEH_HANDLE_EX(segv);
98 case EXCEPTION_ILLEGAL_INSTRUCTION:
99 W32_SEH_HANDLE_EX(ill);
/* All integer/float arithmetic faults map to the FPE handler. */
101 case EXCEPTION_INT_DIVIDE_BY_ZERO:
102 case EXCEPTION_INT_OVERFLOW:
103 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
104 case EXCEPTION_FLT_OVERFLOW:
105 case EXCEPTION_FLT_UNDERFLOW:
106 case EXCEPTION_FLT_INEXACT_RESULT:
107 W32_SEH_HANDLE_EX(fpe);
110 jit_tls->mono_win_chained_exception_needs_run = TRUE;
114 if (jit_tls->mono_win_chained_exception_needs_run) {
115 /* Don't copy context back if we chained exception
116 * as the handler may have modified the EXCEPTION_POINTERS
117 * directly. We don't pass sigcontext to chained handlers.
118 * Return continue search so the UnhandledExceptionFilter
119 * can correctly chain the exception.
121 res = EXCEPTION_CONTINUE_SEARCH;
/* Install the runtime's top-level filter and a first-position vectored handler. */
127 void win32_seh_init()
129 mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
130 mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
/* Undo win32_seh_init: restore the saved filter and remove the vectored handler. */
133 void win32_seh_cleanup()
137 if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);
139 ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
/* Register HANDLER for the given signal TYPE; stores into the matching slot
 * (SIGFPE/SIGILL/SIGSEGV — the selecting switch is elided in this view). */
143 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
147 fpe_handler = handler;
150 ill_handler = handler;
153 segv_handler = handler;
160 #endif /* TARGET_WIN32 */
164 * mono_arch_get_restore_context:
166 * Returns a pointer to a method which restores a previously saved sigcontext.
169 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
171 guint8 *start = NULL;
173 MonoJumpInfo *ji = NULL;
174 GSList *unwind_ops = NULL;
177 /* restore_context (MonoContext *ctx) */
179 start = code = (guint8 *)mono_global_codeman_reserve (256);
/* Keep the MonoContext pointer in %r11 while the other registers are clobbered. */
181 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
183 /* Restore all registers except %rip and %r11 */
184 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
185 for (i = 0; i < AMD64_NREG; ++i) {
186 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_R8 && i != AMD64_R9 && i != AMD64_R10 && i != AMD64_R11)
187 amd64_mov_reg_membase (code, i, AMD64_R11, gregs_offset + (i * 8), 8);
191 * The context resides on the stack, in the stack frame of the
192 * caller of this function. The stack pointer that we need to
193 * restore is potentially many stack frames higher up, so the
194 * distance between them can easily be more than the red zone
195 * size. Hence the stack pointer can be restored only after
196 * we have finished loading everything from the context.
198 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, gregs_offset + (AMD64_RSP * 8), 8);
199 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, gregs_offset + (AMD64_RIP * 8), 8);
200 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
202 /* jump to the saved IP */
203 amd64_jump_reg (code, AMD64_R11);
205 mono_arch_flush_icache (start, code - start);
206 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
209 *info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);
215 * mono_arch_get_call_filter:
217 * Returns a pointer to a method which calls an exception filter. We
218 * also use this function to call finally handlers (we pass NULL as
219 * @exc object in this case).
222 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
228 MonoJumpInfo *ji = NULL;
229 GSList *unwind_ops = NULL;
230 const guint kMaxCodeSize = 128;
232 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
234 /* call_filter (MonoContext *ctx, unsigned long eip) */
237 /* Alloc new frame */
238 amd64_push_reg (code, AMD64_RBP);
239 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
241 /* Save callee saved regs */
243 for (i = 0; i < AMD64_NREG; ++i)
244 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
245 amd64_push_reg (code, i);
251 amd64_push_reg (code, AMD64_RBP);
253 /* Make stack misaligned, the call will make it aligned again */
255 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
257 gregs_offset = MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Load the filter's frame pointer and callee-saved registers from the context. */
260 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, gregs_offset + (AMD64_RBP * 8), 8);
261 /* load callee saved regs */
262 for (i = 0; i < AMD64_NREG; ++i) {
263 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
264 amd64_mov_reg_membase (code, i, AMD64_ARG_REG1, gregs_offset + (i * 8), 8);
266 /* load exc register */
267 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, gregs_offset + (AMD64_RAX * 8), 8);
269 /* call the handler */
270 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the deliberate misalignment from above. */
273 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
276 amd64_pop_reg (code, AMD64_RBP);
278 /* Restore callee saved regs */
279 for (i = AMD64_NREG; i >= 0; --i)
280 if (AMD64_IS_CALLEE_SAVED_REG (i))
281 amd64_pop_reg (code, i);
/* Tear down the frame and return to the caller of call_filter. */
284 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
285 amd64_pop_reg (code, AMD64_RBP);
291 g_assert ((code - start) < kMaxCodeSize);
293 mono_arch_flush_icache (start, code - start);
294 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
297 *info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);
301 #endif /* !DISABLE_JIT */
304 * The first few arguments are dummy, to force the other arguments to be passed on
305 * the stack, this avoids overwriting the argument registers in the throw trampoline.
308 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
309 guint64 dummy5, guint64 dummy6,
310 MonoContext *mctx, MonoObject *exc, gboolean rethrow)
315 /* mctx is on the caller's stack */
316 memcpy (&ctx, mctx, sizeof (MonoContext));
/* For managed exception objects, reset the captured trace so a fresh one is
 * recorded for this throw (rethrow condition elided in this view). */
318 if (mono_object_isinst_checked (exc, mono_defaults.exception_class, &error)) {
319 MonoException *mono_ex = (MonoException*)exc;
321 mono_ex->stack_trace = NULL;
322 mono_ex->trace_ips = NULL;
325 mono_error_assert_ok (&error);
327 /* adjust eip so that it point into the call instruction */
328 ctx.gregs [AMD64_RIP] --;
/* Does not return: unwinds to the handler and restores its context. */
330 mono_handle_exception (&ctx, exc);
331 mono_restore_context (&ctx);
332 g_assert_not_reached ();
/* Build a corlib exception from EX_TOKEN_INDEX and throw it at the PC obtained
 * by subtracting PC_OFFSET from the saved RIP. */
336 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
337 guint64 dummy5, guint64 dummy6,
338 MonoContext *mctx, guint32 ex_token_index, gint64 pc_offset)
340 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
343 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
345 mctx->gregs [AMD64_RIP] -= pc_offset;
347 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
348 mctx->gregs [AMD64_RIP] += 1;
350 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, mctx, (MonoObject*)ex, FALSE);
/* Resume unwinding from the given context; dummy args keep the real
 * parameters on the stack, mirroring mono_amd64_throw_exception (). */
354 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
355 guint64 dummy5, guint64 dummy6,
356 MonoContext *mctx, guint32 dummy7, gint64 dummy8)
358 /* Only the register parameters are valid */
361 /* mctx is on the caller's stack */
362 memcpy (&ctx, mctx, sizeof (MonoContext));
364 mono_resume_unwind (&ctx);
369 * get_throw_trampoline:
371 * Generate a call to mono_amd64_throw_exception/
372 * mono_amd64_throw_corlib_exception.
375 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
379 MonoJumpInfo *ji = NULL;
380 GSList *unwind_ops = NULL;
381 int i, stack_size, arg_offsets [16], ctx_offset, regs_offset, dummy_stack_space;
382 const guint kMaxCodeSize = 256;
385 dummy_stack_space = 6 * sizeof(mgreg_t); /* Windows expects stack space allocated for all 6 dummy args. */
387 dummy_stack_space = 0;
/* AOT path reserves extra room for OS unwind info (Windows); JIT path does not. */
391 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
393 start = code = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
395 /* The stack is unaligned on entry */
396 stack_size = ALIGN_TO (sizeof (MonoContext) + 64 + dummy_stack_space, MONO_ARCH_FRAME_ALIGNMENT) + 8;
401 unwind_ops = mono_arch_get_cie_program ();
/* Allocate the frame and record matching unwind ops for it. */
404 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
406 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
407 mono_add_unwind_op_sp_alloc (unwind_ops, code, start, stack_size);
411 * To hide linux/windows calling convention differences, we pass all arguments on
412 * the stack by passing 6 dummy values in registers.
/* Stack layout of the outgoing call: arg slots, then the saved MonoContext. */
415 arg_offsets [0] = dummy_stack_space + 0;
416 arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
417 arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
418 ctx_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
419 regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
/* Save all general-purpose registers into the context. */
422 for (i = 0; i < AMD64_NREG; ++i)
424 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Reconstruct the caller's RSP (entry RSP plus the pushed return address). */
426 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
427 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
/* The return address on the stack becomes the context's RIP. */
429 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
430 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RIP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
431 /* Set arg1 == ctx */
432 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset);
433 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
434 /* Set arg2 == exc/ex_token_index */
436 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [1], 0, sizeof(mgreg_t));
438 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_ARG_REG1, sizeof(mgreg_t));
439 /* Set arg3 == rethrow/pc offset */
441 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
445 * The caller doesn't pass in a pc/pc offset, instead we simply use the
446 * caller ip. Negate the pc adjustment done in mono_amd64_throw_corlib_exception ().
448 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 1, sizeof(mgreg_t));
450 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG2, sizeof(mgreg_t));
452 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], rethrow, sizeof(mgreg_t));
/* Resolve the icall target: patched RIP-relative load under AOT, immediate otherwise. */
456 const char *icall_name;
459 icall_name = "mono_amd64_resume_unwind";
461 icall_name = "mono_amd64_throw_corlib_exception";
463 icall_name = "mono_amd64_throw_exception";
464 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
465 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
467 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
/* The icall does not return; the breakpoint catches any fall-through. */
469 amd64_call_reg (code, AMD64_R11);
470 amd64_breakpoint (code);
472 mono_arch_flush_icache (start, code - start);
474 g_assert ((code - start) < kMaxCodeSize);
475 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
477 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
480 *info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);
486 * mono_arch_get_throw_exception:
487 * \returns a function pointer which can be used to raise
488 * exceptions. The returned function has the following
489 * signature: void (*func) (MonoException *exc);
492 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
494 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
/* Same as mono_arch_get_throw_exception, but generates a rethrow trampoline. */
498 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
500 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
504 * mono_arch_get_throw_corlib_exception:
506 * Returns a function pointer which can be used to raise
507 * corlib exceptions. The returned function has the following
508 * signature: void (*func) (guint32 ex_token, guint32 offset);
509 * Here, offset is the offset which needs to be substracted from the caller IP
510 * to get the IP of the throw. Passing the offset has the advantage that it
511 * needs no relocations in the caller.
514 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
516 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
518 #endif /* !DISABLE_JIT */
521 * mono_arch_unwind_frame:
523 * This function is used to gather information from @ctx, and store it in @frame_info.
524 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
525 * is modified if needed.
526 * Returns TRUE on success, FALSE otherwise.
529 mono_arch_unwind_frame (MonoDomain *domain, MonoJitTlsData *jit_tls,
530 MonoJitInfo *ji, MonoContext *ctx,
531 MonoContext *new_ctx, MonoLMF **lmf,
532 mgreg_t **save_locations,
533 StackFrameInfo *frame)
535 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
538 memset (frame, 0, sizeof (StackFrameInfo));
/* Case 1: IP is inside JITted code — unwind using DWARF-style unwind info. */
544 mgreg_t regs [MONO_MAX_IREGS + 1];
546 guint32 unwind_info_len;
548 guint8 *epilog = NULL;
550 if (ji->is_trampoline)
551 frame->type = FRAME_TYPE_TRAMPOLINE;
553 frame->type = FRAME_TYPE_MANAGED;
555 unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);
557 frame->unwind_info = unwind_info;
558 frame->unwind_info_len = unwind_info_len;
/* Debug dump of the frame being unwound (normally compiled out/elided). */
561 printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
562 mono_print_unwind_info (unwind_info, unwind_info_len);
564 /* LLVM compiled code doesn't have this info */
565 if (ji->has_arch_eh_info)
566 epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);
568 for (i = 0; i < AMD64_NREG; ++i)
569 regs [i] = new_ctx->gregs [i];
571 mono_unwind_frame (unwind_info, unwind_info_len, (guint8 *)ji->code_start,
572 (guint8*)ji->code_start + ji->code_size,
573 (guint8 *)ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
574 save_locations, MONO_MAX_IREGS, &cfa);
576 for (i = 0; i < AMD64_NREG; ++i)
577 new_ctx->gregs [i] = regs [i];
579 /* The CFA becomes the new SP value */
580 new_ctx->gregs [AMD64_RSP] = (mgreg_t)cfa;
/* Point the IP into the call instruction so lookups hit the caller's line. */
583 new_ctx->gregs [AMD64_RIP] --;
/* Bit 2 of previous_lmf marks an extended LMF entry (MonoLMFExt). */
589 if (((guint64)(*lmf)->previous_lmf) & 2) {
590 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
592 if (ext->debugger_invoke) {
594 * This LMF entry is created by the soft debug code to mark transitions to
595 * managed code done during invokes.
597 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
598 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
599 } else if (ext->interp_exit) {
600 frame->type = FRAME_TYPE_INTERP_TO_MANAGED;
601 frame->interp_exit_data = ext->interp_exit_data;
603 g_assert_not_reached ();
/* The low 3 bits of previous_lmf are flags; mask them off to get the pointer. */
606 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
/* Determine the return address recorded in the LMF, depending on its kind. */
611 if (((guint64)(*lmf)->previous_lmf) & 4) {
612 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
614 rip = (guint64)MONO_CONTEXT_GET_IP (ext->ctx);
615 } else if (((guint64)(*lmf)->previous_lmf) & 1) {
616 /* This LMF has the rip field set */
618 } else if ((*lmf)->rsp == 0) {
623 * The rsp field is set just before the call which transitioned to native
624 * code. Obtain the rip from the stack.
626 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
629 ji = mini_jit_info_table_find (domain, (char *)rip, NULL);
631 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
632 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
640 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
642 if (((guint64)(*lmf)->previous_lmf) & 4) {
643 MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);
645 /* Trampoline frame */
646 for (i = 0; i < AMD64_NREG; ++i)
647 new_ctx->gregs [i] = ext->ctx->gregs [i];
/* Adjust IP into the call instruction, as in the JIT-frame path above. */
649 new_ctx->gregs [AMD64_RIP] --;
652 * The registers saved in the LMF will be restored using the normal unwind info,
653 * when the wrapper frame is processed.
657 new_ctx->gregs [AMD64_RIP] = rip;
658 new_ctx->gregs [AMD64_RSP] = (*lmf)->rsp;
659 new_ctx->gregs [AMD64_RBP] = (*lmf)->rbp;
/* Callee-saved registers other than RBP are unknown here; zero them. */
660 for (i = 0; i < AMD64_NREG; ++i) {
661 if (AMD64_IS_CALLEE_SAVED_REG (i) && i != AMD64_RBP)
662 new_ctx->gregs [i] = 0;
666 *lmf = (MonoLMF *)(((guint64)(*lmf)->previous_lmf) & ~7);
677 * Called by resuming from a signal handler.
680 handle_signal_exception (gpointer obj)
/* The faulting context was stashed in TLS by mono_arch_handle_exception (). */
682 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
685 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
687 mono_handle_exception (&ctx, (MonoObject *)obj);
689 mono_restore_context (&ctx);
/* Rewrite CTX so that, when resumed, execution continues in ASYNC_CB with
 * USER_DATA as its single argument, on a frame built below the red zone. */
693 mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
695 guint64 sp = ctx->gregs [AMD64_RSP];
697 ctx->gregs [AMD64_RDI] = (guint64)user_data;
699 /* Allocate a stack frame below the red zone */
701 /* The stack should be unaligned */
705 /* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
706 *(guint64*)sp = ctx->gregs [AMD64_RIP];
708 ctx->gregs [AMD64_RSP] = sp;
709 ctx->gregs [AMD64_RIP] = (guint64)async_cb;
713 * mono_arch_handle_exception:
714 * \param ctx saved processor state
715 * \param obj the exception object
718 mono_arch_handle_exception (void *sigctx, gpointer obj)
720 #if defined(MONO_ARCH_USE_SIGACTION)
724 * Handling the exception in the signal handler is problematic, since the original
725 * signal is disabled, and we could run arbitrary code though the debugger. So
726 * resume into the normal stack and do most work there if possible.
728 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
730 /* Pass the ctx parameter in TLS */
731 mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
/* Redirect the resumed thread into handle_signal_exception (). */
733 mctx = jit_tls->ex_ctx;
734 mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
735 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Non-sigaction fallback: handle the exception directly in the handler. */
741 mono_sigctx_to_monoctx (sigctx, &mctx);
743 mono_handle_exception (&mctx, obj);
745 mono_monoctx_to_sigctx (&mctx, sigctx);
/* Extract the instruction pointer from a platform-specific signal context. */
752 mono_arch_ip_from_context (void *sigctx)
754 #if defined(MONO_ARCH_USE_SIGACTION)
755 ucontext_t *ctx = (ucontext_t*)sigctx;
757 return (gpointer)UCONTEXT_REG_RIP (ctx);
758 #elif defined(HOST_WIN32)
759 return ((CONTEXT*)sigctx)->Rip;
/* Fallback: sigctx is already a MonoContext. */
761 MonoContext *ctx = sigctx;
762 return (gpointer)ctx->gregs [AMD64_RIP];
/* Re-protect the soft stack-overflow guard pages after the handler unwound. */
767 restore_soft_guard_pages (void)
769 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
770 if (jit_tls->stack_ovf_guard_base)
771 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
775 * this function modifies mctx so that when it is restored, it
776 * won't execcute starting at mctx.eip, but in a function that
777 * will restore the protection on the soft-guard pages and return back to
778 * continue at mctx.eip.
781 prepare_for_guard_pages (MonoContext *mctx)
784 sp = (gpointer *)(mctx->gregs [AMD64_RSP]);
786 /* the return addr */
787 sp [0] = (gpointer)(mctx->gregs [AMD64_RIP]);
/* Detour through restore_soft_guard_pages; it returns to the original RIP. */
788 mctx->gregs [AMD64_RIP] = (guint64)restore_soft_guard_pages;
789 mctx->gregs [AMD64_RSP] = (guint64)sp;
/* Entry point reached after the altstack signal handler redirects execution:
 * handle OBJ at CTX, fixing up guard pages first on stack overflow. */
793 altstack_handle_and_restore (MonoContext *ctx, MonoObject *obj, gboolean stack_ovf)
796 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), MONO_CONTEXT_GET_IP (ctx), NULL);
/* Fault outside managed code: nothing we can unwind — report a native crash. */
799 mono_handle_native_crash ("SIGSEGV", NULL, NULL);
803 mono_handle_exception (&mctx, obj);
805 prepare_for_guard_pages (&mctx);
806 mono_restore_context (&mctx);
/* Signal handler running on the alternate stack: build a call frame on the
 * faulting thread's real stack and redirect it into altstack_handle_and_restore (). */
810 mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
812 #if defined(MONO_ARCH_USE_SIGACTION)
813 MonoException *exc = NULL;
816 MonoContext *copied_ctx;
/* Stack overflow uses the preallocated exception object (no allocation here). */
819 exc = mono_domain_get ()->stack_overflow_ex;
821 /* setup a call frame on the real stack so that control is returned there
822 * and exception handling can continue.
823 * The frame looks like:
827 * 128 is the size of the red zone
829 frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
/* Align down, then reserve room below the red zone for the fake frame. */
832 sp = (gpointer *)(UCONTEXT_REG_RSP (sigctx) & ~15);
833 sp = (gpointer *)((char*)sp - frame_size);
834 copied_ctx = (MonoContext*)(sp + 4);
835 /* the arguments must be aligned */
836 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
837 mono_sigctx_to_monoctx (sigctx, copied_ctx);
838 /* at the return form the signal handler execution starts in altstack_handle_and_restore() */
839 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
840 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
/* Arguments for altstack_handle_and_restore per the SysV calling convention. */
841 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
842 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
843 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
/* Return the original IP recorded in the current LMF and clear the
 * "rip is set" tag bit from previous_lmf. */
848 mono_amd64_get_original_ip (void)
850 MonoLMF *lmf = mono_get_lmf ();
854 /* Reset the change to previous_lmf */
855 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Build the list of LLVM-specific exception trampolines (throw corlib,
 * throw corlib abs, resume unwind); caller owns the returned list. */
862 mono_amd64_get_exception_trampolines (gboolean aot)
865 GSList *tramps = NULL;
867 /* LLVM needs different throw trampolines */
868 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
869 tramps = g_slist_prepend (tramps, info);
871 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
872 tramps = g_slist_prepend (tramps, info);
874 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", aot);
875 tramps = g_slist_prepend (tramps, info);
879 #endif /* !DISABLE_JIT */
/* Register the exception trampolines as JIT icalls. AOT path looks them up
 * from the AOT image; otherwise they are generated eagerly here. */
882 mono_arch_exceptions_init (void)
888 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
889 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
890 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
891 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
892 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
893 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
895 /* Call this to avoid initialization races */
896 tramps = mono_amd64_get_exception_trampolines (FALSE);
897 for (l = tramps; l; l = l->next) {
898 MonoTrampInfo *info = (MonoTrampInfo *)l->data;
900 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
901 mono_tramp_info_register (info, NULL);
903 g_slist_free (tramps);
907 // Implies defined(TARGET_WIN32)
908 #ifdef MONO_ARCH_HAVE_UNWIND_TABLE
/* Allocate a zeroed Win64 UNWIND_INFO (version 1) and return it via MONOUI.
 * Caller owns the allocation. */
911 mono_arch_unwindinfo_create (gpointer* monoui)
913 PUNWIND_INFO newunwindinfo;
914 *monoui = newunwindinfo = g_new0 (UNWIND_INFO, 1);
915 newunwindinfo->Version = 1;
/* Append a UWOP_PUSH_NONVOL unwind code for UNWIND_OP (a register push in the
 * prolog). Codes are filled from the end of the array so they end up in the
 * descending CodeOffset order the OS unwinder requires. */
919 mono_arch_unwindinfo_add_push_nonvol (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
921 PUNWIND_CODE unwindcode;
924 g_assert (unwindinfo != NULL);
926 if (unwindinfo->CountOfCodes >= MONO_MAX_UNWIND_CODES)
927 g_error ("Larger allocation needed for the unwind information.");
929 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
930 unwindcode = &unwindinfo->UnwindCode [codeindex];
931 unwindcode->UnwindOp = UWOP_PUSH_NONVOL;
932 unwindcode->CodeOffset = (guchar)unwind_op->when;
933 unwindcode->OpInfo = unwind_op->reg;
/* Ops must be added in increasing prolog-offset order. */
935 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
936 g_error ("Adding unwind info in wrong order.");
938 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* Append a UWOP_SET_FPREG unwind code: record the frame register and its
 * offset from RSP (stored scaled by 16, per the Win64 unwind format). */
942 mono_arch_unwindinfo_add_set_fpreg (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
944 PUNWIND_CODE unwindcode;
947 g_assert (unwindinfo != NULL);
949 if (unwindinfo->CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
950 g_error ("Larger allocation needed for the unwind information.");
952 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->CountOfCodes);
953 unwindcode = &unwindinfo->UnwindCode [codeindex];
954 unwindcode->UnwindOp = UWOP_SET_FPREG;
955 unwindcode->CodeOffset = (guchar)unwind_op->when;
/* FrameOffset is expressed in 16-byte units, so the value must be aligned. */
957 g_assert (unwind_op->val % 16 == 0);
958 unwindinfo->FrameRegister = unwind_op->reg;
959 unwindinfo->FrameOffset = unwind_op->val / 16;
/* Ops must be added in increasing prolog-offset order. */
961 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
962 g_error ("Adding unwind info in wrong order.");
964 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* Append an UWOP_ALLOC_SMALL/UWOP_ALLOC_LARGE unwind code for a stack
 * allocation of unwind_op->val bytes. Small allocs (<= 128) need one slot,
 * medium (<= 0x7FFF8) two, large ones three (unscaled 32-bit size). */
968 mono_arch_unwindinfo_add_alloc_stack (PUNWIND_INFO unwindinfo, MonoUnwindOp *unwind_op)
970 PUNWIND_CODE unwindcode;
975 g_assert (unwindinfo != NULL);
977 size = unwind_op->val;
980 g_error ("Stack allocation must be equal to or greater than 0x8.");
984 else if (size <= 0x7FFF8)
989 if (unwindinfo->CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
990 g_error ("Larger allocation needed for the unwind information.");
992 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->CountOfCodes += codesneeded);
993 unwindcode = &unwindinfo->UnwindCode [codeindex];
995 unwindcode->CodeOffset = (guchar)unwind_op->when;
997 if (codesneeded == 1) {
998 /*The size of the allocation is
999 (the number in the OpInfo member) times 8 plus 8*/
1000 unwindcode->UnwindOp = UWOP_ALLOC_SMALL;
1001 unwindcode->OpInfo = (size - 8)/8;
1004 if (codesneeded == 3) {
1005 /*the unscaled size of the allocation is recorded
1006 in the next two slots in little-endian format.
1007 NOTE, unwind codes are allocated from end to begining of list so
1008 unwind code will have right execution order. List is sorted on CodeOffset
1009 using descending sort order.*/
1010 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1011 unwindcode->OpInfo = 1;
1012 *((unsigned int*)(&(unwindcode + 1)->FrameOffset)) = size;
1015 /*the size of the allocation divided by 8
1016 is recorded in the next slot.
1017 NOTE, unwind codes are allocated from end to begining of list so
1018 unwind code will have right execution order. List is sorted on CodeOffset
1019 using descending sort order.*/
1020 unwindcode->UnwindOp = UWOP_ALLOC_LARGE;
1021 unwindcode->OpInfo = 0;
1022 (unwindcode + 1)->FrameOffset = (gushort)(size/8);
/* Ops must be added in increasing prolog-offset order. */
1026 if (unwindinfo->SizeOfProlog >= unwindcode->CodeOffset)
1027 g_error ("Adding unwind info in wrong order.");
1029 unwindinfo->SizeOfProlog = unwindcode->CodeOffset;
/* One-time init flag for the dynamic function table machinery below. */
1032 static gboolean g_dyn_func_table_inited;
1034 // Dynamic function table used when registering unwind info for OS unwind support.
1035 static GList *g_dynamic_function_table_begin;
1036 static GList *g_dynamic_function_table_end;
1038 // SRW lock (lightweight read/writer lock) protecting dynamic function table.
1039 static SRWLOCK g_dynamic_function_table_lock = SRWLOCK_INIT;
1041 // Module handle used when explicit loading ntdll.
1042 static HMODULE g_ntdll;
1044 // If Win8 or Win2012Server or later, use growable function tables instead
1045 // of callbacks. Callback solution will still be fallback on older systems.
1046 static RtlAddGrowableFunctionTablePtr g_rtl_add_growable_function_table;
1047 static RtlGrowFunctionTablePtr g_rtl_grow_function_table;
1048 static RtlDeleteGrowableFunctionTablePtr g_rtl_delete_growable_function_table;
1050 // When using function table callback solution an out of proc module is needed by
1051 // debuggers in order to read unwind info from debug target.
1053 #define MONO_DAC_MODULE TEXT("mono-2.0-dac-sgen.dll")
1055 #define MONO_DAC_MODULE TEXT("mono-2.0-sgen.dll")
1058 #define MONO_DAC_MODULE_MAX_PATH 1024
/* Initialize the dynamic function table state; caller must hold the SRW lock
 * exclusively. Idempotent via g_dyn_func_table_inited. */
1061 init_table_no_lock (void)
1063 if (g_dyn_func_table_inited == FALSE) {
1064 g_assert_checked (g_dynamic_function_table_begin == NULL);
1065 g_assert_checked (g_dynamic_function_table_end == NULL);
1066 g_assert_checked (g_rtl_add_growable_function_table == NULL);
1067 g_assert_checked (g_rtl_grow_function_table == NULL);
1068 g_assert_checked (g_rtl_delete_growable_function_table == NULL);
1069 g_assert_checked (g_ntdll == NULL);
1071 // Load functions available on Win8/Win2012Server or later. If running on earlier
1072 // systems the below GetProceAddress will fail, this is expected behavior.
1073 if (GetModuleHandleEx (0, TEXT("ntdll.dll"), &g_ntdll) == TRUE) {
1074 g_rtl_add_growable_function_table = (RtlAddGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlAddGrowableFunctionTable");
1075 g_rtl_grow_function_table = (RtlGrowFunctionTablePtr)GetProcAddress (g_ntdll, "RtlGrowFunctionTable");
1076 g_rtl_delete_growable_function_table = (RtlDeleteGrowableFunctionTablePtr)GetProcAddress (g_ntdll, "RtlDeleteGrowableFunctionTable");
1079 g_dyn_func_table_inited = TRUE;
/* Public entry point: initialize the table under the exclusive SRW lock.
 * The unlocked pre-check is a fast path; init_table_no_lock re-checks. */
1084 mono_arch_unwindinfo_init_table (void)
1086 if (g_dyn_func_table_inited == FALSE) {
1088 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1090 init_table_no_lock ();
1092 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/* Tear down the dynamic function table: free all entries, drop the cached
 * ntdll function pointers and module handle. Caller holds the SRW lock. */
1097 terminate_table_no_lock (void)
1099 if (g_dyn_func_table_inited == TRUE) {
1100 if (g_dynamic_function_table_begin != NULL) {
1101 // Free all list elements.
1102 for (GList *l = g_dynamic_function_table_begin; l; l = l->next) {
1110 g_list_free (g_dynamic_function_table_begin);
1111 g_dynamic_function_table_begin = NULL;
1112 g_dynamic_function_table_end = NULL;
1115 g_rtl_delete_growable_function_table = NULL;
1116 g_rtl_grow_function_table = NULL;
1117 g_rtl_add_growable_function_table = NULL;
/* Balance the GetModuleHandleEx reference taken in init_table_no_lock. */
1119 if (g_ntdll != NULL) {
1120 FreeLibrary (g_ntdll);
1124 g_dyn_func_table_inited = FALSE;
/* Public entry point: tear down the table under the exclusive SRW lock. */
1129 mono_arch_unwindinfo_terminate_table (void)
1131 if (g_dyn_func_table_inited == TRUE) {
1133 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1135 terminate_table_no_lock ();
1137 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
/* O(1) boundary check of the (descending, begin_range-sorted) table: return
 * the first node if it covers [begin_range, end_range); otherwise set
 * *continue_search when the range could still live inside the table. */
1142 fast_find_range_in_table_no_lock_ex (gsize begin_range, gsize end_range, gboolean *continue_search)
1144 GList *found_entry = NULL;
1146 // Fast path, look at boundaries.
1147 if (g_dynamic_function_table_begin != NULL) {
1148 DynamicFunctionTableEntry *first_entry = g_dynamic_function_table_begin->data;
1149 DynamicFunctionTableEntry *last_entry = (g_dynamic_function_table_end != NULL ) ? g_dynamic_function_table_end->data : first_entry;
1151 // Sorted in descending order based on begin_range, check first item, that is the entry with highest range.
1152 if (first_entry != NULL && first_entry->begin_range <= begin_range && first_entry->end_range >= end_range) {
1153 // Entry belongs to first entry in list.
1154 found_entry = g_dynamic_function_table_begin;
1155 *continue_search = FALSE;
1157 if (first_entry != NULL && first_entry->begin_range >= begin_range) {
1158 if (last_entry != NULL && last_entry->begin_range <= begin_range) {
1159 // Entry has a range that could exist in table, continue search.
1160 *continue_search = TRUE;
1169 static inline DynamicFunctionTableEntry *
1170 fast_find_range_in_table_no_lock (gsize begin_range, gsize end_range, gboolean *continue_search)
1172 GList *found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, continue_search);
1173 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Locate the list node whose entry covers exactly [code_block, code_block +
// block_size). Returns the GList node (not the entry) or NULL. Caller must
// hold g_dynamic_function_table_lock (shared or exclusive).
1177 find_range_in_table_no_lock_ex (const gpointer code_block, gsize block_size)
1179 GList *found_entry = NULL;
1180 gboolean continue_search = FALSE;
1182 gsize begin_range = (gsize)code_block;
1183 gsize end_range = begin_range + block_size;
1185 // Fast path, check table boundaries.
1186 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, end_range, &continue_search);
// NOTE(review): the early-exit body after this condition is elided in this
// view; presumably it returns found_entry when the fast path either hit or
// ruled out a match — confirm against the full source.
1187 if (found_entry || continue_search == FALSE)
1190 // Scan table for an entry including range.
1191 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1192 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1193 g_assert_checked (current_entry != NULL);
1195 // Do we have a match?
// Exact-range equality: this lookup is for whole registered code chunks,
// not for arbitrary pcs (see find_pc_in_table_no_lock_ex for that).
1196 if (current_entry->begin_range == begin_range && current_entry->end_range == end_range) {
1205 static inline DynamicFunctionTableEntry *
1206 find_range_in_table_no_lock (const gpointer code_block, gsize block_size)
1208 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1209 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
// Locate the list node whose entry's [begin_range, end_range] contains the
// given pc (a zero-length range query). Returns the GList node or NULL.
// Caller must hold g_dynamic_function_table_lock.
1213 find_pc_in_table_no_lock_ex (const gpointer pc)
1215 GList *found_entry = NULL;
1216 gboolean continue_search = FALSE;
1218 gsize begin_range = (gsize)pc;
1219 gsize end_range = begin_range;
1221 // Fast path, check table boundaries.
1222 found_entry = fast_find_range_in_table_no_lock_ex (begin_range, begin_range, &continue_search);
// NOTE(review): the early-exit body after this condition is elided in this
// view; presumably it returns found_entry — confirm against the full source.
1223 if (found_entry || continue_search == FALSE)
1226 // Scan table for an entry including range.
1227 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1228 DynamicFunctionTableEntry *current_entry = (DynamicFunctionTableEntry *)node->data;
1229 g_assert_checked (current_entry != NULL);
1231 // Do we have a match?
// Containment test (<=/>=), unlike the exact-equality test used by
// find_range_in_table_no_lock_ex.
1232 if (current_entry->begin_range <= begin_range && current_entry->end_range >= end_range) {
1241 static inline DynamicFunctionTableEntry *
1242 find_pc_in_table_no_lock (const gpointer pc)
1244 GList *found_entry = find_pc_in_table_no_lock_ex (pc);
1245 return (found_entry != NULL) ? (DynamicFunctionTableEntry *)found_entry->data : NULL;
1248 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build-only invariant validation of the dynamic function table.
// Caller must hold g_dynamic_function_table_lock.
1250 validate_table_no_lock (void)
1252 // Validation method checking that table is sorted as expected and doesn't include overlapped regions.
1253 // Method will assert on failure to explicitly indicate what check failed.
1254 if (g_dynamic_function_table_begin != NULL) {
1255 g_assert_checked (g_dynamic_function_table_end != NULL);
// NOTE(review): "prevoious" is a pre-existing typo in this local's name;
// left untouched to keep this change comment-only.
1257 DynamicFunctionTableEntry *prevoious_entry = NULL;
1258 DynamicFunctionTableEntry *current_entry = NULL;
1259 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1260 current_entry = (DynamicFunctionTableEntry *)node->data;
1262 g_assert_checked (current_entry != NULL);
// Every entry must describe a non-empty range.
1263 g_assert_checked (current_entry->end_range > current_entry->begin_range);
1265 if (prevoious_entry != NULL) {
1266 // List should be sorted in descending order on begin_range.
1267 g_assert_checked (prevoious_entry->begin_range > current_entry->begin_range);
1269 // Check for overlapped regions.
1270 g_assert_checked (prevoious_entry->begin_range >= current_entry->end_range);
1273 prevoious_entry = current_entry;
// Empty stub when checked-build validation is compiled out.
1281 validate_table_no_lock (void)
1285 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Forward declaration: OS unwinder callback used when growable function
// tables are unavailable (registered via RtlInstallFunctionTableCallback).
1288 static PRUNTIME_FUNCTION MONO_GET_RUNTIME_FUNCTION_CALLBACK (DWORD64 ControlPc, IN PVOID Context);
// Register the code region [code_block, code_block + block_size) in the
// dynamic function table and with the OS unwinder. Returns the new (or the
// already-existing) entry. Takes g_dynamic_function_table_lock exclusively.
1290 DynamicFunctionTableEntry *
1291 mono_arch_unwindinfo_insert_range_in_table (const gpointer code_block, gsize block_size)
1293 DynamicFunctionTableEntry *new_entry = NULL;
1295 gsize begin_range = (gsize)code_block;
1296 gsize end_range = begin_range + block_size;
1298 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1299 init_table_no_lock ();
// Idempotent: if the exact range is already registered, reuse that entry.
1300 new_entry = find_range_in_table_no_lock (code_block, block_size);
1301 if (new_entry == NULL) {
1302 // Allocate new entry.
1303 new_entry = g_new0 (DynamicFunctionTableEntry, 1);
1304 if (new_entry != NULL) {
1306 // Pre-allocate RUNTIME_FUNCTION array, assume average method size of
1307 // MONO_UNWIND_INFO_RT_FUNC_SIZE bytes.
1308 InitializeSRWLock (&new_entry->lock);
1309 new_entry->handle = NULL;
1310 new_entry->begin_range = begin_range;
1311 new_entry->end_range = end_range;
1312 new_entry->rt_funcs_max_count = (block_size / MONO_UNWIND_INFO_RT_FUNC_SIZE) + 1;
1313 new_entry->rt_funcs_current_count = 0;
1314 new_entry->rt_funcs = g_new0 (RUNTIME_FUNCTION, new_entry->rt_funcs_max_count);
1316 if (new_entry->rt_funcs != NULL) {
1317 // Check insert on boundaries. List is sorted descending on begin_range.
1318 if (g_dynamic_function_table_begin == NULL) {
// First entry ever: head and cached tail refer to the same node.
1319 g_dynamic_function_table_begin = g_list_append (g_dynamic_function_table_begin, new_entry);
1320 g_dynamic_function_table_end = g_dynamic_function_table_begin;
1321 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_begin->data))->begin_range < begin_range) {
1322 // Insert at the head.
1323 g_dynamic_function_table_begin = g_list_prepend (g_dynamic_function_table_begin, new_entry);
1324 } else if (((DynamicFunctionTableEntry *)(g_dynamic_function_table_end->data))->begin_range > begin_range) {
// Append after the cached tail node — O(1). Return value is deliberately
// unused: appending to a known non-NULL node keeps the head unchanged, and
// the cached tail is advanced on the next line.
1326 g_list_append (g_dynamic_function_table_end, new_entry);
1327 g_dynamic_function_table_end = g_dynamic_function_table_end->next;
1329 //Search and insert at correct position.
1330 for (GList *node = g_dynamic_function_table_begin; node; node = node->next) {
1331 DynamicFunctionTableEntry * current_entry = (DynamicFunctionTableEntry *)node->data;
1332 g_assert_checked (current_entry != NULL);
1334 if (current_entry->begin_range < new_entry->begin_range) {
1335 g_dynamic_function_table_begin = g_list_insert_before (g_dynamic_function_table_begin, node, new_entry);
1341 // Register dynamic function table entry with OS.
1342 if (g_rtl_add_growable_function_table != NULL) {
1343 // Allocate new growable handle table for entry.
1344 g_assert_checked (new_entry->handle == NULL);
1345 DWORD result = g_rtl_add_growable_function_table (&new_entry->handle,
1346 new_entry->rt_funcs, new_entry->rt_funcs_current_count,
1347 new_entry->rt_funcs_max_count, new_entry->begin_range, new_entry->end_range);
// Fallback (no RtlAddGrowableFunctionTable, i.e. pre-Win8): install a
// callback-based function table and point the OS at an out-of-process DAC
// module for debugger support.
1350 WCHAR buffer [MONO_DAC_MODULE_MAX_PATH] = { 0 };
1351 WCHAR *path = buffer;
1353 // DAC module should be in the same directory as the
1355 GetModuleFileNameW (NULL, buffer, G_N_ELEMENTS(buffer));
1356 path = wcsrchr (buffer, TEXT('\\'));
1362 wcscat_s (buffer, G_N_ELEMENTS(buffer), MONO_DAC_MODULE);
1365 // Register function table callback + out of proc module.
// The two low-order bits of the table identifier must be set per the
// RtlInstallFunctionTableCallback contract — hence the "| 3".
1366 new_entry->handle = (PVOID)((DWORD64)(new_entry->begin_range) | 3);
1367 BOOLEAN result = RtlInstallFunctionTableCallback ((DWORD64)(new_entry->handle),
1368 (DWORD64)(new_entry->begin_range), (DWORD)(new_entry->end_range - new_entry->begin_range),
1369 MONO_GET_RUNTIME_FUNCTION_CALLBACK, new_entry, path);
1373 // Only included in checked builds. Validates the structure of table after insert.
1374 validate_table_no_lock ();
1382 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Unlink ENTRY (a GList node) from the table, unregister its function table
// from the OS and free all of its storage. Safe to call with NULL. Caller
// must hold g_dynamic_function_table_lock exclusively.
1388 remove_range_in_table_no_lock (GList *entry)
1390 if (entry != NULL) {
// Keep the cached tail pointer valid when removing the last node.
1391 if (entry == g_dynamic_function_table_end)
1392 g_dynamic_function_table_end = entry->prev;
1394 g_dynamic_function_table_begin = g_list_remove_link (g_dynamic_function_table_begin, entry);
1395 DynamicFunctionTableEntry *removed_entry = (DynamicFunctionTableEntry *)entry->data;
1397 g_assert_checked (removed_entry != NULL);
1398 g_assert_checked (removed_entry->rt_funcs != NULL);
1400 // Remove function table from OS.
1401 if (removed_entry->handle != NULL) {
1402 if (g_rtl_delete_growable_function_table != NULL) {
1403 g_rtl_delete_growable_function_table (removed_entry->handle);
// NOTE(review): an "else" is elided in this view; presumably callback-based
// tables take the RtlDeleteFunctionTable path below — confirm.
1405 RtlDeleteFunctionTable ((PRUNTIME_FUNCTION)removed_entry->handle);
1409 g_free (removed_entry->rt_funcs);
1410 g_free (removed_entry);
// Node was detached by g_list_remove_link, so free just this one link.
1412 g_list_free_1 (entry);
1415 // Only included in checked builds. Validates the structure of table after remove.
1416 validate_table_no_lock ();
1420 mono_arch_unwindinfo_remove_pc_range_in_table (const gpointer code)
1422 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1424 GList *found_entry = find_pc_in_table_no_lock_ex (code);
1426 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code);
1427 remove_range_in_table_no_lock (found_entry);
1429 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
1433 mono_arch_unwindinfo_remove_range_in_table (const gpointer code_block, gsize block_size)
1435 AcquireSRWLockExclusive (&g_dynamic_function_table_lock);
1437 GList *found_entry = find_range_in_table_no_lock_ex (code_block, block_size);
1439 g_assert_checked (found_entry != NULL || ((DynamicFunctionTableEntry *)found_entry->data)->begin_range == (gsize)code_block);
1440 remove_range_in_table_no_lock (found_entry);
1442 ReleaseSRWLockExclusive (&g_dynamic_function_table_lock);
// Find the RUNTIME_FUNCTION that covers [code, code + code_size) inside the
// table entry owning that pc. Returns NULL when the pc is not registered.
// Takes the global table lock shared, then the per-entry lock shared while
// scanning the entry's rt_funcs array.
1446 mono_arch_unwindinfo_find_rt_func_in_table (const gpointer code, gsize code_size)
1448 PRUNTIME_FUNCTION found_rt_func = NULL;
1450 gsize begin_range = (gsize)code;
1451 gsize end_range = begin_range + code_size;
1453 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1455 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1457 if (found_entry != NULL) {
1459 AcquireSRWLockShared (&found_entry->lock);
1461 g_assert_checked (found_entry->begin_range <= begin_range);
1462 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1463 g_assert_checked (found_entry->rt_funcs != NULL);
// Linear scan; rt_funcs is kept sorted ascending on BeginAddress (see
// validate_rt_funcs_in_table_no_lock) but no binary search is done here.
1465 for (int i = 0; i < found_entry->rt_funcs_current_count; ++i) {
1466 PRUNTIME_FUNCTION current_rt_func = (PRUNTIME_FUNCTION)(&found_entry->rt_funcs [i]);
1468 // Is this our RT function entry?
// BeginAddress/EndAddress are offsets relative to the entry's begin_range.
1469 if (found_entry->begin_range + current_rt_func->BeginAddress <= begin_range &&
1470 found_entry->begin_range + current_rt_func->EndAddress >= end_range) {
1471 found_rt_func = current_rt_func;
// NOTE(review): a loop "break" is presumably elided here — confirm.
1476 ReleaseSRWLockShared (&found_entry->lock);
1479 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
1481 return found_rt_func;
// Convenience: a single-pc lookup expressed as a zero-length range query.
1484 static inline PRUNTIME_FUNCTION
1485 mono_arch_unwindinfo_find_pc_rt_func_in_table (const gpointer pc)
1487 return mono_arch_unwindinfo_find_rt_func_in_table (pc, 0);
1490 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
// Checked-build-only invariant validation of one entry's RUNTIME_FUNCTION
// array. Caller must hold the entry's lock.
1492 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1494 // Validation method checking that runtime function table is sorted as expected and doesn't include overlapped regions.
1495 // Method will assert on failure to explicitly indicate what check failed.
1496 g_assert_checked (entry != NULL);
1497 g_assert_checked (entry->rt_funcs_max_count >= entry->rt_funcs_current_count);
1498 g_assert_checked (entry->rt_funcs != NULL);
1500 PRUNTIME_FUNCTION current_rt_func = NULL;
1501 PRUNTIME_FUNCTION previous_rt_func = NULL;
1502 for (int i = 0; i < entry->rt_funcs_current_count; ++i) {
1503 current_rt_func = &(entry->rt_funcs [i]);
// Each function must span a non-empty range.
1505 g_assert_checked (current_rt_func->BeginAddress < current_rt_func->EndAddress);
// Unwind data is placed after the method body (see insert_rt_func), so its
// offset must be at or past EndAddress.
1506 g_assert_checked (current_rt_func->EndAddress <= current_rt_func->UnwindData);
1508 if (previous_rt_func != NULL) {
1509 // List should be sorted in ascending order based on BeginAddress.
1510 g_assert_checked (previous_rt_func->BeginAddress < current_rt_func->BeginAddress);
1512 // Check for overlapped regions.
1513 g_assert_checked (previous_rt_func->EndAddress <= current_rt_func->BeginAddress);
1516 previous_rt_func = current_rt_func;
// Empty stub when checked-build validation is compiled out.
1523 validate_rt_funcs_in_table_no_lock (DynamicFunctionTableEntry *entry)
1527 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// Insert a RUNTIME_FUNCTION for a freshly JITed method at [code, code +
// code_size) into the table entry owning that pc, keeping the array sorted
// ascending on BeginAddress and growing / re-registering it with the OS when
// needed. The global table lock is taken shared (the list itself is not
// mutated); the entry's own lock is taken exclusive.
1530 mono_arch_unwindinfo_insert_rt_func_in_table (const gpointer code, gsize code_size)
1532 PRUNTIME_FUNCTION new_rt_func = NULL;
1534 gsize begin_range = (gsize)code;
1535 gsize end_range = begin_range + code_size;
1537 AcquireSRWLockShared (&g_dynamic_function_table_lock);
1539 DynamicFunctionTableEntry *found_entry = find_pc_in_table_no_lock (code);
1541 if (found_entry != NULL) {
1543 AcquireSRWLockExclusive (&found_entry->lock);
1545 g_assert_checked (found_entry->begin_range <= begin_range);
1546 g_assert_checked (found_entry->end_range >= begin_range && found_entry->end_range >= end_range);
1547 g_assert_checked (found_entry->rt_funcs != NULL);
1548 g_assert_checked ((guchar*)code - found_entry->begin_range >= 0);
// RUNTIME_FUNCTION fields are offsets relative to the entry's begin_range.
1550 gsize code_offset = (gsize)code - found_entry->begin_range;
1551 gsize entry_count = found_entry->rt_funcs_current_count;
1552 gsize max_entry_count = found_entry->rt_funcs_max_count;
1553 PRUNTIME_FUNCTION current_rt_funcs = found_entry->rt_funcs;
1555 RUNTIME_FUNCTION new_rt_func_data;
1556 new_rt_func_data.BeginAddress = code_offset;
1557 new_rt_func_data.EndAddress = code_offset + code_size;
// Unwind data lives directly after the method body, aligned to mgreg_t —
// mirrors the placement done in mono_arch_unwindinfo_install_method_unwind_info.
1559 gsize aligned_unwind_data = ALIGN_TO(end_range, sizeof (mgreg_t));
1560 new_rt_func_data.UnwindData = aligned_unwind_data - found_entry->begin_range;
1562 g_assert_checked (new_rt_func_data.UnwindData == ALIGN_TO(new_rt_func_data.EndAddress, sizeof (mgreg_t)));
1564 PRUNTIME_FUNCTION new_rt_funcs = NULL;
1566 // List needs to be sorted in ascending order based on BeginAddress (Windows requirement if list
1567 // going to be directly reused in OS func tables. Check if we can append to end of existing table without realloc.
// NOTE(review): && binds tighter than ||, so this parses as
// entry_count == 0 || ((entry_count < max_entry_count) && ...). That is the
// intended grouping (the short-circuit also protects the [entry_count - 1]
// access when empty), but explicit parentheses would silence -Wparentheses.
1568 if (entry_count == 0 || (entry_count < max_entry_count) && (current_rt_funcs [entry_count - 1].BeginAddress) < code_offset) {
1569 new_rt_func = &(current_rt_funcs [entry_count]);
1570 *new_rt_func = new_rt_func_data;
// Slow path: out-of-order insert or array full — rebuild into a new array.
1573 // No easy way out, need to realloc, grow to double size (or current max, if to small).
1574 max_entry_count = entry_count * 2 > max_entry_count ? entry_count * 2 : max_entry_count;
1575 new_rt_funcs = g_new0 (RUNTIME_FUNCTION, max_entry_count);
1577 if (new_rt_funcs != NULL) {
1578 gsize from_index = 0;
1581 // Copy from old table into new table. Make sure new rt func gets inserted
1582 // into correct location based on sort order.
1583 for (; from_index < entry_count; ++from_index) {
1584 if (new_rt_func == NULL && current_rt_funcs [from_index].BeginAddress > new_rt_func_data.BeginAddress) {
1585 new_rt_func = &(new_rt_funcs [to_index++]);
1586 *new_rt_func = new_rt_func_data;
// Skip zeroed (unused) slots while compacting into the new array.
1589 if (current_rt_funcs [from_index].UnwindData != 0)
1590 new_rt_funcs [to_index++] = current_rt_funcs [from_index];
1593 // If we didn't insert by now, put it last in the list.
1594 if (new_rt_func == NULL) {
1595 new_rt_func = &(new_rt_funcs [to_index]);
1596 *new_rt_func = new_rt_func_data;
1603 // Update the stats for current entry.
1604 found_entry->rt_funcs_current_count = entry_count;
1605 found_entry->rt_funcs_max_count = max_entry_count;
// Four OS-registration cases, keyed on which ntdll APIs were resolved and
// whether the backing array was reallocated above.
1607 if (new_rt_funcs == NULL && g_rtl_grow_function_table != NULL) {
1608 // No new table just report increase in use.
1609 g_assert_checked (found_entry->handle != NULL);
1610 g_rtl_grow_function_table (found_entry->handle, found_entry->rt_funcs_current_count);
1611 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table != NULL) {
1612 // New table, delete old table and rt funcs, and register a new one.
1613 g_assert_checked (g_rtl_delete_growable_function_table != NULL);
1614 g_rtl_delete_growable_function_table (found_entry->handle);
1615 found_entry->handle = NULL;
1616 g_free (found_entry->rt_funcs);
1617 found_entry->rt_funcs = new_rt_funcs;
1618 DWORD result = g_rtl_add_growable_function_table (&found_entry->handle,
1619 found_entry->rt_funcs, found_entry->rt_funcs_current_count,
1620 found_entry->rt_funcs_max_count, found_entry->begin_range, found_entry->end_range);
1622 } else if (new_rt_funcs != NULL && g_rtl_add_growable_function_table == NULL) {
1623 // No table registered with OS, callback solution in use. Switch tables.
1624 g_free (found_entry->rt_funcs);
1625 found_entry->rt_funcs = new_rt_funcs;
1626 } else if (new_rt_funcs == NULL && g_rtl_grow_function_table == NULL) {
1627 // No table registered with OS, callback solution in use, nothing to do.
// The four cases above are exhaustive; anything else is a logic error.
1629 g_assert_not_reached ();
1632 // Only included in checked builds. Validates the structure of table after insert.
1633 validate_rt_funcs_in_table_no_lock (found_entry);
1635 ReleaseSRWLockExclusive (&found_entry->lock);
1638 ReleaseSRWLockShared (&g_dynamic_function_table_lock);
// OS unwinder callback for ranges registered via
// RtlInstallFunctionTableCallback. Context (the DynamicFunctionTableEntry
// passed at registration) is unused; the lookup goes through the global
// table keyed on pc instead.
1643 static PRUNTIME_FUNCTION
1644 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1646 return mono_arch_unwindinfo_find_pc_rt_func_in_table ((gpointer)ControlPc);
// Replay the DWARF-style unwind ops collected by the JIT and translate them
// into Windows x64 UNWIND_INFO records. No-op when either argument is NULL.
1650 initialize_unwind_info_internal_ex (GSList *unwind_ops, PUNWIND_INFO unwindinfo)
1652 if (unwind_ops != NULL && unwindinfo != NULL) {
1653 MonoUnwindOp *unwind_op_data;
// Track whether SP/FP allocation has been seen: pushes after allocation are
// not representable as UWOP_PUSH_NONVOL in the Windows x64 model.
1654 gboolean sp_alloced = FALSE;
1655 gboolean fp_alloced = FALSE;
1657 // Replay collected unwind info and setup Windows format.
1658 for (GSList *l = unwind_ops; l; l = l->next) {
1659 unwind_op_data = (MonoUnwindOp *)l->data;
1660 switch (unwind_op_data->op) {
1661 case DW_CFA_offset : {
1662 // Pushes should go before SP/FP allocation to be compliant with Windows x64 ABI.
1663 // TODO: DW_CFA_offset can also be used to move saved regs into frame.
// RIP "saves" are return-address bookkeeping, not register pushes — skip.
1664 if (unwind_op_data->reg != AMD64_RIP && sp_alloced == FALSE && fp_alloced == FALSE)
1665 mono_arch_unwindinfo_add_push_nonvol (unwindinfo, unwind_op_data);
1668 case DW_CFA_mono_sp_alloc_info_win64 : {
1669 mono_arch_unwindinfo_add_alloc_stack (unwindinfo, unwind_op_data);
// NOTE(review): lines elided in this view presumably set sp_alloced /
// fp_alloced and break out of each case — confirm against the full source.
1673 case DW_CFA_mono_fp_alloc_info_win64 : {
1674 mono_arch_unwindinfo_add_set_fpreg (unwindinfo, unwind_op_data);
// Allocate a fresh UNWIND_INFO and populate it from the collected unwind
// ops. Ownership of the returned buffer passes to the caller; it is freed
// later by mono_arch_unwindinfo_install_method_unwind_info.
1686 initialize_unwind_info_internal (GSList *unwind_ops)
1688 PUNWIND_INFO unwindinfo;
1690 mono_arch_unwindinfo_create (&unwindinfo);
1691 initialize_unwind_info_internal_ex (unwind_ops, unwindinfo);
// Count how many Windows unwind codes the given unwind ops translate to,
// using a throwaway stack-allocated UNWIND_INFO as scratch space.
1697 mono_arch_unwindinfo_get_code_count (GSList *unwind_ops)
1699 UNWIND_INFO unwindinfo = {0};
1700 initialize_unwind_info_internal_ex (unwind_ops, &unwindinfo);
1701 return unwindinfo.CountOfCodes;
// Build the unwind info for the method being compiled (stored on
// cfg->arch.unwindinfo) and return its size as computed by
// mono_arch_unwindinfo_get_size — the space the caller must reserve after
// the method body. Must be called at most once per MonoCompile.
1705 mono_arch_unwindinfo_init_method_unwind_info (gpointer cfg)
1707 MonoCompile * current_cfg = (MonoCompile *)cfg;
1708 g_assert (current_cfg->arch.unwindinfo == NULL);
1709 current_cfg->arch.unwindinfo = initialize_unwind_info_internal (current_cfg->unwind_ops);
1710 return mono_arch_unwindinfo_get_size (((PUNWIND_INFO)(current_cfg->arch.unwindinfo))->CountOfCodes);
// Copy the prepared unwind info (*monoui) to its final location directly
// after the method body — aligned to mgreg_t — free the temporary buffer,
// and register the method's RUNTIME_FUNCTION in the dynamic table. *monoui
// is consumed (freed) by this call.
1714 mono_arch_unwindinfo_install_method_unwind_info (gpointer *monoui, gpointer code, guint code_size)
1716 PUNWIND_INFO unwindinfo, targetinfo;
1718 guint64 targetlocation;
1722 unwindinfo = (PUNWIND_INFO)*monoui;
1723 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1724 targetinfo = (PUNWIND_INFO) ALIGN_TO(targetlocation, sizeof (mgreg_t));
// Copy the header without the (mostly unused) code array...
1726 memcpy (targetinfo, unwindinfo, sizeof (UNWIND_INFO) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1728 codecount = unwindinfo->CountOfCodes;
// ...then only the used tail of the source code array: codes are stored
// right-aligned (filled from the end) in the scratch buffer.
1730 memcpy (&targetinfo->UnwindCode [0], &unwindinfo->UnwindCode [MONO_MAX_UNWIND_CODES - codecount],
1731 sizeof (UNWIND_CODE) * codecount);
1734 #ifdef ENABLE_CHECKED_BUILD_UNWINDINFO
1736 // Validate the order of unwind op codes in checked builds. Offset should be in descending order.
1737 // In first iteration previous == current, this is intended to handle UWOP_ALLOC_LARGE as first item.
1739 for (int current = 0; current < codecount; current++) {
1740 g_assert_checked (targetinfo->UnwindCode [previous].CodeOffset >= targetinfo->UnwindCode [current].CodeOffset);
1742 if (targetinfo->UnwindCode [current].UnwindOp == UWOP_ALLOC_LARGE) {
1743 if (targetinfo->UnwindCode [current].OpInfo == 0) {
// NOTE(review): body elided in this view — UWOP_ALLOC_LARGE occupies extra
// code slots, so the loop presumably skips over them here; confirm.
1751 #endif /* ENABLE_CHECKED_BUILD_UNWINDINFO */
// The scratch buffer allocated by initialize_unwind_info_internal is done.
1753 g_free (unwindinfo);
1756 // Register unwind info in table.
1757 mono_arch_unwindinfo_insert_rt_func_in_table (code, code_size);
// Trampoline variant: build unwind info from UNWIND_OPS and install it for
// the code at [code, code + code_size). The temporary unwindinfo buffer is
// freed inside mono_arch_unwindinfo_install_method_unwind_info.
1761 mono_arch_unwindinfo_install_tramp_unwind_info (GSList *unwind_ops, gpointer code, guint code_size)
1763 PUNWIND_INFO unwindinfo = initialize_unwind_info_internal (unwind_ops);
1764 if (unwindinfo != NULL) {
1765 mono_arch_unwindinfo_install_method_unwind_info (&unwindinfo, code, code_size);
// Code-manager hook: a new code chunk was allocated — register its address
// range so unwind info for methods placed in it can be added later.
1770 mono_arch_code_chunk_new (void *chunk, int size)
1772 mono_arch_unwindinfo_insert_range_in_table (chunk, size);
// Code-manager hook: a code chunk is being destroyed — drop its unwind
// registration (chunk is the begin address of the registered range).
1775 void mono_arch_code_chunk_destroy (void *chunk)
1777 mono_arch_unwindinfo_remove_pc_range_in_table (chunk);
1779 #endif /* MONO_ARCH_HAVE_UNWIND_TABLE */
1781 #if MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT)
// Emit (once; cached in 'saved') the native stub that restores a tasklet
// continuation: it copies the saved stack back into place, reloads rbp/rsp
// from the continuation's LMF and jumps to the saved return address with the
// 'state' argument already in rax as the apparent return value.
1782 MonoContinuationRestore
1783 mono_tasklets_arch_restore (void)
1785 static guint8* saved = NULL;
1786 guint8 *code, *start;
1787 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1788 const guint kMaxCodeSize = 64;
// Return the cached stub when it was already generated.
1792 return (MonoContinuationRestore)saved;
1793 code = start = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
1794 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1795 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1796 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1797 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1798 * We move cont to cont_reg since we need both rcx and rdi for the copy
1799 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1801 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1802 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1803 /* setup the copy of the stack */
1804 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
// The copy moves 8 bytes per iteration: convert byte count to qword count.
1805 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1807 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1808 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
// rep prefix for the string-move that copies the saved stack back.
1809 amd64_prefix (code, X86_REP_PREFIX);
1812 /* now restore the registers from the LMF */
1813 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1814 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
1815 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);
// NOTE(review): lines elided in this view — the two moves below appear to be
// alternative (platform-conditional) stashes of lmf_addr into a callee-saved
// register; confirm against the full source.
1818 amd64_mov_reg_reg (code, AMD64_R14, AMD64_ARG_REG3, 8);
1820 amd64_mov_reg_reg (code, AMD64_R12, AMD64_ARG_REG3, 8);
1823 /* state is already in rax */
1824 amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
1825 g_assert ((code - start) <= kMaxCodeSize);
1827 mono_arch_flush_icache (start, code - start);
1828 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
1831 return (MonoContinuationRestore)saved;
1833 #endif /* MONO_SUPPORT_TASKLETS && !defined(DISABLE_JIT) */
1836 * mono_arch_setup_resume_sighandler_ctx:
1838 * Setup CTX so execution continues at FUNC.
1841 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1844 * When resuming from a signal handler, the stack should be misaligned, just like right after
// A call instruction pushes an 8-byte return address, so at function entry
// rsp % 16 == 8; emulate that alignment before redirecting IP to FUNC.
1847 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1848 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1849 MONO_CONTEXT_SET_IP (ctx, func);
// DISABLE_JIT stubs: exception trampolines cannot be generated without the
// JIT, so reaching any of these at runtime is a configuration error.
1854 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
1856 g_assert_not_reached ();
1861 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
1863 g_assert_not_reached ();
1868 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
1870 g_assert_not_reached ();
1875 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
1877 g_assert_not_reached ();
1882 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
1884 g_assert_not_reached ();
1889 mono_amd64_get_exception_trampolines (gboolean aot)
1891 g_assert_not_reached ();
1894 #endif /* DISABLE_JIT */
1896 #if !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT)
// Stub used when tasklets are unsupported or the JIT is disabled; mirrors
// the real implementation guarded by the opposite condition above.
1897 MonoContinuationRestore
1898 mono_tasklets_arch_restore (void)
1900 g_assert_not_reached ();
1903 #endif /* !MONO_SUPPORT_TASKLETS || defined(DISABLE_JIT) */