/*
 * exceptions-amd64.c: exception support for AMD64
 *
 * Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
#ifdef HAVE_UCONTEXT_H
#include <ucontext.h>
#endif

#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-mmap.h>

#include "mini-amd64.h"
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
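/* E.g. ALIGN_TO (13, 8) == 16; 'align' must be a power of two. */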
#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

LPTOP_LEVEL_EXCEPTION_FILTER mono_old_win_toplevel_exception_filter;
void *mono_win_vectored_exception_handle;

#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler(0, ep, ctx)
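/* E.g. W32_SEH_HANDLE_EX(segv) expands to: if (segv_handler) segv_handler(0, ep, ctx) */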
static LONG CALLBACK seh_unhandled_exception_filter(EXCEPTION_POINTERS* ep)
{
#ifndef MONO_CROSS_COMPILE
	if (mono_old_win_toplevel_exception_filter) {
		return (*mono_old_win_toplevel_exception_filter)(ep);
	}
#endif

	mono_handle_native_sigsegv (SIGSEGV, NULL, NULL);

	return EXCEPTION_CONTINUE_SEARCH;
}
/*
 * Vectored exception handler: the top-level per-process handler, invoked for
 * every exception before frame-based SEH handlers get a chance to run.
 */
static LONG CALLBACK seh_vectored_exception_handler(EXCEPTION_POINTERS* ep)
{
	EXCEPTION_RECORD* er;
	CONTEXT* ctx;
	LONG res;
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);

	/* If the thread is not managed by the runtime, return early */
	if (!jit_tls)
		return EXCEPTION_CONTINUE_SEARCH;

	jit_tls->mono_win_chained_exception_needs_run = FALSE;
	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;

	switch (er->ExceptionCode) {
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
		break;
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
		break;
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);
		break;
	default:
		jit_tls->mono_win_chained_exception_needs_run = TRUE;
		break;
	}

	if (jit_tls->mono_win_chained_exception_needs_run) {
		/* Don't copy the context back if we chained the exception,
		 * as the handler may have modified the EXCEPTION_POINTERS
		 * directly. We don't pass a sigcontext to chained handlers.
		 * Return CONTINUE_SEARCH so the UnhandledExceptionFilter
		 * can correctly chain the exception.
		 */
		res = EXCEPTION_CONTINUE_SEARCH;
	}

	return res;
}
void win32_seh_init()
{
	mono_old_win_toplevel_exception_filter = SetUnhandledExceptionFilter(seh_unhandled_exception_filter);
	mono_win_vectored_exception_handle = AddVectoredExceptionHandler (1, seh_vectored_exception_handler);
}

void win32_seh_cleanup()
{
	guint32 ret = 0;

	if (mono_old_win_toplevel_exception_filter) SetUnhandledExceptionFilter(mono_old_win_toplevel_exception_filter);

	ret = RemoveVectoredExceptionHandler (mono_win_vectored_exception_handle);
	g_assert (ret);
}

void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
{
	switch (type) {
	case SIGFPE:
		fpe_handler = handler;
		break;
	case SIGILL:
		ill_handler = handler;
		break;
	case SIGSEGV:
		segv_handler = handler;
		break;
	default:
		break;
	}
}

#endif /* TARGET_WIN32 */
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 */
gpointer
mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start = NULL;
	guint8 *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	/* restore_context (MonoContext *ctx) */

	start = code = mono_global_codeman_reserve (256);

	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);

	/* Restore all registers except %rip and %r11 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rax), 8);
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rcx), 8);
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rdx), 8);
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rbp), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rsi), 8);
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rdi), 8);
	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r8), 8);
	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r9), 8);
	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r10), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, r15), 8);
#endif

	/*
	 * The context resides on the stack, in the stack frame of the
	 * caller of this function. The stack pointer that we need to
	 * restore is potentially many stack frames higher up, so the
	 * distance between them can easily be more than the red zone
	 * size. Hence the stack pointer can be restored only after
	 * we have finished loading everything from the context.
	 */
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rsp), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (MonoContext, rip), 8);
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);

	/* jump to the saved IP */
	amd64_jump_reg (code, AMD64_R11);

	nacl_global_codeman_validate (&start, 256, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("restore_context", start, code - start, ji, unwind_ops);

	return start;
}
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).
 */
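/* A minimal usage sketch (the typedef and variable names here are
 * illustrative, not part of this file):
 *
 *   typedef int (*CallFilter) (MonoContext *ctx, gpointer ip);
 *   CallFilter filter = (CallFilter) mono_arch_get_call_filter (NULL, FALSE);
 *   int res = filter (&ctx, filter_ip);  // runs the filter/finally at filter_ip
 */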
gpointer
mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
	guint8 *start;
	guint8 *code;
	int i;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* call_filter (MonoContext *ctx, unsigned long eip) */

	/* Alloc new frame */
	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);

	/* Save callee saved regs */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
			amd64_push_reg (code, i);
		}

	/* Save RBP */
	amd64_push_reg (code, AMD64_RBP);

	/* Make the stack misaligned; the call will realign it */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* set new RBP */
	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rbp), 8);
	/* load callee saved regs */
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rbx), 8);
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r12), 8);
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r13), 8);
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r14), 8);
#if !defined(__native_client_codegen__)
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, r15), 8);
#endif
#ifdef TARGET_WIN32
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rdi), 8);
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif

	/* call the handler */
	amd64_call_reg (code, AMD64_ARG_REG2);

	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);

	/* restore RBP */
	amd64_pop_reg (code, AMD64_RBP);

	/* Restore callee saved regs */
	for (i = AMD64_NREG - 1; i >= 0; --i)
		if (AMD64_IS_CALLEE_SAVED_REG (i))
			amd64_pop_reg (code, i);

	amd64_leave (code);
	amd64_ret (code);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("call_filter", start, code - start, ji, unwind_ops);

	return start;
}
/*
 * The first few arguments are dummy, to force the other arguments to be passed on
 * the stack; this avoids overwriting the argument registers in the throw trampoline.
 */
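/* The throw trampoline below effectively makes this call (a sketch;
 * 'saved_regs' and 'caller_rip' are illustrative names for the values it
 * assembles on the stack):
 *
 *   mono_amd64_throw_exception (0, 0, 0, 0, 0, 0, saved_regs, caller_rip, exc, FALSE);
 */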
void
mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
							guint64 dummy5, guint64 dummy6,
							mgreg_t *regs, mgreg_t rip,
							MonoObject *exc, gboolean rethrow)
{
	MonoContext ctx;

	ctx.rsp = regs [AMD64_RSP];
	ctx.rip = rip;
	ctx.rbx = regs [AMD64_RBX];
	ctx.rbp = regs [AMD64_RBP];
	ctx.r12 = regs [AMD64_R12];
	ctx.r13 = regs [AMD64_R13];
	ctx.r14 = regs [AMD64_R14];
	ctx.r15 = regs [AMD64_R15];
	ctx.rdi = regs [AMD64_RDI];
	ctx.rsi = regs [AMD64_RSI];
	ctx.rax = regs [AMD64_RAX];
	ctx.rcx = regs [AMD64_RCX];
	ctx.rdx = regs [AMD64_RDX];

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		if (!rethrow)
			mono_ex->stack_trace = NULL;
	}

	/* adjust eip so that it points into the call instruction */
	ctx.rip -= 1;

	mono_handle_exception (&ctx, exc);
	mono_restore_context (&ctx);
	g_assert_not_reached ();
}
void
mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
								   guint64 dummy5, guint64 dummy6,
								   mgreg_t *regs, mgreg_t rip,
								   guint32 ex_token_index, gint64 pc_offset)
{
	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
	MonoException *ex;

	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);

	rip -= pc_offset;

	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
	rip += 1;

	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject*)ex, FALSE);
}
void
mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
						  guint64 dummy5, guint64 dummy6,
						  mgreg_t *regs, mgreg_t rip,
						  guint32 dummy7, gint64 dummy8)
{
	/* Only the register parameters are valid */
	MonoContext ctx;

	ctx.rsp = regs [AMD64_RSP];
	ctx.rip = rip;
	ctx.rbx = regs [AMD64_RBX];
	ctx.rbp = regs [AMD64_RBP];
	ctx.r12 = regs [AMD64_R12];
	ctx.r13 = regs [AMD64_R13];
	ctx.r14 = regs [AMD64_R14];
	ctx.r15 = regs [AMD64_R15];
	ctx.rdi = regs [AMD64_RDI];
	ctx.rsi = regs [AMD64_RSI];
	ctx.rax = regs [AMD64_RAX];
	ctx.rcx = regs [AMD64_RCX];
	ctx.rdx = regs [AMD64_RDX];

	mono_resume_unwind (&ctx);
}
/*
 * get_throw_trampoline:
 *
 * Generate a call to mono_amd64_throw_exception/
 * mono_amd64_throw_corlib_exception.
 */
static gpointer
get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
	guint8 *start, *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int i, stack_size, arg_offsets [16], regs_offset, dummy_stack_space;
	const guint kMaxCodeSize = NACL_SIZE (256, 512);

#ifdef TARGET_WIN32
	dummy_stack_space = 6 * sizeof(mgreg_t);	/* Windows expects stack space allocated for all 6 dummy args. */
#else
	dummy_stack_space = 0;
#endif

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* The stack is unaligned on entry */
	stack_size = 192 + 8 + dummy_stack_space;

	unwind_ops = mono_arch_get_cie_program ();

	/* Alloc frame */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);

	/*
	 * To hide linux/windows calling convention differences, we pass all arguments on
	 * the stack by passing 6 dummy values in registers.
	 */

	arg_offsets [0] = dummy_stack_space + 0;
	arg_offsets [1] = dummy_stack_space + sizeof(mgreg_t);
	arg_offsets [2] = dummy_stack_space + sizeof(mgreg_t) * 2;
	arg_offsets [3] = dummy_stack_space + sizeof(mgreg_t) * 3;
	regs_offset = dummy_stack_space + sizeof(mgreg_t) * 4;
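	/* Resulting stack layout, from RSP upwards (derived from the offsets above):
	 *
	 *   [0 .. dummy_stack_space)  home space for the 6 dummy register args (Windows only)
	 *   arg_offsets [0]           arg0: regs pointer
	 *   arg_offsets [1]           arg1: rip
	 *   arg_offsets [2]           arg2: exc / ex_token_index
	 *   arg_offsets [3]           arg3: rethrow / pc offset
	 *   regs_offset               saved register array, AMD64_NREG slots
	 */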
	/* Save registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (i != AMD64_RSP)
			amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
	/* Save RSP: its original value is the current RSP plus our frame plus the return address */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
	/* Set arg1 == regs */
	amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg2 == eip */
	if (llvm_abs)
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
	else
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
	/* Set arg3 == exc/ex_token_index */
	if (resume_unwind)
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
	else
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
	/* Set arg4 == rethrow/pc offset */
	if (resume_unwind) {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
	} else if (corlib) {
		amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
		if (llvm_abs)
			/*
			 * The caller is LLVM code which passes the absolute address, not a pc offset,
			 * so compensate by passing 0 as 'rip' and passing the negated abs address as
			 * the pc offset.
			 */
			amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
	} else {
		amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
	}

	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_breakpoint (code);

	mono_arch_flush_icache (start, code - start);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create (tramp_name, start, code - start, ji, unwind_ops);

	return start;
}
/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 */
gpointer
mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}
gpointer
mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}
/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
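/* For example (illustrative numbers, not code from this file): if the JIT
 * emits the throw sequence at address A and the call returns to A + N, the
 * caller passes offset = N and the trampoline recovers the throw IP as
 * caller_ip - N, with no absolute address embedded in the method body.
 */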
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
/*
 * mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx, and store it in @frame_info.
 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
 * is modified if needed.
 * Returns TRUE on success, FALSE otherwise.
 */
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
						 MonoJitInfo *ji, MonoContext *ctx,
						 MonoContext *new_ctx, MonoLMF **lmf,
						 mgreg_t **save_locations,
						 StackFrameInfo *frame)
{
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	memset (frame, 0, sizeof (StackFrameInfo));
	frame->ji = ji;

	*new_ctx = *ctx;

	if (ji != NULL) {
		mgreg_t regs [MONO_MAX_IREGS + 1];
		guint8 *cfa;
		guint32 unwind_info_len;
		guint8 *unwind_info;
		guint8 *epilog = NULL;

		frame->type = FRAME_TYPE_MANAGED;

		unwind_info = mono_jinfo_get_unwind_info (ji, &unwind_info_len);

		frame->unwind_info = unwind_info;
		frame->unwind_info_len = unwind_info_len;

		/*
		printf ("%s %p %p\n", ji->d.method->name, ji->code_start, ip);
		mono_print_unwind_info (unwind_info, unwind_info_len);
		*/

		/* LLVM compiled code doesn't have this info */
		if (ji->has_arch_eh_info)
			epilog = (guint8*)ji->code_start + ji->code_size - mono_jinfo_get_epilog_size (ji);

		regs [AMD64_RAX] = new_ctx->rax;
		regs [AMD64_RBX] = new_ctx->rbx;
		regs [AMD64_RCX] = new_ctx->rcx;
		regs [AMD64_RDX] = new_ctx->rdx;
		regs [AMD64_RBP] = new_ctx->rbp;
		regs [AMD64_RSP] = new_ctx->rsp;
		regs [AMD64_RSI] = new_ctx->rsi;
		regs [AMD64_RDI] = new_ctx->rdi;
		regs [AMD64_RIP] = new_ctx->rip;
		regs [AMD64_R12] = new_ctx->r12;
		regs [AMD64_R13] = new_ctx->r13;
		regs [AMD64_R14] = new_ctx->r14;
		regs [AMD64_R15] = new_ctx->r15;

		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
						   (guint8*)ji->code_start + ji->code_size,
						   ip, epilog ? &epilog : NULL, regs, MONO_MAX_IREGS + 1,
						   save_locations, MONO_MAX_IREGS, &cfa);

		new_ctx->rax = regs [AMD64_RAX];
		new_ctx->rbx = regs [AMD64_RBX];
		new_ctx->rcx = regs [AMD64_RCX];
		new_ctx->rdx = regs [AMD64_RDX];
		new_ctx->rbp = regs [AMD64_RBP];
		new_ctx->rsp = regs [AMD64_RSP];
		new_ctx->rsi = regs [AMD64_RSI];
		new_ctx->rdi = regs [AMD64_RDI];
		new_ctx->rip = regs [AMD64_RIP];
		new_ctx->r12 = regs [AMD64_R12];
		new_ctx->r13 = regs [AMD64_R13];
		new_ctx->r14 = regs [AMD64_R14];
		new_ctx->r15 = regs [AMD64_R15];

		/* The CFA becomes the new SP value */
		new_ctx->rsp = (mgreg_t)cfa;

		return TRUE;
	} else if (*lmf) {
		guint64 rip;
		if (((guint64)(*lmf)->previous_lmf) & 2) {
			/*
			 * This LMF entry is created by the soft debug code to mark transitions to
			 * managed code done during invokes.
			 */
			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);

			g_assert (ext->debugger_invoke);

			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);

			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
			return TRUE;
		}

		if (((guint64)(*lmf)->previous_lmf) & 1) {
			/* This LMF has the rip field set */
			rip = (*lmf)->rip;
		} else if ((*lmf)->rsp == 0) {
			/* Top LMF entry */
			return FALSE;
		} else {
			/*
			 * The rsp field is set just before the call which transitioned to native
			 * code. Obtain the rip from the stack.
			 */
			rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
		}

		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
		/*
		 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
		 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
		 * return address.
		 */
		if (!ji)
			return FALSE;

		frame->ji = ji;
		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;

		new_ctx->rip = rip;
		new_ctx->rbp = (*lmf)->rbp;
		new_ctx->rsp = (*lmf)->rsp;

		if (((guint64)(*lmf)->previous_lmf) & 4) {
			MonoLMFTramp *ext = (MonoLMFTramp*)(*lmf);

			/* Trampoline frame */
			new_ctx->rbx = ext->regs [AMD64_RBX];
			new_ctx->r12 = ext->regs [AMD64_R12];
			new_ctx->r13 = ext->regs [AMD64_R13];
			new_ctx->r14 = ext->regs [AMD64_R14];
			new_ctx->r15 = ext->regs [AMD64_R15];
			new_ctx->rdi = ext->regs [AMD64_RDI];
			new_ctx->rsi = ext->regs [AMD64_RSI];
		} else {
			/*
			 * The registers saved in the LMF will be restored using the normal unwind info,
			 * when the wrapper frame is processed.
			 */
		}

		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~7);

		return TRUE;
	}

	return FALSE;
}
/*
 * handle_signal_exception:
 *
 * Called when execution resumes from a signal handler.
 */
static void
handle_signal_exception (gpointer obj)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	MonoContext ctx;

	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));

	mono_handle_exception (&ctx, obj);

	mono_restore_context (&ctx);
}
void
mono_arch_setup_async_callback (MonoContext *ctx, void (*async_cb)(void *fun), gpointer user_data)
{
	guint64 sp = ctx->rsp;

	ctx->rdi = (guint64)user_data;

	/* Allocate a stack frame below the red zone */
	sp -= 128;
	/* The stack should be unaligned */
	if ((sp % 16) == 0)
		sp -= 8;
	/* Preserve the call chain to prevent crashes in the libgcc unwinder (#15969) */
	*(guint64*)sp = ctx->rip;
	ctx->rsp = sp;
	ctx->rip = (guint64)async_cb;
}
/**
 * mono_arch_handle_exception:
 *
 * @ctx: saved processor state
 * @obj: the exception object
 */
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoContext mctx;

	/*
	 * Handling the exception in the signal handler is problematic, since the original
	 * signal is disabled, and we could run arbitrary code through the debugger. So
	 * resume into the normal stack and do most of the work there if possible.
	 */
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);

	/* Pass the ctx parameter in TLS */
	mono_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);

	mctx = jit_tls->ex_ctx;
	mono_arch_setup_async_callback (&mctx, handle_signal_exception, obj);
	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#else
	MonoContext mctx;

	mono_sigctx_to_monoctx (sigctx, &mctx);

	mono_handle_exception (&mctx, obj);

	mono_monoctx_to_sigctx (&mctx, sigctx);

	return TRUE;
#endif
}
gpointer
mono_arch_ip_from_context (void *sigctx)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	ucontext_t *ctx = (ucontext_t*)sigctx;

	return (gpointer)UCONTEXT_REG_RIP (ctx);
#elif defined(HOST_WIN32)
	return (gpointer)((CONTEXT*)sigctx)->Rip;
#else
	MonoContext *ctx = sigctx;
	return (gpointer)ctx->rip;
#endif
}
static void
restore_soft_guard_pages (void)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
}
/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.rip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.rip.
 */
static void
prepare_for_guard_pages (MonoContext *mctx)
{
	gpointer *sp;
	sp = (gpointer)(mctx->rsp);
	sp -= 1;
	/* the return addr */
	sp [0] = (gpointer)(mctx->rip);
	mctx->rip = (guint64)restore_soft_guard_pages;
	mctx->rsp = (guint64)sp;
}
static void
altstack_handle_and_restore (MonoContext *ctx, gpointer obj, gboolean stack_ovf)
{
	MonoContext mctx;

	mctx = *ctx;

	mono_handle_exception (&mctx, obj);
	if (stack_ovf)
		prepare_for_guard_pages (&mctx);
	mono_restore_context (&mctx);
}
void
mono_arch_handle_altstack_exception (void *sigctx, MONO_SIG_HANDLER_INFO_TYPE *siginfo, gpointer fault_addr, gboolean stack_ovf)
{
#if defined(MONO_ARCH_USE_SIGACTION)
	MonoException *exc = NULL;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL);
	gpointer *sp;
	int frame_size;
	MonoContext *copied_ctx;

	if (stack_ovf)
		exc = mono_domain_get ()->stack_overflow_ex;
	if (!ji)
		mono_handle_native_sigsegv (SIGSEGV, sigctx, siginfo);

	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * The frame looks like (top of stack first):
	 *   return ip
	 *   <four slots, so the copied context stays aligned>
	 *   copied MonoContext
	 * 128 is the size of the red zone
	 */
	frame_size = sizeof (MonoContext) + sizeof (gpointer) * 4 + 128;
	frame_size += 15;
	frame_size &= ~15;
	sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	copied_ctx = (MonoContext*)(sp + 4);
	/* the arguments must be aligned */
	sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
	mono_sigctx_to_monoctx (sigctx, copied_ctx);
	/* when the signal handler returns, execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
	UCONTEXT_REG_RDI (sigctx) = (unsigned long)(copied_ctx);
	UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
	UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}
guint64
mono_amd64_get_original_ip (void)
{
	MonoLMF *lmf = mono_get_lmf ();

	g_assert (lmf);

	/* Reset the change to previous_lmf */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);

	return lmf->rip;
}
gpointer
mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *start;
	guint8 *br [1];
	gpointer throw_trampoline;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	const guint kMaxCodeSize = NACL_SIZE (128, 256);

	start = code = mono_global_codeman_reserve (kMaxCodeSize);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Reload the saved exc object */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		throw_trampoline = mono_get_throw_exception ();
		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	}
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	g_assert ((code - start) < kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("throw_pending_exception", start, code - start, ji, unwind_ops);

	return start;
}
static gpointer throw_pending_exception;

/*
 * Called when a thread receives an async exception while executing unmanaged code.
 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
 * the return address on the stack to point to a helper routine which throws the
 * exception.
 */
void
mono_arch_notify_pending_exc (MonoThreadInfo *info)
{
	MonoLMF *lmf;

	if (!info) {
		lmf = mono_get_lmf ();
	} else {
		g_assert (mono_thread_info_get_suspend_state (info)->valid);
		lmf = mono_thread_info_get_suspend_state (info)->unwind_data [MONO_UNWIND_DATA_LMF];
	}

	if (!lmf)
		/* Not yet started */
		return;

	if ((guint64)lmf->previous_lmf & 1)
		/* Already hijacked or trampoline LMF entry */
		return;

	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
	lmf->rip = *(guint64*)(lmf->rsp - 8);
	/* Signal that lmf->rip is set */
	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);

	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}
GSList*
mono_amd64_get_exception_trampolines (gboolean aot)
{
	MonoTrampInfo *info;
	GSList *tramps = NULL;

	mono_arch_get_throw_pending_exception (&info, aot);
	tramps = g_slist_prepend (tramps, info);

	/* LLVM needs different throw trampolines */
	get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
	tramps = g_slist_prepend (tramps, info);

	get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
	tramps = g_slist_prepend (tramps, info);

	return tramps;
}
void
mono_arch_exceptions_init (void)
{
	guint8 *tramp;
	GSList *tramps, *l;

	if (mono_aot_only) {
		throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
		mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
		tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
		mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
	} else {
		/* Call this to avoid initialization races */
		throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);

		tramps = mono_amd64_get_exception_trampolines (FALSE);
		for (l = tramps; l; l = l->next) {
			MonoTrampInfo *info = l->data;

			mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
			mono_tramp_info_register (info);
		}
		g_slist_free (tramps);
	}
}
#ifdef TARGET_WIN32

/*
 * The mono_arch_unwindinfo* methods are used to build and add
 * function table info for each emitted method from mono. On Winx64
 * the seh handler will not be called if the mono methods are not
 * added to the function table.
 *
 * We should not need to add non-volatile register info to the
 * table since mono stores that info elsewhere. (Except for the register
 * used for the frame pointer.)
 */
#define MONO_MAX_UNWIND_CODES 22

typedef union _UNWIND_CODE {
	struct {
		guchar CodeOffset;
		guchar UnwindOp : 4;
		guchar OpInfo   : 4;
	};
	gushort FrameOffset;
} UNWIND_CODE, *PUNWIND_CODE;
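/* Example encoding (matching the ops emitted below): a 'sub rsp, 0x28' whose
 * following instruction starts 8 bytes into the prolog is recorded as
 * CodeOffset = 8, UnwindOp = 2 (UWOP_ALLOC_SMALL), OpInfo = (0x28 - 8) / 8 = 4.
 */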
typedef struct _UNWIND_INFO {
	guchar Version       : 3;
	guchar Flags         : 5;
	guchar SizeOfProlog;
	guchar CountOfCodes;
	guchar FrameRegister : 4;
	guchar FrameOffset   : 4;
	/* custom size for mono, allowing for:
	 * UWOP_PUSH_NONVOL ebp                 offset = 21
	 * UWOP_ALLOC_LARGE : requires 2 or 3   offset = 20
	 * UWOP_SET_FPREG : requires 2          offset = 17
	 * UWOP_PUSH_NONVOL                     offset = 15-0
	 */
	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];

/*	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
 *	union {
 *		OPTIONAL ULONG ExceptionHandler;
 *		OPTIONAL ULONG FunctionEntry;
 *	};
 *	OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;

typedef struct
{
	RUNTIME_FUNCTION runtimeFunction;
	UNWIND_INFO unwindInfo;
} MonoUnwindInfo, *PMonoUnwindInfo;
void
mono_arch_unwindinfo_create (gpointer* monoui)
{
	PMonoUnwindInfo newunwindinfo;
	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
	newunwindinfo->unwindInfo.Version = 1;
}
void
mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
	unwindcode->FrameOffset = 0; /* Assuming no frame pointer offset for mono */
	unwindcode++;
	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
	unwindcode->OpInfo = reg;

	unwindinfo->unwindInfo.FrameRegister = reg;

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
void
mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
{
	PMonoUnwindInfo unwindinfo;
	PUNWIND_CODE unwindcode;
	guchar codeindex;
	guchar codesneeded;

	if (!*monoui)
		mono_arch_unwindinfo_create (monoui);

	unwindinfo = (MonoUnwindInfo*)*monoui;

	if (size < 0x8)
		g_error ("Stack allocation must be equal to or greater than 0x8.");

	if (size <= 0x80)
		codesneeded = 1;
	else if (size <= 0x7FFF8)
		codesneeded = 2;
	else
		codesneeded = 3;

	if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
		g_error ("Larger allocation needed for the unwind information.");

	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];

	if (codesneeded == 1) {
		/* The size of the allocation is
		   (the number in the OpInfo member) times 8 plus 8. */
		unwindcode->OpInfo = (size - 8)/8;
		unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
	}
	else {
		if (codesneeded == 3) {
			/* The unscaled size of the allocation is recorded
			   in the next two slots in little-endian format. */
			*((unsigned int*)(&unwindcode->FrameOffset)) = size;
			unwindcode += 2;
			unwindcode->OpInfo = 1;
		}
		else {
			/* The size of the allocation divided by 8
			   is recorded in the next slot. */
			unwindcode->FrameOffset = size/8;
			unwindcode++;
			unwindcode->OpInfo = 0;
		}
		unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
	}

	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));

	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
		g_error ("Adding unwind info in wrong order.");

	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
}
guint
mono_arch_unwindinfo_get_size (gpointer monoui)
{
	PMonoUnwindInfo unwindinfo;
	if (!monoui)
		return 0;

	unwindinfo = (MonoUnwindInfo*)monoui;
	return (8 + sizeof (MonoUnwindInfo)) -
		(sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
}
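/* The extra 8 bytes cover the padding introduced when the structure is copied
 * to an 8-aligned address behind the method's code (see
 * mono_arch_unwindinfo_install_unwind_info below); unused UNWIND_CODE slots
 * are subtracted from the size.
 */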
static PRUNTIME_FUNCTION
MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
{
	MonoJitInfo *ji;
	guint64 pos;
	PMonoUnwindInfo targetinfo;
	MonoDomain *domain = mono_domain_get ();

	ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
	if (!ji)
		return 0;

	pos = (guint64)(((char*)ji->code_start) + ji->code_size);

	targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);

	targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);

	return &targetinfo->runtimeFunction;
}
void
mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
{
	PMonoUnwindInfo unwindinfo, targetinfo;
	guchar codecount;
	guint64 targetlocation;
	if (!*monoui)
		return;

	unwindinfo = (MonoUnwindInfo*)*monoui;
	targetlocation = (guint64)&(((guchar*)code)[code_size]);
	targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);

	unwindinfo->runtimeFunction.EndAddress = code_size;
	unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);

	memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));

	codecount = unwindinfo->unwindInfo.CountOfCodes;
	if (codecount) {
		memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
			sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
	}

	g_free (unwindinfo);
	*monoui = 0;

	RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
}

#endif /* TARGET_WIN32 */
#if MONO_SUPPORT_TASKLETS
MonoContinuationRestore
mono_tasklets_arch_restore (void)
{
	static guint8* saved = NULL;
	guint8 *code, *start;
	int cont_reg = AMD64_R9; /* register usable on both call conventions */
	const guint kMaxCodeSize = NACL_SIZE (64, 128);

	if (saved)
		return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (kMaxCodeSize);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
	 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
	 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
	 * We move cont to cont_reg since we need both rcx and rdi for the copy;
	 * state is moved to $rax so it's set up as the return value and we can overwrite $rsi.
	 */
	amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
	amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
	/* setup the copy of the stack */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
	amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
	x86_cld (code);
	amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
	amd64_prefix (code, X86_REP_PREFIX);
	amd64_movsl (code);

	/* now restore the registers from the LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, lmf), 8);
	amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, MONO_STRUCT_OFFSET (MonoLMF, rsp), 8);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state is already in rax */
	amd64_jump_membase (code, cont_reg, MONO_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= kMaxCodeSize);

	nacl_global_codeman_validate (&start, kMaxCodeSize, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	saved = start;
	return (MonoContinuationRestore)saved;
}
#endif /* MONO_SUPPORT_TASKLETS */
/*
 * mono_arch_setup_resume_sighandler_ctx:
 *
 * Setup CTX so execution continues at FUNC.
 */
void
mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
{
	/*
	 * When resuming from a signal handler, the stack should be misaligned, just like right after
	 * a call.
	 */
	if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
		MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
	MONO_CONTEXT_SET_IP (ctx, func);
}