2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifdef HAVE_UCONTEXT_H
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #include "debug-mini.h"
34 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Per-exception-type handlers installed by win32_seh_set_handler ();
 * consulted by seh_handler via the W32_SEH_HANDLE_EX macro below. */
37 static MonoW32ExceptionHandler fpe_handler;
38 static MonoW32ExceptionHandler ill_handler;
39 static MonoW32ExceptionHandler segv_handler;
/* Previous top-level filter, saved so win32_seh_cleanup () can restore it. */
41 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the <type>_handler if one was registered; 'er' and 'sctx'
 * are expected in scope at the expansion site (see seh_handler). */
43 #define W32_SEH_HANDLE_EX(_ex) \
44 if (_ex##_handler) _ex##_handler(0, er, sctx)
47 * Unhandled Exception Filter
48 * Top-level per-process exception handler.
/*
 * seh_handler:
 *
 * Top-level Win64 unhandled-exception filter. Converts the Win32 CONTEXT
 * into a MonoContext ('sctx'), dispatches to the registered Mono handler
 * for the exception type, then copies the (possibly modified) MonoContext
 * back into the CONTEXT so execution can resume there.
 * NOTE(review): several lines (locals, 'break's, g_free of sctx, return)
 * are not visible in this excerpt.
 */
50 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
57 	res = EXCEPTION_CONTINUE_EXECUTION;
59 	er = ep->ExceptionRecord;
60 	ctx = ep->ContextRecord;
/* sctx is heap-allocated here; presumably freed after the copy-back below
 * — TODO confirm against the full source. */
61 	sctx = g_malloc(sizeof(MonoContext));
63 	/* Copy Win32 context to UNIX style context */
/* Dispatch on the Win32 exception code to the matching Mono handler. */
78 	switch (er->ExceptionCode) {
79 	case EXCEPTION_ACCESS_VIOLATION:
80 		W32_SEH_HANDLE_EX(segv);
82 	case EXCEPTION_ILLEGAL_INSTRUCTION:
83 		W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults funnel into the single fpe handler. */
85 	case EXCEPTION_INT_DIVIDE_BY_ZERO:
86 	case EXCEPTION_INT_OVERFLOW:
87 	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
88 	case EXCEPTION_FLT_OVERFLOW:
89 	case EXCEPTION_FLT_UNDERFLOW:
90 	case EXCEPTION_FLT_INEXACT_RESULT:
91 		W32_SEH_HANDLE_EX(fpe);
97 	/* Copy context back */
/* Non-volatile registers plus RIP: these must reflect any changes the
 * Mono handler made, since execution resumes from this CONTEXT. */
100 	ctx->Rdi = sctx->rdi;
101 	ctx->Rsi = sctx->rsi;
102 	ctx->Rbx = sctx->rbx;
103 	ctx->Rbp = sctx->rbp;
104 	ctx->R12 = sctx->r12;
105 	ctx->R13 = sctx->r13;
106 	ctx->R14 = sctx->r14;
107 	ctx->R15 = sctx->r15;
108 	ctx->Rip = sctx->rip;
110 	/* Volatile But should not matter?*/
111 	ctx->Rax = sctx->rax;
112 	ctx->Rcx = sctx->rcx;
113 	ctx->Rdx = sctx->rdx;
/* Install seh_handler as the process-wide unhandled-exception filter,
 * remembering the previous filter for win32_seh_cleanup (). */
120 void win32_seh_init()
122 	old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init (). */
125 void win32_seh_cleanup()
127 	if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register a Mono handler for one exception category ('type' selects
 * fpe/ill/segv; the selecting switch is not visible in this excerpt). */
130 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
134 		fpe_handler = handler;
137 		ill_handler = handler;
140 		segv_handler = handler;
147 #endif /* TARGET_WIN32 */
150 * mono_arch_get_restore_context:
152 * Returns a pointer to a method which restores a previously saved sigcontext.
/*
 * Generates (or, under AOT, loads) a small stub which receives a
 * MonoContext* in ARG_REG1 and restores the saved register state,
 * finally jumping to the saved rip. %r11 is used as scratch and is
 * therefore not restored; %r8-%r10 restores are deliberately disabled.
 */
155 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
157 	guint8 *start = NULL;
159 	MonoJumpInfo *ji = NULL;
160 	GSList *unwind_ops = NULL;
162 	/* restore_contect (MonoContext *ctx) */
164 	start = code = mono_global_codeman_reserve (256);
/* Keep the context pointer in %r11 so every register (including
 * ARG_REG1 itself) can be overwritten while restoring. */
166 	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
168 	/* Restore all registers except %rip and %r11 */
169 	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rax), 8);
170 	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rcx), 8);
171 	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rdx), 8);
172 	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rbx), 8);
173 	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rbp), 8);
174 	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsi), 8);
175 	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rdi), 8);
176 	//amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r8), 8);
177 	//amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r9), 8);
178 	//amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r10), 8);
179 	amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r12), 8);
180 	amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r13), 8);
181 	amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r14), 8);
182 	amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, r15), 8);
/* Under valgrind, load rip BEFORE switching rsp so valgrind never sees a
 * read below the new stack pointer. */
184 	if (mono_running_on_valgrind ()) {
185 		/* Prevent 'Address 0x... is just below the stack ptr.' errors */
186 		amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsp), 8);
187 		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rip), 8);
188 		amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
/* Non-valgrind path (the 'else' arm is not visible in this excerpt). */
190 		amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rsp), 8);
191 		/* get return address */
192 		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11,  G_STRUCT_OFFSET (MonoContext, rip), 8);
195 	/* jump to the saved IP */
196 	amd64_jump_reg (code, AMD64_R11);
198 	mono_arch_flush_icache (start, code - start);
201 		*info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);
207 * mono_arch_get_call_filter:
209 * Returns a pointer to a method which calls an exception filter. We
210 * also use this function to call finally handlers (we pass NULL as
211 * @exc object in this case).
/*
 * Generates a stub: call_filter (MonoContext *ctx, gpointer handler_ip).
 * It saves the caller's callee-saved registers, loads the ones from the
 * supplied context (so the filter/finally runs with the faulting frame's
 * register state), calls the handler, then restores everything.
 */
214 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
220 	MonoJumpInfo *ji = NULL;
221 	GSList *unwind_ops = NULL;
223 	start = code = mono_global_codeman_reserve (128);
225 	/* call_filter (MonoContext *ctx, unsigned long eip) */
228 	/* Alloc new frame */
229 	amd64_push_reg (code, AMD64_RBP);
230 	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
232 	/* Save callee saved regs */
234 	for (i = 0; i < AMD64_NREG; ++i)
235 		if (AMD64_IS_CALLEE_SAVED_REG (i)) {
236 			amd64_push_reg (code, i);
/* Extra RBP save: rbp is about to be clobbered with the context's value. */
242 	amd64_push_reg (code, AMD64_RBP);
244 	/* Make stack misaligned, the call will make it aligned again */
246 	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load frame pointer and callee-saved registers from the MonoContext. */
249 	amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
250 	/* load callee saved regs */
251 	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
252 	amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
253 	amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
254 	amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
255 	amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* rdi/rsi loads — presumably the Win64 arm where these are callee-saved
 * (the surrounding #ifdef is not visible in this excerpt). */
257 	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
258 	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
261 	/* call the handler */
262 	amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment and recover the saved RBP. */
265 	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
268 	amd64_pop_reg (code, AMD64_RBP);
270 	/* Restore callee saved regs */
271 	for (i = AMD64_NREG; i >= 0; --i)
272 		if (AMD64_IS_CALLEE_SAVED_REG (i))
273 			amd64_pop_reg (code, i);
278 	g_assert ((code - start) < 128);
280 	mono_arch_flush_icache (start, code - start);
283 		*info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);
289 * The first few arguments are dummy, to force the other arguments to be passed on
290 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/*
 * mono_amd64_throw_exception:
 *
 * Called from the throw trampoline. dummy1-6 occupy all six integer
 * argument registers so the real arguments arrive on the stack — this
 * keeps the trampoline from having to preserve the argument registers.
 * The register values describe the throwing frame; a MonoContext built
 * from them is handed to mono_handle_exception () and then restored.
 * Never returns. NOTE(review): the trailing parameter(s) and the ctx
 * construction are not visible in this excerpt.
 */
293 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
294 							guint64 dummy5, guint64 dummy6,
295 							MonoObject *exc, guint64 rip, guint64 rsp,
296 							guint64 rbx, guint64 rbp, guint64 r12, guint64 r13, 
297 							guint64 r14, guint64 r15, guint64 rdi, guint64 rsi, 
298 							guint64 rax, guint64 rcx, guint64 rdx,
/* Cached lazily; restore_context never returns once invoked. */
301 	static void (*restore_context) (MonoContext *);
304 	if (!restore_context)
305 		restore_context = mono_get_restore_context ();
/* Clear any stale stack trace on a managed Exception before rethrow. */
321 	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
322 		MonoException *mono_ex = (MonoException*)exc;
324 			mono_ex->stack_trace = NULL;
/* Debugger path: if rip points just past a call (0xe8), rewind it by the
 * call's length (5) so the debugger sees the call site, and give the
 * debugger first chance at the exception. */
327 	if (mono_debug_using_mono_debugger ()) {
328 		guint8 buf [16], *code;
330 		mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
333 		if (buf [3] == 0xe8) {
334 			MonoContext ctx_cp = ctx;
335 			ctx_cp.rip = rip - 5;
337 			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
338 				restore_context (&ctx_cp);
339 				g_assert_not_reached ();
344 	/* adjust eip so that it point into the call instruction */
347 	mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
348 	restore_context (&ctx);
350 	g_assert_not_reached ();
/*
 * Like mono_amd64_throw_exception (), but receives a corlib type-def
 * token index instead of an exception object: materializes the corlib
 * exception from the token, adjusts rip, and forwards to
 * mono_amd64_throw_exception (). Same dummy1-6 register-spacing trick.
 */
354 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
355 								   guint64 dummy5, guint64 dummy6,
356 								   guint32 ex_token_index, 
357 								   guint64 rip, guint64 rsp,
358 								   guint64 rbx, guint64 rbp, guint64 r12, guint64 r13, 
359 								   guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
360 								   guint64 rax, guint64 rcx, guint64 rdx,
/* Rebuild the full metadata token from the TYPE_DEF index. */
363 	guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
366 	ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
370 	/* Negate the ip adjustment done in mono_amd64_throw_exception () */
373 	mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, (MonoObject*)ex, rip, rsp, rbx, rbp, r12, r13, r14, r15, rdi, rsi, rax, rcx, rdx, FALSE);
377 * get_throw_trampoline:
379 * Generate a call to mono_amd64_throw_exception/
380 * mono_amd64_throw_corlib_exception.
/*
 * get_throw_trampoline:
 *
 * Emits the trampoline which marshals the current register state onto
 * the stack and tail-dispatches into mono_amd64_throw_exception /
 * mono_amd64_throw_corlib_exception. The pushes below are the stack
 * arguments of those functions in reverse order (rdx first, exc/token
 * last); six zero pushes at the end fill the dummy1-6 register slots'
 * stack shadow. 'llvm_abs' switches the corlib variant to an absolute
 * pc (passed negated) instead of a pc offset.
 */
383 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean aot)
387 	MonoJumpInfo *ji = NULL;
388 	GSList *unwind_ops = NULL;
390 	start = code = mono_global_codeman_reserve (64);
394 	unwind_ops = mono_arch_get_cie_program ();
/* Remember the entry rsp in %r11: it locates the caller's return
 * address ([r11+0]) and the caller's rsp (r11+8). */
396 	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
400 		amd64_push_reg (code, AMD64_ARG_REG2);
403 		 * The caller is LLVM code which passes the absolute address not a pc offset,
404 		 * so compensate by passing 0 as 'rip' and passing the negated abs address as
407 			amd64_neg_membase (code, X86_ESP, 0);
/* 'rethrow' flag argument. */
409 	amd64_push_imm (code, rethrow);
/* Volatile regs rdx/rcx/rax, then the callee-saved set — matching the
 * guint64 parameters of mono_amd64_throw_exception in reverse. */
411 	amd64_push_reg (code, AMD64_RDX);
412 	amd64_push_reg (code, AMD64_RCX);
413 	amd64_push_reg (code, AMD64_RAX);
414 	amd64_push_reg (code, AMD64_RSI);
415 	amd64_push_reg (code, AMD64_RDI);
416 	amd64_push_reg (code, AMD64_R15);
417 	amd64_push_reg (code, AMD64_R14);
418 	amd64_push_reg (code, AMD64_R13);
419 	amd64_push_reg (code, AMD64_R12);
420 	amd64_push_reg (code, AMD64_RBP);
421 	amd64_push_reg (code, AMD64_RBX);
/* 'rsp' argument: caller's stack pointer = entry rsp + 8 (skip retaddr). */
424 	amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
425 	amd64_push_reg (code, AMD64_RAX);
/* 'rip' argument: 0 in the llvm_abs case, else the return address. */
429 		amd64_push_imm (code, 0);
431 		amd64_push_membase (code, AMD64_R11, 0);
/* 'exc' object / corlib token argument, per variant. */
435 		amd64_push_reg (code, AMD64_ARG_REG1);
438 		amd64_push_reg (code, AMD64_ARG_REG1);
440 	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, (15 + 1) * sizeof (gpointer));
/* Six dummies to pad out the register-argument positions. */
444 	amd64_push_imm (code, 0);
445 	amd64_push_imm (code, 0);
446 	amd64_push_imm (code, 0);
447 	amd64_push_imm (code, 0);
448 	amd64_push_imm (code, 0);
449 	amd64_push_imm (code, 0);
/* AOT: load the target through a patched RIP-relative slot; JIT: embed
 * the function address directly. */
453 		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? (llvm_abs ? "mono_amd64_throw_corlib_exception_abs" : "mono_amd64_throw_corlib_exception") : "mono_amd64_throw_exception");
454 		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
456 		amd64_mov_reg_imm (code, AMD64_R11, corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception);
458 	amd64_call_reg (code, AMD64_R11);
/* The callee never returns; trap if it somehow does. */
459 	amd64_breakpoint (code);
461 	mono_arch_flush_icache (start, code - start);
463 	g_assert ((code - start) < 64);
465 	mono_save_trampoline_xdebug_info (corlib ? (llvm_abs ? "throw_corlib_exception_trampoline_llvm_abs" : "throw_corlib_exception_trampoline")
466 									  : "throw_exception_trampoline", start, code - start, unwind_ops);
469 		*info = mono_tramp_info_create (g_strdup_printf (corlib ? (llvm_abs ? "throw_corlib_exception_llvm_abs" : "throw_corlib_exception") : (rethrow ? "rethrow_exception" : "throw_exception")), start, code - start, ji, unwind_ops);
475 * mono_arch_get_throw_exception:
477 * Returns a function pointer which can be used to raise
478 * exceptions. The returned function has the following
479 * signature: void (*func) (MonoException *exc);
/* Plain throw: rethrow=FALSE, corlib=FALSE, llvm_abs=FALSE. */
483 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
485 	return get_throw_trampoline (info, FALSE, FALSE, FALSE, aot);
/* Same trampoline with rethrow=TRUE (preserves the original stack trace). */
489 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
491 	return get_throw_trampoline (info, TRUE, FALSE, FALSE, aot);
495 * mono_arch_get_throw_corlib_exception:
497 * Returns a function pointer which can be used to raise
498 * corlib exceptions. The returned function has the following
499 * signature: void (*func) (guint32 ex_token, guint32 offset);
500 * Here, offset is the offset which needs to be substracted from the caller IP
501 * to get the IP of the throw. Passing the offset has the advantage that it
502 * needs no relocations in the caller.
/* Corlib-token variant: corlib=TRUE, pc-offset form (llvm_abs=FALSE). */
505 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
507 	return get_throw_trampoline (info, FALSE, TRUE, FALSE, aot);
511 * mono_arch_find_jit_info_ext:
513 * This function is used to gather information from @ctx, and store it in @frame_info.
514 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
515 * is modified if needed.
516 * Returns TRUE on success, FALSE otherwise.
517 * This function is a version of mono_arch_find_jit_info () where all the results are
518 * returned in a StackFrameInfo structure.
/*
 * Unwind one frame: fill 'frame' with info about the frame at 'ctx' and
 * compute 'new_ctx' (the caller's context). Managed frames use DWARF-ish
 * cached unwind info; native frames are crossed via the LMF chain.
 * NOTE(review): the ji!=NULL / LMF branch structure is partly elided here.
 */
521 mono_arch_find_jit_info_ext (MonoDomain *domain, MonoJitTlsData *jit_tls, 
522 							 MonoJitInfo *ji, MonoContext *ctx, 
523 							 MonoContext *new_ctx, MonoLMF **lmf, 
524 							 StackFrameInfo *frame)
526 	gpointer ip = MONO_CONTEXT_GET_IP (ctx);
528 	memset (frame, 0, sizeof (StackFrameInfo));
530 	frame->managed = FALSE;
/* +1: the register array also carries RIP alongside MONO_MAX_IREGS. */
535 		gssize regs [MONO_MAX_IREGS + 1];
537 		guint32 unwind_info_len;
540 		frame->type = FRAME_TYPE_MANAGED;
/* Dynamic-method wrappers still count as managed for stack walks. */
542 		if (!ji->method->wrapper_type || ji->method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)
543 			frame->managed = TRUE;
546 			unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
548 			unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
/* Seed the unwinder with the current register values ... */
550 		regs [AMD64_RAX] = new_ctx->rax;
551 		regs [AMD64_RBX] = new_ctx->rbx;
552 		regs [AMD64_RCX] = new_ctx->rcx;
553 		regs [AMD64_RDX] = new_ctx->rdx;
554 		regs [AMD64_RBP] = new_ctx->rbp;
555 		regs [AMD64_RSP] = new_ctx->rsp;
556 		regs [AMD64_RSI] = new_ctx->rsi;
557 		regs [AMD64_RDI] = new_ctx->rdi;
558 		regs [AMD64_RIP] = new_ctx->rip;
559 		regs [AMD64_R12] = new_ctx->r12;
560 		regs [AMD64_R13] = new_ctx->r13;
561 		regs [AMD64_R14] = new_ctx->r14;
562 		regs [AMD64_R15] = new_ctx->r15;
564 		mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, 
565 						   (guint8*)ji->code_start + ji->code_size,
566 						   ip, regs, MONO_MAX_IREGS + 1, &cfa);
/* ... and read back the caller's values. */
568 		new_ctx->rax = regs [AMD64_RAX];
569 		new_ctx->rbx = regs [AMD64_RBX];
570 		new_ctx->rcx = regs [AMD64_RCX];
571 		new_ctx->rdx = regs [AMD64_RDX];
572 		new_ctx->rbp = regs [AMD64_RBP];
573 		new_ctx->rsp = regs [AMD64_RSP];
574 		new_ctx->rsi = regs [AMD64_RSI];
575 		new_ctx->rdi = regs [AMD64_RDI];
576 		new_ctx->rip = regs [AMD64_RIP];
577 		new_ctx->r12 = regs [AMD64_R12];
578 		new_ctx->r13 = regs [AMD64_R13];
579 		new_ctx->r14 = regs [AMD64_R14];
580 		new_ctx->r15 = regs [AMD64_R15];
582 		/* The CFA becomes the new SP value */
583 		new_ctx->rsp = (gssize)cfa;
/* Drop LMF entries that belong to frames already unwound past. */
588 		if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
589 			/* remove any unused lmf */
590 			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
593 #ifndef MONO_AMD64_NO_PUSHES
594 		/* Pop arguments off the stack */
596 			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
598 			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
599 			new_ctx->rsp += stack_to_pop;
/* previous_lmf bit 2: soft-debugger invoke marker — resume from the
 * context stored in the extended LMF instead of unwinding. */
607 		if (((guint64)(*lmf)->previous_lmf) & 2) {
609 			 * This LMF entry is created by the soft debug code to mark transitions to
610 			 * managed code done during invokes.
612 			MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
614 			g_assert (ext->debugger_invoke);
616 			memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
618 			*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
620 			frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
/* previous_lmf bit 1: rip already captured in the LMF (hijacked). */
625 		if (((guint64)(*lmf)->previous_lmf) & 1) {
626 			/* This LMF has the rip field set */
628 		} else if ((*lmf)->rsp == 0) {
633 			 * The rsp field is set just before the call which transitioned to native
634 			 * code. Obtain the rip from the stack.
636 			rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
639 		ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
641 			// FIXME: This can happen with multiple appdomains (bug #444383)
649 		frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
/* Rebuild the caller context from the LMF's saved registers. */
652 		new_ctx->rbp = (*lmf)->rbp;
653 		new_ctx->rsp = (*lmf)->rsp;
655 		new_ctx->rbx = (*lmf)->rbx;
656 		new_ctx->r12 = (*lmf)->r12;
657 		new_ctx->r13 = (*lmf)->r13;
658 		new_ctx->r14 = (*lmf)->r14;
659 		new_ctx->r15 = (*lmf)->r15;
661 		new_ctx->rdi = (*lmf)->rdi;
662 		new_ctx->rsi = (*lmf)->rsi;
/* Pop the LMF, masking the two flag bits out of previous_lmf. */
665 		*lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
676 * Called by resuming from a signal handler.
/*
 * Runs on the normal stack after mono_arch_handle_exception () redirects
 * execution out of the signal handler. Fetches the saved context from
 * TLS, gives the debugger first chance, then performs the actual
 * exception handling and resumes via restore_context (never returns).
 */
679 handle_signal_exception (gpointer obj, gboolean test_only)
681 	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
683 	static void (*restore_context) (MonoContext *);
685 	if (!restore_context)
686 		restore_context = mono_get_restore_context ();
/* ex_ctx was stashed by mono_arch_handle_exception () before resuming. */
688 	memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
690 	if (mono_debugger_handle_exception (&ctx, (MonoObject *)obj))
693 	mono_handle_exception (&ctx, obj, MONO_CONTEXT_GET_IP (&ctx), test_only);
695 	restore_context (&ctx);
699 * mono_arch_handle_exception:
701 * @ctx: saved processor state
702 * @obj: the exception object
/*
 * mono_arch_handle_exception:
 * @ctx: saved processor state
 * @obj: the exception object
 *
 * Signal-handler entry point. On sigaction platforms it does NOT handle
 * the exception inside the signal handler; instead it rewrites the
 * ucontext so that, on return from the signal, execution continues in
 * handle_signal_exception () on the normal stack. Elsewhere it handles
 * the exception inline on the current stack.
 */
705 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
707 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
709 	 * Handling the exception in the signal handler is problematic, since the original
710 	 * signal is disabled, and we could run arbitrary code though the debugger. So
711 	 * resume into the normal stack and do most work there if possible.
713 	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
714 	guint64 sp = UCONTEXT_REG_RSP (sigctx);
716 	/* Pass the ctx parameter in TLS */
717 	mono_arch_sigctx_to_monoctx (sigctx, &jit_tls->ex_ctx);
718 	/* The others in registers */
/* rdi/rsi = first two SysV argument registers of handle_signal_exception. */
719 	UCONTEXT_REG_RDI (sigctx) = (guint64)obj;
720 	UCONTEXT_REG_RSI (sigctx) = test_only;
722 	/* Allocate a stack frame below the red zone */
724 	/* The stack should be unaligned */
727 	UCONTEXT_REG_RSP (sigctx) = sp;
729 	UCONTEXT_REG_RIP (sigctx) = (guint64)handle_signal_exception;
/* Fallback: handle inline using a MonoContext round-trip. */
735 	mono_arch_sigctx_to_monoctx (sigctx, &mctx);
737 	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
740 	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
742 	mono_arch_monoctx_to_sigctx (&mctx, sigctx);
748 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
/* Accessor for the platform-specific general-register array of a ucontext. */
749 static inline guint64*
750 gregs_from_ucontext (ucontext_t *ctx)
752 	return (guint64 *) UCONTEXT_GREGS (ctx);
/*
 * Copy CPU state out of a signal context into a MonoContext.
 * sigaction build: sigctx is a ucontext_t; otherwise sigctx is assumed
 * to already be a MonoContext and is copied field by field.
 */
756 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
758 #if defined(MONO_ARCH_USE_SIGACTION)
759 	ucontext_t *ctx = (ucontext_t*)sigctx;
761 	mctx->rax = UCONTEXT_REG_RAX (ctx);
762 	mctx->rbx = UCONTEXT_REG_RBX (ctx);
763 	mctx->rcx = UCONTEXT_REG_RCX (ctx);
764 	mctx->rdx = UCONTEXT_REG_RDX (ctx);
765 	mctx->rbp = UCONTEXT_REG_RBP (ctx);
766 	mctx->rsp = UCONTEXT_REG_RSP (ctx);
767 	mctx->rsi = UCONTEXT_REG_RSI (ctx);
768 	mctx->rdi = UCONTEXT_REG_RDI (ctx);
769 	mctx->rip = UCONTEXT_REG_RIP (ctx);
770 	mctx->r12 = UCONTEXT_REG_R12 (ctx);
771 	mctx->r13 = UCONTEXT_REG_R13 (ctx);
772 	mctx->r14 = UCONTEXT_REG_R14 (ctx);
773 	mctx->r15 = UCONTEXT_REG_R15 (ctx);
/* Non-sigaction arm (the #else is not visible in this excerpt). */
775 	MonoContext *ctx = (MonoContext *)sigctx;
777 	mctx->rax = ctx->rax;
778 	mctx->rbx = ctx->rbx;
779 	mctx->rcx = ctx->rcx;
780 	mctx->rdx = ctx->rdx;
781 	mctx->rbp = ctx->rbp;
782 	mctx->rsp = ctx->rsp;
783 	mctx->rsi = ctx->rsi;
784 	mctx->rdi = ctx->rdi;
785 	mctx->rip = ctx->rip;
786 	mctx->r12 = ctx->r12;
787 	mctx->r13 = ctx->r13;
788 	mctx->r14 = ctx->r14;
789 	mctx->r15 = ctx->r15;
/*
 * Inverse of mono_arch_sigctx_to_monoctx (): write a MonoContext back
 * into the signal context so the kernel resumes with the new state.
 */
794 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
796 #if defined(MONO_ARCH_USE_SIGACTION)
797 	ucontext_t *ctx = (ucontext_t*)sigctx;
799 	UCONTEXT_REG_RAX (ctx) = mctx->rax;
800 	UCONTEXT_REG_RBX (ctx) = mctx->rbx;
801 	UCONTEXT_REG_RCX (ctx) = mctx->rcx;
802 	UCONTEXT_REG_RDX (ctx) = mctx->rdx;
803 	UCONTEXT_REG_RBP (ctx) = mctx->rbp;
804 	UCONTEXT_REG_RSP (ctx) = mctx->rsp;
805 	UCONTEXT_REG_RSI (ctx) = mctx->rsi;
806 	UCONTEXT_REG_RDI (ctx) = mctx->rdi;
807 	UCONTEXT_REG_RIP (ctx) = mctx->rip;
808 	UCONTEXT_REG_R12 (ctx) = mctx->r12;
809 	UCONTEXT_REG_R13 (ctx) = mctx->r13;
810 	UCONTEXT_REG_R14 (ctx) = mctx->r14;
811 	UCONTEXT_REG_R15 (ctx) = mctx->r15;
/* Non-sigaction arm (the #else is not visible in this excerpt). */
813 	MonoContext *ctx = (MonoContext *)sigctx;
815 	ctx->rax = mctx->rax;
816 	ctx->rbx = mctx->rbx;
817 	ctx->rcx = mctx->rcx;
818 	ctx->rdx = mctx->rdx;
819 	ctx->rbp = mctx->rbp;
820 	ctx->rsp = mctx->rsp;
821 	ctx->rsi = mctx->rsi;
822 	ctx->rdi = mctx->rdi;
823 	ctx->rip = mctx->rip;
824 	ctx->r12 = mctx->r12;
825 	ctx->r13 = mctx->r13;
826 	ctx->r14 = mctx->r14;
827 	ctx->r15 = mctx->r15;
/* Extract the instruction pointer from a signal context (either form). */
832 mono_arch_ip_from_context (void *sigctx)
834 #if defined(MONO_ARCH_USE_SIGACTION)
835 	ucontext_t *ctx = (ucontext_t*)sigctx;
837 	return (gpointer)UCONTEXT_REG_RIP (ctx);
839 	MonoContext *ctx = sigctx;
840 	return (gpointer)ctx->rip;
/* Re-protect the soft stack-overflow guard pages after the handler ran
 * on them; prepare_for_guard_pages () arranges for this to be executed
 * before control returns to the interrupted code. */
845 restore_soft_guard_pages (void)
847 	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
848 	if (jit_tls->stack_ovf_guard_base)
849 		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
853 * this function modifies mctx so that when it is restored, it
854 * won't execcute starting at mctx.eip, but in a function that
855 * will restore the protection on the soft-guard pages and return back to
856 * continue at mctx.eip.
/* Redirect mctx so that restore_soft_guard_pages () runs first: push the
 * original rip as a return address and point rip at the helper. */
859 prepare_for_guard_pages (MonoContext *mctx)
862 	sp = (gpointer)(mctx->rsp);
864 	/* the return addr */
865 	sp [0] = (gpointer)(mctx->rip);
866 	mctx->rip = (guint64)restore_soft_guard_pages;
867 	mctx->rsp = (guint64)sp;
/*
 * Continuation invoked on the real stack after a signal handled on the
 * altstack (see mono_arch_handle_altstack_exception). Handles the
 * exception, optionally re-arms the guard pages on stack overflow, and
 * restores the context (never returns).
 */
871 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
873 	void (*restore_context) (MonoContext *);
876 	restore_context = mono_get_restore_context ();
877 	mono_arch_sigctx_to_monoctx (sigctx, &mctx);
/* Debugger-first path; guard pages presumably re-armed only when
 * stack_ovf (the condition line is not visible in this excerpt). */
879 	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
881 			prepare_for_guard_pages (&mctx);
882 		restore_context (&mctx);
885 	mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
887 		prepare_for_guard_pages (&mctx);
888 	restore_context (&mctx);
/*
 * SIGSEGV-on-altstack handler. Decides between a stack-overflow
 * exception and a hard native SIGSEGV report, then builds a fake call
 * frame on the *real* stack (below the red zone) and rewrites the
 * ucontext so the kernel "returns" into altstack_handle_and_restore ()
 * with a copy of the ucontext as its first argument.
 */
892 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
894 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
895 	MonoException *exc = NULL;
896 	ucontext_t *ctx = (ucontext_t*)sigctx;
897 	guint64 *gregs = gregs_from_ucontext (ctx);
898 	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP], NULL);
/* Preallocated exception: cannot allocate while the stack is exhausted. */
903 		exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: report as a native crash. */
905 		mono_handle_native_sigsegv (SIGSEGV, sigctx);
907 	/* setup a call frame on the real stack so that control is returned there
908 	 * and exception handling can continue.
909 	 * The frame looks like:
913 	 * 128 is the size of the red zone
915 	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte-align, then reserve the frame below the faulting rsp. */
918 	sp = (gpointer)(gregs [REG_RSP] & ~15);
919 	sp = (gpointer)((char*)sp - frame_size);
920 	/* the arguments must be aligned */
921 	sp [-1] = (gpointer)gregs [REG_RIP];
922 	/* may need to adjust pointers in the new struct copy, depending on the OS */
923 	memcpy (sp + 4, ctx, sizeof (ucontext_t));
924 	/* at the return form the signal handler execution starts in altstack_handle_and_restore() */
925 	gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
926 	gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV args: rdi = copied ucontext, rsi = exception, rdx = overflow flag. */
927 	gregs [REG_RDI] = (unsigned long)(sp + 4);
928 	gregs [REG_RSI] = (guint64)exc;
929 	gregs [REG_RDX] = stack_ovf;
/* Return the hijacked original ip stored in the LMF by
 * mono_arch_notify_pending_exc (), clearing the "rip is set" flag bit
 * from previous_lmf. */
934 mono_amd64_get_original_ip (void)
936 	MonoLMF *lmf = mono_get_lmf ();
940 	/* Reset the change to previous_lmf */
941 	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * Generates the trampoline that runs when a hijacked return address (see
 * mono_arch_notify_pending_exc) fires. It saves the possible return
 * values of the interrupted call (rax/rdx/xmm0), fetches the pending
 * exception, and either tail-jumps into the throw trampoline (exc set)
 * or restores everything and resumes at the original ip (no exc).
 */
947 mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
949 	guint8 *code, *start;
951 	gpointer throw_trampoline;
952 	MonoJumpInfo *ji = NULL;
953 	GSList *unwind_ops = NULL;
955 	start = code = mono_global_codeman_reserve (128);
957 	/* We are in the frame of a managed method after a call */
959 	 * We would like to throw the pending exception in such a way that it looks to
960 	 * be thrown from the managed method.
963 	/* Save registers which might contain the return value of the call */
964 	amd64_push_reg (code, AMD64_RAX);
965 	amd64_push_reg (code, AMD64_RDX);
/* FP return value: spill xmm0 to a fresh 8-byte slot. */
967 	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
968 	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Align the stack for the upcoming calls. */
971 	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
973 	/* Obtain the pending exception */
975 		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
976 		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
978 		amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
980 	amd64_call_reg (code, AMD64_R11);
982 	/* Check if it is NULL, and branch */
983 	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
984 	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
986 	/* exc != NULL branch */
988 	/* Save the exc on the stack */
989 	amd64_push_reg (code, AMD64_RAX);
/* Keep the stack aligned around the next call. */
991 	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
993 	/* Obtain the original ip and clear the flag in previous_lmf */
995 		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
996 		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
998 		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
1000 	amd64_call_reg (code, AMD64_R11);	
/* Reload the saved exception object into %r11. */
1003 	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
1005 	/* Pop saved stuff from the stack */
1006 	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
1008 	/* Setup arguments for the throw trampoline */
1010 	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
1011 	/* The trampoline expects the caller ip to be pushed on the stack */
1012 	amd64_push_reg (code, AMD64_RAX);
1014 	/* Call the throw trampoline */
1016 		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
1017 		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1019 		throw_trampoline = mono_get_throw_exception ();
1020 		amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
1022 	/* We use a jump instead of a call so we can push the original ip on the stack */
1023 	amd64_jump_reg (code, AMD64_R11);
1025 	/* ex == NULL branch */
1026 	mono_amd64_patch (br [0], code);
1028 	/* Obtain the original ip and clear the flag in previous_lmf */
1030 		ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
1031 		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1033 		amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
1035 	amd64_call_reg (code, AMD64_R11);	
1036 	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1038 	/* Restore registers */
1039 	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1040 	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1041 	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1042 	amd64_pop_reg (code, AMD64_RDX);
1043 	amd64_pop_reg (code, AMD64_RAX);
1045 	/* Return to original code */
1046 	amd64_jump_reg (code, AMD64_R11);
1048 	g_assert ((code - start) < 128);
1051 		*info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);
1056 static gpointer throw_pending_exception;
1059 * Called when a thread receives an async exception while executing unmanaged code.
1060 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1061 * the return address on the stack to point to a helper routine which throws the
/*
 * Hijack the return address of the current managed-to-native transition:
 * stash the real return ip in lmf->rip, mark it via bit 0 of
 * previous_lmf, and overwrite the stack slot with throw_pending_exception
 * so the pending async exception is thrown on return to managed code.
 */
1065 mono_arch_notify_pending_exc (void)
1067 	MonoLMF *lmf = mono_get_lmf ();
1070 		/* Not yet started */
1077 	if ((guint64)lmf->previous_lmf & 1)
1078 		/* Already hijacked or trampoline LMF entry */
1081 	/* lmf->rsp is set just before making the call which transitions to unmanaged code */
1082 	lmf->rip = *(guint64*)(lmf->rsp - 8);
1083 	/* Signal that lmf->rip is set */
1084 	lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
1086 	*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
/*
 * One-time init: resolve throw_pending_exception (AOT image or freshly
 * generated) and register the two LLVM-specific corlib throw
 * trampolines as JIT icalls.
 */
1090 mono_arch_exceptions_init (void)
1094 	if (mono_aot_only) {
1095 		throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
1097 		/* Call this to avoid initialization races */
1098 		throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);
1101 		/* LLVM needs different throw trampolines */
/* corlib=TRUE, llvm_abs=FALSE: pc-offset form. */
1102 		tramp = get_throw_trampoline (NULL, FALSE, TRUE, FALSE, FALSE);
1103 		mono_register_jit_icall (tramp, "mono_arch_llvm_throw_corlib_exception", NULL, TRUE);
/* corlib=TRUE, llvm_abs=TRUE: absolute-address form. */
1105 		tramp = get_throw_trampoline (NULL, FALSE, TRUE, TRUE, FALSE);
1106 		mono_register_jit_icall (tramp, "mono_arch_llvm_throw_corlib_exception_abs", NULL, TRUE);
1112 * The mono_arch_unwindinfo* methods are used to build and add
1113 * function table info for each emitted method from mono. On Winx64
1114 * the seh handler will not be called if the mono methods are not
1115 * added to the function table.
1117 * We should not need to add non-volatile register info to the
1118 * table since mono stores that info elsewhere. (Except for the register
1122 #define MONO_MAX_UNWIND_CODES 22
/* One Win64 unwind code slot; layout mirrors the OS UNWIND_CODE
 * (CodeOffset/UnwindOp:4/OpInfo:4 overlaid with a 16-bit FrameOffset). */
1124 typedef union _UNWIND_CODE {
1127 		guchar UnwindOp : 4;
1130 	gushort FrameOffset;
1131 } UNWIND_CODE, *PUNWIND_CODE;
/* Fixed-capacity variant of the OS UNWIND_INFO: codes are filled from
 * the END of UnwindCode[] backwards (see the add_* helpers below). */
1133 typedef struct _UNWIND_INFO {
1136 	guchar SizeOfProlog;
1137 	guchar CountOfCodes;
1138 	guchar FrameRegister : 4;
1139 	guchar FrameOffset   : 4;
1140 	/* custom size for mono allowing for mono allowing for*/
1141 	/*UWOP_PUSH_NONVOL ebp offset = 21*/
1142 	/*UWOP_ALLOC_LARGE :requires 2 or 3 offset = 20*/
1143 	/*UWOP_SET_FPREG : requires 2 offset = 17*/
1144 	/*UWOP_PUSH_NONVOL offset = 15-0*/
1145 	UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES]; 
1147 /*  	UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1149  * 	OPTIONAL ULONG ExceptionHandler;
1150  * 	OPTIONAL ULONG FunctionEntry;
1152  * 	OPTIONAL ULONG ExceptionData[]; */
1153 } UNWIND_INFO, *PUNWIND_INFO;
/* Function-table entry plus its unwind info, kept together per method. */
1157 	RUNTIME_FUNCTION runtimeFunction;
1158 	UNWIND_INFO unwindInfo;
1159 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a zeroed MonoUnwindInfo (Version must be 1 per the Win64
 * unwind-info format) and store it through *monoui. Caller owns it. */
1162 mono_arch_unwindinfo_create (gpointer* monoui)
1164 	PMonoUnwindInfo newunwindinfo;
1165 	*monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1166 	newunwindinfo->unwindInfo.Version = 1;
/*
 * Record a UWOP_PUSH_NONVOL (push of a non-volatile register) at prolog
 * offset nextip-codebegin. Codes are stored back-to-front in
 * UnwindCode[], so later-added (earlier-offset) codes must come in
 * increasing offset order — enforced by the g_error below.
 */
1170 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1172 	PMonoUnwindInfo unwindinfo;
1173 	PUNWIND_CODE unwindcode;
/* Lazily create the info block on first use. */
1176 		mono_arch_unwindinfo_create (monoui);
1178 	unwindinfo = (MonoUnwindInfo*)*monoui;
1180 	if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1181 		g_error ("Larger allocation needed for the unwind information.");
/* Claim the next slot from the end of the array. */
1183 	codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1184 	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1185 	unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1186 	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1187 	unwindcode->OpInfo = reg;
1189 	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1190 		g_error ("Adding unwind info in wrong order.");
1192 	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * Record a UWOP_SET_FPREG (establish 'reg' as the frame pointer, offset
 * 0). Consumes two UNWIND_CODE slots, hence the += 2 and the +1 bound
 * check; same back-to-front storage and ordering rules as push_nonvol.
 */
1196 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1198 	PMonoUnwindInfo unwindinfo;
1199 	PUNWIND_CODE unwindcode;
/* Lazily create the info block on first use. */
1202 		mono_arch_unwindinfo_create (monoui);
1204 	unwindinfo = (MonoUnwindInfo*)*monoui;
1206 	if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1207 		g_error ("Larger allocation needed for the unwind information.");
1209 	codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1210 	unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1211 	unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1213 	unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1214 	unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1215 	unwindcode->OpInfo = reg;
1217 	unwindinfo->unwindInfo.FrameRegister = reg;
1219 	if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1220 		g_error ("Adding unwind info in wrong order.");
1222 	unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
1226 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1228 PMonoUnwindInfo unwindinfo;
1229 PUNWIND_CODE unwindcode;
1233 mono_arch_unwindinfo_create (monoui);
1235 unwindinfo = (MonoUnwindInfo*)*monoui;
1238 g_error ("Stack allocation must be equal to or greater than 0x8.");
1242 else if (size <= 0x7FFF8)
1247 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1248 g_error ("Larger allocation needed for the unwind information.");
1250 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1251 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1253 if (codesneeded == 1) {
1254 /*The size of the allocation is
1255 (the number in the OpInfo member) times 8 plus 8*/
1256 unwindcode->OpInfo = (size - 8)/8;
1257 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1260 if (codesneeded == 3) {
1261 /*the unscaled size of the allocation is recorded
1262 in the next two slots in little-endian format*/
1263 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1265 unwindcode->OpInfo = 1;
1268 /*the size of the allocation divided by 8
1269 is recorded in the next slot*/
1270 unwindcode->FrameOffset = size/8;
1272 unwindcode->OpInfo = 0;
1275 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1278 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1280 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1281 g_error ("Adding unwind info in wrong order.");
1283 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
1287 mono_arch_unwindinfo_get_size (gpointer monoui)
1289 PMonoUnwindInfo unwindinfo;
1293 unwindinfo = (MonoUnwindInfo*)monoui;
1294 return (8 + sizeof (MonoUnwindInfo)) -
1295 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
1299 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1303 PMonoUnwindInfo targetinfo;
1304 MonoDomain *domain = mono_domain_get ();
1306 ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
1310 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
1312 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
1314 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1316 return &targetinfo->runtimeFunction;
1320 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1322 PMonoUnwindInfo unwindinfo, targetinfo;
1324 guint64 targetlocation;
1328 unwindinfo = (MonoUnwindInfo*)*monoui;
1329 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1330 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1332 unwindinfo->runtimeFunction.EndAddress = code_size;
1333 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
1335 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1337 codecount = unwindinfo->unwindInfo.CountOfCodes;
1339 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1340 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1343 g_free (unwindinfo);
1346 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
1351 #if MONO_SUPPORT_TASKLETS
1352 MonoContinuationRestore
1353 mono_tasklets_arch_restore (void)
1355 static guint8* saved = NULL;
1356 guint8 *code, *start;
1357 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1360 return (MonoContinuationRestore)saved;
1361 code = start = mono_global_codeman_reserve (64);
1362 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1363 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1364 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1365 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1366 * We move cont to cont_reg since we need both rcx and rdi for the copy
1367 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1369 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1370 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1371 /* setup the copy of the stack */
1372 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1373 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1375 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1376 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1377 amd64_prefix (code, X86_REP_PREFIX);
1380 /* now restore the registers from the LMF */
1381 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1382 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
1383 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
1384 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
1385 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
1386 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
1387 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
1389 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
1390 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
1392 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);
1394 /* restore the lmf chain */
1395 /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
1396 x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/
1398 /* state is already in rax */
1399 amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
1400 g_assert ((code - start) <= 64);
1402 return (MonoContinuationRestore)saved;