2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifdef HAVE_UCONTEXT_H
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #include "debug-mini.h"
34 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Per-signal Win32 SEH handler callbacks, installed via win32_seh_set_handler (). */
37 static MonoW32ExceptionHandler fpe_handler;
38 static MonoW32ExceptionHandler ill_handler;
39 static MonoW32ExceptionHandler segv_handler;
/* Previous top-level filter, saved so win32_seh_cleanup () can restore it. */
41 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Dispatch to the registered handler for _ex (fpe/ill/segv), if any.
 * Expands to e.g. `if (segv_handler) segv_handler (0, er, sctx)`; `er` and
 * `sctx` must be in scope at the expansion site. */
43 #define W32_SEH_HANDLE_EX(_ex) \
44 if (_ex##_handler) _ex##_handler(0, er, sctx)
47 * Unhandled Exception Filter
48 * Top-level per-process exception handler.
/* Translates a Win32 EXCEPTION_POINTERS record into a MonoContext, dispatches
 * to the registered mono handler for the exception class, then copies the
 * (possibly modified) context back so execution can resume. */
50 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
57 res = EXCEPTION_CONTINUE_EXECUTION;
59 er = ep->ExceptionRecord;
60 ctx = ep->ContextRecord;
/* NOTE(review): sctx is heap-allocated here; the matching g_free is not
 * visible in this view — confirm it is freed after the copy-back below. */
61 sctx = g_malloc(sizeof(MonoContext));
63 /* Copy Win32 context to UNIX style context */
/* Route the exception to the matching mono handler. */
78 switch (er->ExceptionCode) {
79 case EXCEPTION_ACCESS_VIOLATION:
80 W32_SEH_HANDLE_EX(segv);
82 case EXCEPTION_ILLEGAL_INSTRUCTION:
83 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults map onto the single FPE handler. */
85 case EXCEPTION_INT_DIVIDE_BY_ZERO:
86 case EXCEPTION_INT_OVERFLOW:
87 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
88 case EXCEPTION_FLT_OVERFLOW:
89 case EXCEPTION_FLT_UNDERFLOW:
90 case EXCEPTION_FLT_INEXACT_RESULT:
91 W32_SEH_HANDLE_EX(fpe);
97 /* Copy context back */
/* Non-volatile (callee-saved) registers plus RIP. */
100 ctx->Rdi = sctx->rdi;
101 ctx->Rsi = sctx->rsi;
102 ctx->Rbx = sctx->rbx;
103 ctx->Rbp = sctx->rbp;
104 ctx->R12 = sctx->r12;
105 ctx->R13 = sctx->r13;
106 ctx->R14 = sctx->r14;
107 ctx->R15 = sctx->r15;
108 ctx->Rip = sctx->rip;
110 /* Volatile registers; restoring them should be harmless either way. */
111 ctx->Rax = sctx->rax;
112 ctx->Rcx = sctx->rcx;
113 ctx->Rdx = sctx->rdx;
/* Install seh_handler as the process-wide unhandled-exception filter,
 * remembering the previous filter for win32_seh_cleanup (). */
120 void win32_seh_init()
122 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init (). */
125 void win32_seh_cleanup()
127 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register @handler for one exception class; @type selects which of the
 * fpe/ill/segv slots is written (the switch cases are elided in this view). */
130 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
134 fpe_handler = handler;
137 ill_handler = handler;
140 segv_handler = handler;
147 #endif /* TARGET_WIN32 */
150 * mono_arch_get_restore_context:
152 * Returns a pointer to a method which restores a previously saved sigcontext.
/* Emits a small trampoline: restore_context (MonoContext *ctx) reloads every
 * GP register from *ctx and jumps to the saved rip. R11 is used as scratch to
 * hold the ctx pointer, so it cannot itself be restored. */
155 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
157 guint8 *start = NULL;
159 MonoJumpInfo *ji = NULL;
160 GSList *unwind_ops = NULL;
162 /* restore_context (MonoContext *ctx) */
164 start = code = mono_global_codeman_reserve (256);
/* Move ctx out of the argument register into scratch R11. */
166 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
168 /* Restore all registers except %rip and %r11 */
169 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
170 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
171 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
172 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
173 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
174 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
175 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
176 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
177 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
178 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
179 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
180 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
181 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
182 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
184 if (mono_running_on_valgrind ()) {
185 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
/* Under valgrind: load rip before switching rsp so R11 is consumed while
 * the old stack is still valid. */
186 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
187 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
188 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
/* Non-valgrind path (else branch; the `} else {` line is elided here). */
190 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
191 /* get return address */
192 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
195 /* jump to the saved IP */
196 amd64_jump_reg (code, AMD64_R11);
198 mono_arch_flush_icache (start, code - start);
201 *info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);
207 * mono_arch_get_call_filter:
209 * Returns a pointer to a method which calls an exception filter. We
210 * also use this function to call finally handlers (we pass NULL as
211 * @exc object in this case).
214 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
220 MonoJumpInfo *ji = NULL;
221 GSList *unwind_ops = NULL;
223 start = code = mono_global_codeman_reserve (128);
225 /* call_filter (MonoContext *ctx, unsigned long eip) */
228 /* Alloc new frame */
229 amd64_push_reg (code, AMD64_RBP);
230 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
232 /* Save callee saved regs */
234 for (i = 0; i < AMD64_NREG; ++i)
235 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
236 amd64_push_reg (code, i);
/* Push RBP a second time so it can be restored after the filter call
 * clobbers it with the target frame's value. */
242 amd64_push_reg (code, AMD64_RBP);
244 /* Make stack misaligned, the call will make it aligned again */
246 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the filter's frame pointer and callee-saved regs out of *ctx so the
 * filter executes in the context of the frame being searched. */
249 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
250 /* load callee saved regs */
251 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
252 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
253 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
254 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
255 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* rdi/rsi are callee-saved only on Win64; presumably guarded by a
 * TARGET_WIN32 ifdef elided from this view — confirm. */
257 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
258 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
261 /* call the handler */
262 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment from above. */
265 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
268 amd64_pop_reg (code, AMD64_RBP);
270 /* Restore callee saved regs */
/* NOTE(review): loop starts at i == AMD64_NREG (one past the last register);
 * safe only if AMD64_IS_CALLEE_SAVED_REG (AMD64_NREG) is false — confirm. */
271 for (i = AMD64_NREG; i >= 0; --i)
272 if (AMD64_IS_CALLEE_SAVED_REG (i))
273 amd64_pop_reg (code, i);
278 g_assert ((code - start) < 128);
280 mono_arch_flush_icache (start, code - start);
283 *info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);
289 * The first few arguments are dummy, to force the other arguments to be passed on
290 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/* C-level half of the throw trampoline: rebuilds a MonoContext from the
 * register values pushed by get_throw_trampoline (), runs exception handling,
 * and restores the resulting context. Never returns. */
293 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
294 guint64 dummy5, guint64 dummy6,
295 MonoObject *exc, guint64 rip, guint64 rsp,
296 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
297 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
298 guint64 rax, guint64 rcx, guint64 rdx,
301 static void (*restore_context) (MonoContext *);
/* Lazily resolve restore_context once; cached across throws. */
304 if (!restore_context)
305 restore_context = mono_get_restore_context ();
/* Reset the stack trace on genuine MonoExceptions so rethrows record a
 * fresh trace from this point. */
321 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
322 MonoException *mono_ex = (MonoException*)exc;
324 mono_ex->stack_trace = NULL;
/* Debugger integration: if the instruction before rip is a call (0xe8),
 * hand the exception to the debugger with rip rewound to that call. */
327 if (mono_debug_using_mono_debugger ()) {
328 guint8 buf [16], *code;
330 mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
333 if (buf [3] == 0xe8) {
334 MonoContext ctx_cp = ctx;
/* 5 == length of a rel32 call instruction. */
335 ctx_cp.rip = rip - 5;
337 if (mono_debugger_handle_exception (&ctx_cp, exc)) {
338 restore_context (&ctx_cp);
339 g_assert_not_reached ();
344 /* adjust eip so that it points into the call instruction */
347 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
348 restore_context (&ctx);
350 g_assert_not_reached ();
/* Like mono_amd64_throw_exception () but takes a corlib type token index
 * instead of an exception object; instantiates the exception and forwards.
 * The dummy args keep the real args on the stack (see comment above). */
354 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
355 guint64 dummy5, guint64 dummy6,
356 guint32 ex_token_index,
357 guint64 rip, guint64 rsp,
358 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
359 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
360 guint64 rax, guint64 rcx, guint64 rdx,
/* Reconstruct the full metadata token from the index. */
363 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
366 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
/* NOTE(review): rip is presumably adjusted by the caller-supplied offset in
 * elided lines before this call — confirm against the full source. */
370 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, (MonoObject*)ex, rip, rsp, rbx, rbp, r12, r13, r14, r15, rdi, rsi, rax, rcx, rdx, FALSE);
374 * get_throw_trampoline:
376 * Generate a call to mono_amd64_throw_exception/
377 * mono_amd64_throw_corlib_exception.
/* Emits assembly which pushes the current register state onto the stack in the
 * exact order those functions expect their (stack-passed) parameters, then
 * calls the chosen C throw helper. Supports AOT (ji patch) and JIT modes. */
380 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean aot)
384 MonoJumpInfo *ji = NULL;
385 GSList *unwind_ops = NULL;
387 start = code = mono_global_codeman_reserve (64);
391 unwind_ops = mono_arch_get_cie_program ();
/* Save the entry rsp; [r11] is the caller's return address (= throw site). */
393 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
397 amd64_push_reg (code, AMD64_ARG_REG2);
/* Push args in reverse order of the C parameter list. */
399 amd64_push_imm (code, rethrow);
400 amd64_push_reg (code, AMD64_RDX);
401 amd64_push_reg (code, AMD64_RCX);
402 amd64_push_reg (code, AMD64_RAX);
403 amd64_push_reg (code, AMD64_RSI);
404 amd64_push_reg (code, AMD64_RDI);
405 amd64_push_reg (code, AMD64_R15);
406 amd64_push_reg (code, AMD64_R14);
407 amd64_push_reg (code, AMD64_R13);
408 amd64_push_reg (code, AMD64_R12);
409 amd64_push_reg (code, AMD64_RBP);
410 amd64_push_reg (code, AMD64_RBX);
/* SP at the throw site = entry rsp + 8 (skip the return address). */
413 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
414 amd64_push_reg (code, AMD64_RAX);
/* IP at the throw site = the return address itself. */
417 amd64_push_membase (code, AMD64_R11, 0);
421 amd64_push_reg (code, AMD64_ARG_REG1);
424 amd64_push_reg (code, AMD64_ARG_REG1);
426 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, (15 + 1) * sizeof (gpointer));
/* Six dummy stack slots matching the dummy1..dummy6 parameters. */
430 amd64_push_imm (code, 0);
431 amd64_push_imm (code, 0);
432 amd64_push_imm (code, 0);
433 amd64_push_imm (code, 0);
434 amd64_push_imm (code, 0);
435 amd64_push_imm (code, 0);
/* AOT: load the helper address via a RIP-relative patch slot. */
439 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
440 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
/* JIT: embed the helper address directly. */
442 amd64_mov_reg_imm (code, AMD64_R11, corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception);
444 amd64_call_reg (code, AMD64_R11);
/* The helper never returns; trap if it somehow does. */
445 amd64_breakpoint (code);
447 mono_arch_flush_icache (start, code - start);
449 g_assert ((code - start) < 64);
451 mono_save_trampoline_xdebug_info ("throw_exception_trampoline", start, code - start, unwind_ops);
454 *info = mono_tramp_info_create (g_strdup_printf (corlib ? "throw_corlib_exception" : (rethrow ? "rethrow_exception" : "throw_exception")), start, code - start, ji, unwind_ops);
460 * mono_arch_get_throw_exception:
462 * Returns a function pointer which can be used to raise
463 * exceptions. The returned function has the following
464 * signature: void (*func) (MonoException *exc);
468 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
470 return get_throw_trampoline (info, FALSE, FALSE, aot);
/* Same as mono_arch_get_throw_exception () but the trampoline marks the
 * throw as a rethrow (rethrow == TRUE), preserving the original trace. */
474 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
476 return get_throw_trampoline (info, TRUE, FALSE, aot);
480 * mono_arch_get_throw_corlib_exception:
482 * Returns a function pointer which can be used to raise
483 * corlib exceptions. The returned function has the following
484 * signature: void (*func) (guint32 ex_token, guint32 offset);
485 * Here, offset is the offset which needs to be subtracted from the caller IP
486 * to get the IP of the throw. Passing the offset has the advantage that it
487 * needs no relocations in the caller.
490 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
492 return get_throw_trampoline (info, FALSE, TRUE, aot);
496 * mono_arch_find_jit_info_ext:
498 * This function is used to gather information from @ctx, and store it in @frame_info.
499 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
500 * is modified if needed.
501 * Returns TRUE on success, FALSE otherwise.
502 * This function is a version of mono_arch_find_jit_info () where all the results are
503 * returned in a StackFrameInfo structure.
506 mono_arch_find_jit_info_ext (MonoDomain *domain, MonoJitTlsData *jit_tls,
507 MonoJitInfo *ji, MonoContext *ctx,
508 MonoContext *new_ctx, MonoLMF **lmf,
509 StackFrameInfo *frame)
511 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
513 memset (frame, 0, sizeof (StackFrameInfo));
515 frame->managed = FALSE;
/* --- Managed frame: unwind using the method's DWARF-style unwind info. --- */
520 gssize regs [MONO_MAX_IREGS + 1];
522 guint32 unwind_info_len;
525 frame->type = FRAME_TYPE_MANAGED;
/* Wrappers (except dynamic methods) are not considered managed frames. */
527 if (!ji->method->wrapper_type || ji->method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)
528 frame->managed = TRUE;
/* AOT methods keep their unwind info in the AOT image; JIT methods in the
 * shared cache keyed by used_regs. */
531 unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
533 unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
/* Seed the unwinder's register file from the current context. */
535 regs [AMD64_RAX] = new_ctx->rax;
536 regs [AMD64_RBX] = new_ctx->rbx;
537 regs [AMD64_RCX] = new_ctx->rcx;
538 regs [AMD64_RDX] = new_ctx->rdx;
539 regs [AMD64_RBP] = new_ctx->rbp;
540 regs [AMD64_RSP] = new_ctx->rsp;
541 regs [AMD64_RSI] = new_ctx->rsi;
542 regs [AMD64_RDI] = new_ctx->rdi;
543 regs [AMD64_RIP] = new_ctx->rip;
544 regs [AMD64_R12] = new_ctx->r12;
545 regs [AMD64_R13] = new_ctx->r13;
546 regs [AMD64_R14] = new_ctx->r14;
547 regs [AMD64_R15] = new_ctx->r15;
549 mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
550 (guint8*)ji->code_start + ji->code_size,
551 ip, regs, MONO_MAX_IREGS + 1, &cfa);
/* Copy the unwound register file back into the caller's context. */
553 new_ctx->rax = regs [AMD64_RAX];
554 new_ctx->rbx = regs [AMD64_RBX];
555 new_ctx->rcx = regs [AMD64_RCX];
556 new_ctx->rdx = regs [AMD64_RDX];
557 new_ctx->rbp = regs [AMD64_RBP];
558 new_ctx->rsp = regs [AMD64_RSP];
559 new_ctx->rsi = regs [AMD64_RSI];
560 new_ctx->rdi = regs [AMD64_RDI];
561 new_ctx->rip = regs [AMD64_RIP];
562 new_ctx->r12 = regs [AMD64_R12];
563 new_ctx->r13 = regs [AMD64_R13];
564 new_ctx->r14 = regs [AMD64_R14];
565 new_ctx->r15 = regs [AMD64_R15];
567 /* The CFA becomes the new SP value */
568 new_ctx->rsp = (gssize)cfa;
/* Discard LMF entries that belong to frames already unwound past. */
573 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
574 /* remove any unused lmf */
/* Low 2 bits of previous_lmf are flag bits — mask them off. */
575 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
578 #ifndef MONO_AMD64_NO_PUSHES
579 /* Pop arguments off the stack */
581 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
583 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
584 new_ctx->rsp += stack_to_pop;
/* --- LMF frame: unwind a managed-to-native transition. --- */
/* Flag bit 2: LMF created by the soft debugger for invoke transitions. */
592 if (((guint64)(*lmf)->previous_lmf) & 2) {
594 * This LMF entry is created by the soft debug code to mark transitions to
595 * managed code done during invokes.
597 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
599 g_assert (ext->debugger_invoke);
601 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
603 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
605 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
/* Flag bit 1: lmf->rip was filled in explicitly (hijacked frames). */
610 if (((guint64)(*lmf)->previous_lmf) & 1) {
611 /* This LMF has the rip field set */
613 } else if ((*lmf)->rsp == 0) {
618 * The rsp field is set just before the call which transitioned to native
619 * code. Obtain the rip from the stack.
621 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
624 ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
626 // FIXME: This can happen with multiple appdomains (bug #444383)
631 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
/* Rebuild the context from the saved LMF register snapshot. */
634 new_ctx->rbp = (*lmf)->rbp;
635 new_ctx->rsp = (*lmf)->rsp;
637 new_ctx->rbx = (*lmf)->rbx;
638 new_ctx->r12 = (*lmf)->r12;
639 new_ctx->r13 = (*lmf)->r13;
640 new_ctx->r14 = (*lmf)->r14;
641 new_ctx->r15 = (*lmf)->r15;
/* rdi/rsi presumably Win64-only (callee-saved there) — elided #ifdef. */
643 new_ctx->rdi = (*lmf)->rdi;
644 new_ctx->rsi = (*lmf)->rsi;
647 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
656 * mono_arch_handle_exception:
658 * @ctx: saved processor state
659 * @obj: the exception object
/* Converts the signal context to a MonoContext, offers the exception to the
 * debugger first, then runs normal EH and writes the resulting context back
 * into the signal context so the kernel resumes at the handler. */
662 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
666 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
/* Debugger gets first refusal; on acceptance, skip normal EH. */
668 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
671 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
673 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
678 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
/* Return the general-purpose register array embedded in a ucontext_t
 * (platform layout abstracted by the UCONTEXT_GREGS macro). */
679 static inline guint64*
680 gregs_from_ucontext (ucontext_t *ctx)
682 return (guint64 *) UCONTEXT_GREGS (ctx);
/* Copy the CPU state out of a platform signal context into a MonoContext.
 * Three variants: gregs array, per-register UCONTEXT macros, or a raw
 * MonoContext pass-through when sigaction is unavailable. */
686 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
688 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
689 ucontext_t *ctx = (ucontext_t*)sigctx;
691 guint64 *gregs = gregs_from_ucontext (ctx);
693 mctx->rax = gregs [REG_RAX];
694 mctx->rbx = gregs [REG_RBX];
695 mctx->rcx = gregs [REG_RCX];
696 mctx->rdx = gregs [REG_RDX];
697 mctx->rbp = gregs [REG_RBP];
698 mctx->rsp = gregs [REG_RSP];
699 mctx->rsi = gregs [REG_RSI];
700 mctx->rdi = gregs [REG_RDI];
701 mctx->rip = gregs [REG_RIP];
702 mctx->r12 = gregs [REG_R12];
703 mctx->r13 = gregs [REG_R13];
704 mctx->r14 = gregs [REG_R14];
705 mctx->r15 = gregs [REG_R15];
706 #elif defined(MONO_ARCH_USE_SIGACTION)
707 ucontext_t *ctx = (ucontext_t*)sigctx;
709 mctx->rax = UCONTEXT_REG_RAX (ctx);
710 mctx->rbx = UCONTEXT_REG_RBX (ctx);
711 mctx->rcx = UCONTEXT_REG_RCX (ctx);
712 mctx->rdx = UCONTEXT_REG_RDX (ctx);
713 mctx->rbp = UCONTEXT_REG_RBP (ctx);
714 mctx->rsp = UCONTEXT_REG_RSP (ctx);
715 mctx->rsi = UCONTEXT_REG_RSI (ctx);
716 mctx->rdi = UCONTEXT_REG_RDI (ctx);
717 mctx->rip = UCONTEXT_REG_RIP (ctx);
718 mctx->r12 = UCONTEXT_REG_R12 (ctx);
719 mctx->r13 = UCONTEXT_REG_R13 (ctx);
720 mctx->r14 = UCONTEXT_REG_R14 (ctx);
721 mctx->r15 = UCONTEXT_REG_R15 (ctx);
/* Fallback: sigctx is already a MonoContext (e.g. Win32 path). */
723 MonoContext *ctx = (MonoContext *)sigctx;
725 mctx->rax = ctx->rax;
726 mctx->rbx = ctx->rbx;
727 mctx->rcx = ctx->rcx;
728 mctx->rdx = ctx->rdx;
729 mctx->rbp = ctx->rbp;
730 mctx->rsp = ctx->rsp;
731 mctx->rsi = ctx->rsi;
732 mctx->rdi = ctx->rdi;
733 mctx->rip = ctx->rip;
734 mctx->r12 = ctx->r12;
735 mctx->r13 = ctx->r13;
736 mctx->r14 = ctx->r14;
737 mctx->r15 = ctx->r15;
/* Inverse of mono_arch_sigctx_to_monoctx (): write a MonoContext back into
 * the platform signal context so the kernel resumes with the new state. */
742 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
744 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
745 ucontext_t *ctx = (ucontext_t*)sigctx;
747 guint64 *gregs = gregs_from_ucontext (ctx);
749 gregs [REG_RAX] = mctx->rax;
750 gregs [REG_RBX] = mctx->rbx;
751 gregs [REG_RCX] = mctx->rcx;
752 gregs [REG_RDX] = mctx->rdx;
753 gregs [REG_RBP] = mctx->rbp;
754 gregs [REG_RSP] = mctx->rsp;
755 gregs [REG_RSI] = mctx->rsi;
756 gregs [REG_RDI] = mctx->rdi;
757 gregs [REG_RIP] = mctx->rip;
758 gregs [REG_R12] = mctx->r12;
759 gregs [REG_R13] = mctx->r13;
760 gregs [REG_R14] = mctx->r14;
761 gregs [REG_R15] = mctx->r15;
762 #elif defined(MONO_ARCH_USE_SIGACTION)
763 ucontext_t *ctx = (ucontext_t*)sigctx;
765 UCONTEXT_REG_RAX (ctx) = mctx->rax;
766 UCONTEXT_REG_RBX (ctx) = mctx->rbx;
767 UCONTEXT_REG_RCX (ctx) = mctx->rcx;
768 UCONTEXT_REG_RDX (ctx) = mctx->rdx;
769 UCONTEXT_REG_RBP (ctx) = mctx->rbp;
770 UCONTEXT_REG_RSP (ctx) = mctx->rsp;
771 UCONTEXT_REG_RSI (ctx) = mctx->rsi;
772 UCONTEXT_REG_RDI (ctx) = mctx->rdi;
773 UCONTEXT_REG_RIP (ctx) = mctx->rip;
774 UCONTEXT_REG_R12 (ctx) = mctx->r12;
775 UCONTEXT_REG_R13 (ctx) = mctx->r13;
776 UCONTEXT_REG_R14 (ctx) = mctx->r14;
777 UCONTEXT_REG_R15 (ctx) = mctx->r15;
/* Fallback: sigctx is already a MonoContext (e.g. Win32 path). */
779 MonoContext *ctx = (MonoContext *)sigctx;
781 ctx->rax = mctx->rax;
782 ctx->rbx = mctx->rbx;
783 ctx->rcx = mctx->rcx;
784 ctx->rdx = mctx->rdx;
785 ctx->rbp = mctx->rbp;
786 ctx->rsp = mctx->rsp;
787 ctx->rsi = mctx->rsi;
788 ctx->rdi = mctx->rdi;
789 ctx->rip = mctx->rip;
790 ctx->r12 = mctx->r12;
791 ctx->r13 = mctx->r13;
792 ctx->r14 = mctx->r14;
793 ctx->r15 = mctx->r15;
/* Extract the instruction pointer from a platform signal context, using the
 * same three-variant scheme as the sigctx<->monoctx converters above. */
798 mono_arch_ip_from_context (void *sigctx)
800 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
801 ucontext_t *ctx = (ucontext_t*)sigctx;
803 guint64 *gregs = gregs_from_ucontext (ctx);
805 return (gpointer)gregs [REG_RIP];
806 #elif defined(MONO_ARCH_USE_SIGACTION)
807 ucontext_t *ctx = (ucontext_t*)sigctx;
809 return (gpointer)UCONTEXT_REG_RIP (ctx);
/* Fallback: sigctx is already a MonoContext. */
811 MonoContext *ctx = sigctx;
812 return (gpointer)ctx->rip;
/* Re-protect the soft stack-overflow guard pages (made accessible during the
 * overflow) and return to the interrupted code; used as a synthetic return
 * target by prepare_for_guard_pages () below. */
817 restore_soft_guard_pages (void)
819 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
820 if (jit_tls->stack_ovf_guard_base)
821 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
825 * this function modifies mctx so that when it is restored, it
826 * won't execute starting at mctx.eip, but in a function that
827 * will restore the protection on the soft-guard pages and return back to
828 * continue at mctx.eip.
831 prepare_for_guard_pages (MonoContext *mctx)
834 sp = (gpointer)(mctx->rsp);
/* Push the original rip as a fake return address, then redirect execution
 * to restore_soft_guard_pages (), which will `ret` back to it.
 * NOTE(review): the sp-adjusting line between these appears elided here. */
836 /* the return addr */
837 sp [0] = (gpointer)(mctx->rip);
838 mctx->rip = (guint64)restore_soft_guard_pages;
839 mctx->rsp = (guint64)sp;
/* Runs on the normal stack after a signal handled on the altstack: performs
 * EH for @obj and restores execution; on stack overflow (@stack_ovf) the
 * guard pages are re-armed via prepare_for_guard_pages () first. */
843 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
845 void (*restore_context) (MonoContext *);
848 restore_context = mono_get_restore_context ();
849 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
/* Debugger gets first refusal, mirroring mono_arch_handle_exception (). */
851 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
853 prepare_for_guard_pages (&mctx);
854 restore_context (&mctx);
857 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
859 prepare_for_guard_pages (&mctx);
860 restore_context (&mctx);
/* Signal-handler entry for faults delivered on the alternate signal stack.
 * Builds a call frame on the *real* thread stack and rewrites the signal
 * context so that returning from the handler enters
 * altstack_handle_and_restore (ucontext_copy, exc, stack_ovf). */
864 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
866 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
867 MonoException *exc = NULL;
868 ucontext_t *ctx = (ucontext_t*)sigctx;
869 guint64 *gregs = gregs_from_ucontext (ctx);
870 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP], NULL);
/* Use the preallocated domain exception — can't allocate during overflow. */
875 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: treat as a native crash. */
877 mono_handle_native_sigsegv (SIGSEGV, sigctx);
879 /* setup a call frame on the real stack so that control is returned there
880 * and exception handling can continue.
881 * The frame looks like:
885 * 128 is the size of the red zone
887 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte-align, then reserve room below the faulting frame. */
890 sp = (gpointer)(gregs [REG_RSP] & ~15);
891 sp = (gpointer)((char*)sp - frame_size);
892 /* the arguments must be aligned */
/* Fake return address so backtraces point at the faulting instruction. */
893 sp [-1] = (gpointer)gregs [REG_RIP];
894 /* may need to adjust pointers in the new struct copy, depending on the OS */
895 memcpy (sp + 4, ctx, sizeof (ucontext_t));
896 /* on return from the signal handler execution starts in altstack_handle_and_restore() */
897 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
898 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers: rdi = sigctx copy, rsi = exc, rdx = stack_ovf. */
899 gregs [REG_RDI] = (unsigned long)(sp + 4);
900 gregs [REG_RSI] = (guint64)exc;
901 gregs [REG_RDX] = stack_ovf;
/* Return the original return address saved in lmf->rip by
 * mono_arch_notify_pending_exc (), clearing the hijack flag bit so the
 * LMF is back to its normal state. */
906 mono_amd64_get_original_ip (void)
908 MonoLMF *lmf = mono_get_lmf ();
912 /* Reset the change to previous_lmf */
913 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/* Generate the trampoline jumped to by a hijacked return address (see
 * mono_arch_notify_pending_exc ()). It saves the call's return value,
 * fetches and throws any pending exception, or — if none — restores the
 * original state and resumes at the original return address. */
919 mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
921 guint8 *code, *start;
923 gpointer throw_trampoline;
924 MonoJumpInfo *ji = NULL;
925 GSList *unwind_ops = NULL;
927 start = code = mono_global_codeman_reserve (128);
929 /* We are in the frame of a managed method after a call */
931 * We would like to throw the pending exception in such a way that it looks to
932 * be thrown from the managed method.
935 /* Save registers which might contain the return value of the call */
936 amd64_push_reg (code, AMD64_RAX);
937 amd64_push_reg (code, AMD64_RDX);
/* Save a possible FP return value (xmm0) too. */
939 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
940 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Keep the stack 16-byte aligned for the upcoming calls. */
943 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
945 /* Obtain the pending exception */
/* AOT path loads the icall address via a patch slot; JIT embeds it. */
947 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
948 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
950 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
952 amd64_call_reg (code, AMD64_R11);
954 /* Check if it is NULL, and branch */
955 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
956 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
958 /* exc != NULL branch */
960 /* Save the exc on the stack */
961 amd64_push_reg (code, AMD64_RAX);
/* Re-align the stack after the push. */
963 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
965 /* Obtain the original ip and clear the flag in previous_lmf */
967 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
968 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
970 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
972 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from the stack. */
975 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
977 /* Pop saved stuff from the stack */
978 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
980 /* Setup arguments for the throw trampoline */
/* ARG_REG1 = exception object. */
982 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
983 /* The trampoline expects the caller ip to be pushed on the stack */
/* rax still holds the original ip returned by mono_amd64_get_original_ip. */
984 amd64_push_reg (code, AMD64_RAX);
986 /* Call the throw trampoline */
988 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
989 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
991 throw_trampoline = mono_get_throw_exception ();
992 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
994 /* We use a jump instead of a call so we can push the original ip on the stack */
995 amd64_jump_reg (code, AMD64_R11);
997 /* ex == NULL branch */
998 mono_amd64_patch (br [0], code);
1000 /* Obtain the original ip and clear the flag in previous_lmf */
1002 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
1003 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1005 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
1007 amd64_call_reg (code, AMD64_R11);
1008 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1010 /* Restore registers */
1011 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1012 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1013 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1014 amd64_pop_reg (code, AMD64_RDX);
1015 amd64_pop_reg (code, AMD64_RAX);
1017 /* Return to original code */
1018 amd64_jump_reg (code, AMD64_R11);
1020 g_assert ((code - start) < 128);
1023 *info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);
/* Trampoline generated by mono_arch_get_throw_pending_exception (), cached
 * by mono_arch_exceptions_init (). */
1028 static gpointer throw_pending_exception;
1031 * Called when a thread receives an async exception while executing unmanaged code.
1032 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1033 * the return address on the stack to point to a helper routine which throws the
1037 mono_arch_notify_pending_exc (void)
1039 MonoLMF *lmf = mono_get_lmf ();
1042 /* Not yet started */
/* Flag bit 1 in previous_lmf means rip is already saved — don't hijack twice. */
1049 if ((guint64)lmf->previous_lmf & 1)
1050 /* Already hijacked or trampoline LMF entry */
1053 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
/* Save the real return address into the LMF... */
1054 lmf->rip = *(guint64*)(lmf->rsp - 8);
1055 /* Signal that lmf->rip is set */
1056 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
/* ...and overwrite the stack slot so the native call "returns" into the
 * throw_pending_exception trampoline instead. */
1058 *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
/* One-time arch EH initialization: resolve the throw_pending_exception
 * trampoline (from the AOT image in aot-only mode, otherwise JIT it now to
 * avoid lazy-initialization races later). */
1062 mono_arch_exceptions_init (void)
1064 if (mono_aot_only) {
1065 throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
1067 /* Call this to avoid initialization races */
1068 throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);
1075 * The mono_arch_unwindinfo* methods are used to build and add
1076 * function table info for each emitted method from mono. On Winx64
1077 * the seh handler will not be called if the mono methods are not
1078 * added to the function table.
1080 * We should not need to add non-volatile register info to the
1081 * table since mono stores that info elsewhere. (Except for the register
/* Upper bound on unwind codes per method; the breakdown below shows how the
 * worst case is reached. Codes are filled from the END of the array. */
1085 #define MONO_MAX_UNWIND_CODES 22
/* Mirrors the Win64 UNWIND_CODE slot (2 bytes): either an op descriptor or
 * a raw 16-bit frame offset, per the x64 exception-handling ABI. */
1087 typedef union _UNWIND_CODE {
1090 guchar UnwindOp : 4;
1093 gushort FrameOffset;
1094 } UNWIND_CODE, *PUNWIND_CODE;
/* Mirrors the Win64 UNWIND_INFO header, with a mono-sized code array. */
1096 typedef struct _UNWIND_INFO {
1099 guchar SizeOfProlog;
1100 guchar CountOfCodes;
1101 guchar FrameRegister : 4;
1102 guchar FrameOffset : 4;
1103 /* custom array size for mono, allowing for: */
1104 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1105 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1106 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1107 /*UWOP_PUSH_NONVOL offset = 15-0*/
1108 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1110 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1112 * OPTIONAL ULONG ExceptionHandler;
1113 * OPTIONAL ULONG FunctionEntry;
1115 * OPTIONAL ULONG ExceptionData[]; */
1116 } UNWIND_INFO, *PUNWIND_INFO;
/* RUNTIME_FUNCTION + its unwind info, kept together per JITted method. */
1120 RUNTIME_FUNCTION runtimeFunction;
1121 UNWIND_INFO unwindInfo;
1122 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a zeroed MonoUnwindInfo (Version 1, per the x64 unwind ABI) and
 * store it in *monoui. Caller owns the allocation. */
1125 mono_arch_unwindinfo_create (gpointer* monoui)
1127 PMonoUnwindInfo newunwindinfo;
1128 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1129 newunwindinfo->unwindInfo.Version = 1;
/* Record a UWOP_PUSH_NONVOL unwind code for pushing register @reg at prolog
 * offset @nextip - @codebegin. Codes must be added in increasing prolog
 * offset order; the array is filled from the end (Win64 convention). */
1133 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1135 PMonoUnwindInfo unwindinfo;
1136 PUNWIND_CODE unwindcode;
/* Lazily create the unwind info on first use. */
1139 mono_arch_unwindinfo_create (monoui);
1141 unwindinfo = (MonoUnwindInfo*)*monoui;
1143 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1144 g_error ("Larger allocation needed for the unwind information.");
1146 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1147 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1148 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1149 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1150 unwindcode->OpInfo = reg;
1152 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1153 g_error ("Adding unwind info in wrong order.");
1155 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a UWOP_SET_FPREG unwind code establishing @reg as the frame pointer
 * (offset 0). Consumes two code slots per the x64 unwind ABI. */
1159 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1161 PMonoUnwindInfo unwindinfo;
1162 PUNWIND_CODE unwindcode;
/* Lazily create the unwind info on first use. */
1165 mono_arch_unwindinfo_create (monoui);
1167 unwindinfo = (MonoUnwindInfo*)*monoui;
1169 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1170 g_error ("Larger allocation needed for the unwind information.");
/* Reserve two slots; the primary code sits at the lower index. */
1172 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1173 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1174 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1176 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1177 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1178 unwindcode->OpInfo = reg;
1180 unwindinfo->unwindInfo.FrameRegister = reg;
1182 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1183 g_error ("Adding unwind info in wrong order.");
1185 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/* Record a stack-allocation unwind code for @size bytes: UWOP_ALLOC_SMALL
 * (1 slot, size <= 128), UWOP_ALLOC_LARGE with scaled size (2 slots,
 * size <= 512KB-8) or with unscaled 32-bit size (3 slots), per the x64
 * unwind ABI. @size must be a positive multiple of 8. */
1189 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1191 PMonoUnwindInfo unwindinfo;
1192 PUNWIND_CODE unwindcode;
/* Lazily create the unwind info on first use. */
1196 mono_arch_unwindinfo_create (monoui);
1198 unwindinfo = (MonoUnwindInfo*)*monoui;
1201 g_error ("Stack allocation must be equal to or greater than 0x8.");
/* Slot-count selection (the size <= 0x80 single-slot case is elided here). */
1205 else if (size <= 0x7FFF8)
1210 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1211 g_error ("Larger allocation needed for the unwind information.");
1213 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1214 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1216 if (codesneeded == 1) {
1217 /*The size of the allocation is
1218 (the number in the OpInfo member) times 8 plus 8*/
1219 unwindcode->OpInfo = (size - 8)/8;
1220 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1223 if (codesneeded == 3) {
1224 /*the unscaled size of the allocation is recorded
1225 in the next two slots in little-endian format*/
1226 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1228 unwindcode->OpInfo = 1;
/* codesneeded == 2: scaled size in one extra slot. */
1231 /*the size of the allocation divided by 8
1232 is recorded in the next slot*/
1233 unwindcode->FrameOffset = size/8;
1235 unwindcode->OpInfo = 0;
1238 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1241 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1243 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1244 g_error ("Adding unwind info in wrong order.");
1246 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * Return the number of bytes needed to persist this unwind info next to the
 * JIT-ed code: the full MonoUnwindInfo minus the unused tail of the
 * fixed-size UnwindCode array.  The extra 8 bytes presumably cover the
 * 8-byte alignment slack applied at install time (see ALIGN_TO in
 * install_unwind_info) — TODO confirm.
 */
1250 mono_arch_unwindinfo_get_size (gpointer monoui)
1252 PMonoUnwindInfo unwindinfo;
1256 unwindinfo = (MonoUnwindInfo*)monoui;
1257 return (8 + sizeof (MonoUnwindInfo)) -
1258 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
/*
 * RtlInstallFunctionTableCallback callback: given a faulting/unwinding PC,
 * locate the JIT method containing it and hand the OS its RUNTIME_FUNCTION.
 * 'Context' is the code start address registered in install_unwind_info, so
 * UnwindData is rewritten as an offset relative to that base, as the Win64
 * unwinder expects.  The MonoUnwindInfo was copied to the 8-byte-aligned
 * address just past the method's code, which is where 'targetinfo' points.
 */
1262 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1266 PMonoUnwindInfo targetinfo;
1267 MonoDomain *domain = mono_domain_get ();
/* Look up the JIT info for the PC in the current domain's table. */
1269 ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
/* The unwind block was placed immediately after the method body... */
1273 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
/* ...rounded up to 8-byte alignment (must match install_unwind_info). */
1275 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
/* UnwindData must be an RVA from the registered base address (Context). */
1277 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1279 return &targetinfo->runtimeFunction;
/*
 * Finalize the accumulated unwind info for a freshly JIT-ed method:
 * copy it (trimmed) to the 8-byte-aligned slot just past the method body,
 * compact the back-filled unwind codes to the front of the target array,
 * free the temporary builder object, and register a function-table callback
 * with the OS so the Win64 unwinder can find it (see
 * MONO_GET_RUNTIME_FUNCTION_CALLBACK).
 */
1283 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1285 PMonoUnwindInfo unwindinfo, targetinfo;
1287 guint64 targetlocation;
1291 unwindinfo = (MonoUnwindInfo*)*monoui;
/* Destination: first 8-byte-aligned address past the method's code. */
1292 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1293 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
/* RUNTIME_FUNCTION offsets are relative to the code start. */
1295 unwindinfo->runtimeFunction.EndAddress = code_size;
1296 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
/* Copy the fixed header, excluding the (mostly unused) code array. */
1298 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1300 codecount = unwindinfo->unwindInfo.CountOfCodes;
/* The builder filled codes from the array's end; the on-disk format wants
 * them starting at index 0, so copy only the used tail to the front. */
1302 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1303 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
/* The temporary builder is no longer needed once the data is in place. */
1306 g_free (unwindinfo);
/* Register the callback for this code range.  The low bits OR-ed into the
 * table identifier (| 0x3) appear to be required by the callback-table API —
 * TODO confirm against RtlInstallFunctionTableCallback docs. */
1309 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
1314 #if MONO_SUPPORT_TASKLETS
/*
 * Emit (once, cached in 'saved') a small trampoline that restores a
 * previously captured continuation: it copies the saved stack back into
 * place, restores the callee-saved registers from the continuation's LMF,
 * and jumps to the saved return address with 'state' in RAX as the
 * apparent return value of the original switch point.
 */
1315 MonoContinuationRestore
1316 mono_tasklets_arch_restore (void)
1318 static guint8* saved = NULL;
1319 guint8 *code, *start;
1320 int cont_reg = AMD64_R9; /* register usable on both call conventions */
/* Already generated on a previous call: reuse the cached trampoline. */
1323 return (MonoContinuationRestore)saved;
1324 code = start = mono_global_codeman_reserve (64);
1325 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1326 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1327 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1328 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1329 * We move cont to cont_reg since we need both rcx and rdi for the copy
1330 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1332 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1333 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1334 /* setup the copy of the stack */
/* RCX = stack_used_size >> 3: the copy below counts 8-byte words. */
1335 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1336 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
/* RSI = source (saved copy of the stack), RDI = destination (original sp). */
1338 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1339 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
/* REP prefix for the string-copy instruction (emitted on a line elided
 * from this view) that replays the saved stack into place. */
1340 amd64_prefix (code, X86_REP_PREFIX);
1343 /* now restore the registers from the LMF */
1344 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1345 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
1346 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
1347 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
1348 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
1349 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
1350 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
1352 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
1353 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
/* Restoring RSP last switches us onto the restored stack. */
1355 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);
1357 /* restore the lmf chain */
1358 /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
1359 x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/
1361 /* state is already in rax */
/* Resume at the continuation's captured return address. */
1362 amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
/* Guard against overrunning the 64-byte code buffer reserved above. */
1363 g_assert ((code - start) <= 64);
/* NOTE(review): no visible 'saved = start;' before this return — presumably
 * it is on a line elided from this view; otherwise the trampoline is
 * regenerated (and NULL returned) on the first call.  TODO confirm. */
1365 return (MonoContinuationRestore)saved;