2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifdef HAVE_UCONTEXT_H
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #include "debug-mini.h"
34 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
37 static MonoW32ExceptionHandler fpe_handler;
38 static MonoW32ExceptionHandler ill_handler;
39 static MonoW32ExceptionHandler segv_handler;
41 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
43 #define W32_SEH_HANDLE_EX(_ex) \
44 if (_ex##_handler) _ex##_handler(0, er, sctx)
47 * Unhandled Exception Filter
48 * Top-level per-process exception handler.
/*
 * Installed via SetUnhandledExceptionFilter (see win32_seh_init below).
 * Converts the Win32 CONTEXT to a heap-allocated MonoContext, dispatches
 * to the per-exception-type handler registered through
 * win32_seh_set_handler (segv/ill/fpe), then copies the possibly
 * modified MonoContext back into the Win32 CONTEXT so execution resumes
 * wherever the handler decided.
 */
50 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
/* Default: resume execution at the (possibly patched) context. */
57 res = EXCEPTION_CONTINUE_EXECUTION;
59 er = ep->ExceptionRecord;
60 ctx = ep->ContextRecord;
61 sctx = g_malloc(sizeof(MonoContext));
63 /* Copy Win32 context to UNIX style context */
/* Route the hardware exception to the matching Mono handler. */
78 switch (er->ExceptionCode) {
79 case EXCEPTION_ACCESS_VIOLATION:
80 W32_SEH_HANDLE_EX(segv);
82 case EXCEPTION_ILLEGAL_INSTRUCTION:
83 W32_SEH_HANDLE_EX(ill);
/* All arithmetic faults funnel into the single fpe handler. */
85 case EXCEPTION_INT_DIVIDE_BY_ZERO:
86 case EXCEPTION_INT_OVERFLOW:
87 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
88 case EXCEPTION_FLT_OVERFLOW:
89 case EXCEPTION_FLT_UNDERFLOW:
90 case EXCEPTION_FLT_INEXACT_RESULT:
91 W32_SEH_HANDLE_EX(fpe);
97 /* Copy context back */
/* Callee-saved registers plus rip. */
100 ctx->Rdi = sctx->rdi;
101 ctx->Rsi = sctx->rsi;
102 ctx->Rbx = sctx->rbx;
103 ctx->Rbp = sctx->rbp;
104 ctx->R12 = sctx->r12;
105 ctx->R13 = sctx->r13;
106 ctx->R14 = sctx->r14;
107 ctx->R15 = sctx->r15;
108 ctx->Rip = sctx->rip;
110 /* Volatile registers -- copied back too, though they should not matter. */
111 ctx->Rax = sctx->rax;
112 ctx->Rcx = sctx->rcx;
113 ctx->Rdx = sctx->rdx;
/* Install seh_handler as the process-wide unhandled exception filter,
 * remembering the previous filter so it can be restored on cleanup. */
120 void win32_seh_init()
122 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init. */
125 void win32_seh_cleanup()
127 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/*
 * Register @handler for one class of Win32 exceptions. @type selects
 * which of the file-scope handler slots (fpe/ill/segv) is assigned;
 * seh_handler consults these via W32_SEH_HANDLE_EX.
 */
130 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
134 fpe_handler = handler;
137 ill_handler = handler;
140 segv_handler = handler;
147 #endif /* TARGET_WIN32 */
150 * mono_arch_get_restore_context:
152 * Returns a pointer to a method which restores a previously saved sigcontext.
/*
 * Emits a small trampoline: the MonoContext* argument is stashed in
 * %r11, every register except %rip and %r11 is reloaded from it, and
 * control jumps to the saved %rip. @code_size receives the emitted
 * length; @ji/@aot support AOT compilation (NOTE(review): their use is
 * not visible in this chunk).
 */
155 mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
157 guint8 *start = NULL;
160 /* restore_context (MonoContext *ctx) */
164 start = code = mono_global_codeman_reserve (256);
/* Keep the context pointer in a scratch register while clobbering the rest. */
166 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
168 /* Restore all registers except %rip and %r11 */
169 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
170 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
171 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
172 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
173 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
174 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
175 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
176 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
177 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
178 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
179 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
180 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
181 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
182 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
184 if (mono_running_on_valgrind ()) {
185 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
/* Load rip before switching stacks so %r11 is free when %rsp moves. */
186 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
187 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
188 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
190 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
191 /* get return address */
192 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
195 /* jump to the saved IP */
196 amd64_jump_reg (code, AMD64_R11);
198 mono_arch_flush_icache (start, code - start);
200 *code_size = code - start;
206 * mono_arch_get_call_filter:
208 * Returns a pointer to a method which calls an exception filter. We
209 * also use this function to call finally handlers (we pass NULL as
210 * @exc object in this case).
/*
 * Emits a trampoline with signature call_filter (MonoContext *ctx,
 * unsigned long eip): it saves the callee-saved registers of the
 * current frame, loads the callee-saved registers from @ctx, calls the
 * handler at @eip, then restores the caller's registers and returns.
 */
213 mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
222 start = code = mono_global_codeman_reserve (128);
224 /* call_filter (MonoContext *ctx, unsigned long eip) */
227 /* Alloc new frame */
228 amd64_push_reg (code, AMD64_RBP);
229 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
231 /* Save callee saved regs */
233 for (i = 0; i < AMD64_NREG; ++i)
234 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
235 amd64_push_reg (code, i);
/* Save %rbp separately: it is overwritten below from the context. */
241 amd64_push_reg (code, AMD64_RBP);
243 /* Make stack misaligned, the call will make it aligned again */
245 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Switch to the frame described by @ctx before invoking the filter. */
248 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
249 /* load callee saved regs */
250 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
251 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
252 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
253 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
254 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
256 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
257 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
260 /* call the handler */
261 amd64_call_reg (code, AMD64_ARG_REG2);
/* Undo the alignment adjustment made above. */
264 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
267 amd64_pop_reg (code, AMD64_RBP);
269 /* Restore callee saved regs */
270 for (i = AMD64_NREG; i >= 0; --i)
271 if (AMD64_IS_CALLEE_SAVED_REG (i))
272 amd64_pop_reg (code, i);
/* Guard against overrunning the 128-byte reservation. */
277 g_assert ((code - start) < 128);
279 mono_arch_flush_icache (start, code - start);
281 *code_size = code - start;
287 * The first few arguments are dummy, to force the other arguments to be passed on
288 * the stack, this avoids overwriting the argument registers in the throw trampoline.
/*
 * C part of the throw trampoline: rebuilds a MonoContext from the
 * register values the trampoline pushed on the stack, gives the soft
 * debugger a chance to intercept, then hands the exception to
 * mono_handle_exception and restores the resulting context. Never
 * returns.
 */
291 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
292 guint64 dummy5, guint64 dummy6,
293 MonoObject *exc, guint64 rip, guint64 rsp,
294 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
295 guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
296 guint64 rax, guint64 rcx, guint64 rdx,
/* Cached lazily on first throw. */
299 static void (*restore_context) (MonoContext *);
302 if (!restore_context)
303 restore_context = mono_get_restore_context ();
/* Clear any stale stack trace on a managed exception object before
 * (re)throwing it. */
319 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
320 MonoException *mono_ex = (MonoException*)exc;
322 mono_ex->stack_trace = NULL;
325 if (mono_debug_using_mono_debugger ()) {
326 guint8 buf [16], *code;
/* Strip any breakpoint opcodes around the throw site before decoding. */
328 mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
/* 0xe8 == call rel32; back rip up over the 5-byte call so the debugger
 * sees the throw inside the call instruction. */
331 if (buf [3] == 0xe8) {
332 MonoContext ctx_cp = ctx;
333 ctx_cp.rip = rip - 5;
335 if (mono_debugger_handle_exception (&ctx_cp, exc)) {
336 restore_context (&ctx_cp);
337 g_assert_not_reached ();
342 /* adjust eip so that it points into the call instruction */
345 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
346 restore_context (&ctx);
348 g_assert_not_reached ();
/*
 * get_throw_trampoline:
 *
 * Emits the native trampoline behind mono_arch_get_throw_exception /
 * mono_arch_get_rethrow_exception (@rethrow selects which). It pushes
 * the caller's register state plus the exception object as stack
 * arguments (see mono_amd64_throw_exception's dummy-argument trick)
 * and calls mono_amd64_throw_exception, which does not return.
 */
352 get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
358 start = code = mono_global_codeman_reserve (64);
364 unwind_ops = mono_arch_get_cie_program ();
/* %r11 = %rsp at entry; used below to recover the caller's rsp/rip. */
366 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
/* Push the register arguments of mono_amd64_throw_exception in reverse
 * declaration order (rdx ... rbx), then rsp, rip, exc. */
369 amd64_push_imm (code, rethrow);
370 amd64_push_reg (code, AMD64_RDX);
371 amd64_push_reg (code, AMD64_RCX);
372 amd64_push_reg (code, AMD64_RAX);
373 amd64_push_reg (code, AMD64_RSI);
374 amd64_push_reg (code, AMD64_RDI);
375 amd64_push_reg (code, AMD64_R15);
376 amd64_push_reg (code, AMD64_R14);
377 amd64_push_reg (code, AMD64_R13);
378 amd64_push_reg (code, AMD64_R12);
379 amd64_push_reg (code, AMD64_RBP);
380 amd64_push_reg (code, AMD64_RBX);
/* Caller's rsp: entry rsp + 8 skips the return address. */
383 amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
384 amd64_push_reg (code, AMD64_RAX);
/* Caller's rip: the return address at [entry rsp]. */
387 amd64_push_membase (code, AMD64_R11, 0);
/* The exception object (first argument of the trampoline itself). */
390 amd64_push_reg (code, AMD64_ARG_REG1);
392 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, (15 + 1) * sizeof (gpointer));
/* Six dummy stack slots matching dummy1..dummy6. */
396 amd64_push_imm (code, 0);
397 amd64_push_imm (code, 0);
398 amd64_push_imm (code, 0);
399 amd64_push_imm (code, 0);
400 amd64_push_imm (code, 0);
401 amd64_push_imm (code, 0);
/* AOT: load the icall address via a patched RIP-relative slot;
 * JIT: embed the address as an immediate. */
405 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
406 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
408 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception);
410 amd64_call_reg (code, AMD64_R11);
/* Not reached: mono_amd64_throw_exception does not return. */
411 amd64_breakpoint (code);
413 mono_arch_flush_icache (start, code - start);
415 g_assert ((code - start) < 64);
417 *code_size = code - start;
419 mono_save_trampoline_xdebug_info ("throw_exception_trampoline", start, code - start, unwind_ops);
425 * mono_arch_get_throw_exception:
427 * Returns a function pointer which can be used to raise
428 * exceptions. The returned function has the following
429 * signature: void (*func) (MonoException *exc);
433 mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
435 return get_throw_trampoline (FALSE, code_size, ji, aot);
/* Same as mono_arch_get_throw_exception_full, but the trampoline
 * rethrows (preserves the original stack trace). */
439 mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
441 return get_throw_trampoline (TRUE, code_size, ji, aot);
445 * mono_arch_get_throw_corlib_exception:
447 * Returns a function pointer which can be used to raise
448 * corlib exceptions. The returned function has the following
449 * signature: void (*func) (guint32 ex_token, guint32 offset);
450 * Here, offset is the offset which needs to be subtracted from the caller IP
451 * to get the IP of the throw. Passing the offset has the advantage that it
452 * needs no relocations in the caller.
455 mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
457 static guint8* start;
461 start = code = mono_global_codeman_reserve (64);
/* Preserve @offset across the exception_from_token call. */
466 amd64_push_reg (code, AMD64_ARG_REG2);
468 /* Call exception_from_token */
469 amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
/* AOT: image pointer and icall address come from patched RIP-relative
 * slots; JIT: they are embedded as immediates. */
471 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image);
472 amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8);
473 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token");
474 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
476 amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
477 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
/* 32 bytes: Win64-style shadow/home space for the callee. */
480 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
482 amd64_call_reg (code, AMD64_R11);
484 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
487 /* Compute throw_ip */
488 amd64_pop_reg (code, AMD64_ARG_REG2);
/* throw_ip = caller return address - offset */
490 amd64_pop_reg (code, AMD64_ARG_REG3);
491 amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
493 /* Put the throw_ip at the top of the misaligned stack */
494 amd64_push_reg (code, AMD64_ARG_REG3);
496 throw_ex = (guint64)mono_get_throw_exception ();
498 /* Call throw_exception */
/* The MonoException* returned by exception_from_token is in %rax. */
499 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
501 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception");
502 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
504 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
506 /* The original IP is on the stack */
/* Jump (not call) so the pushed throw_ip acts as the return address. */
507 amd64_jump_reg (code, AMD64_R11);
509 g_assert ((code - start) < 64);
511 mono_arch_flush_icache (start, code - start);
513 *code_size = code - start;
519 * mono_arch_find_jit_info_ext:
521 * This function is used to gather information from @ctx, and store it in @frame_info.
522 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
523 * is modified if needed.
524 * Returns TRUE on success, FALSE otherwise.
525 * This function is a version of mono_arch_find_jit_info () where all the results are
526 * returned in a StackFrameInfo structure.
529 mono_arch_find_jit_info_ext (MonoDomain *domain, MonoJitTlsData *jit_tls,
530 MonoJitInfo *ji, MonoContext *ctx,
531 MonoContext *new_ctx, MonoLMF **lmf,
532 StackFrameInfo *frame)
534 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
536 memset (frame, 0, sizeof (StackFrameInfo));
538 frame->managed = FALSE;
/* --- Managed frame path: @ji describes the method containing @ip. --- */
543 gssize regs [MONO_MAX_IREGS + 1];
545 guint32 unwind_info_len;
548 frame->type = FRAME_TYPE_MANAGED;
/* Wrappers (except dynamic-method ones) do not count as managed code. */
550 if (!ji->method->wrapper_type || ji->method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)
551 frame->managed = TRUE;
/* Unwind info comes from the AOT image or the JIT's cache. */
554 unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
556 unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
/* Marshal the context into the register array mono_unwind_frame expects. */
558 regs [AMD64_RAX] = new_ctx->rax;
559 regs [AMD64_RBX] = new_ctx->rbx;
560 regs [AMD64_RCX] = new_ctx->rcx;
561 regs [AMD64_RDX] = new_ctx->rdx;
562 regs [AMD64_RBP] = new_ctx->rbp;
563 regs [AMD64_RSP] = new_ctx->rsp;
564 regs [AMD64_RSI] = new_ctx->rsi;
565 regs [AMD64_RDI] = new_ctx->rdi;
566 regs [AMD64_RIP] = new_ctx->rip;
567 regs [AMD64_R12] = new_ctx->r12;
568 regs [AMD64_R13] = new_ctx->r13;
569 regs [AMD64_R14] = new_ctx->r14;
570 regs [AMD64_R15] = new_ctx->r15;
572 mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
573 (guint8*)ji->code_start + ji->code_size,
574 ip, regs, MONO_MAX_IREGS + 1, &cfa);
/* Copy the unwound registers back into the caller's context. */
576 new_ctx->rax = regs [AMD64_RAX];
577 new_ctx->rbx = regs [AMD64_RBX];
578 new_ctx->rcx = regs [AMD64_RCX];
579 new_ctx->rdx = regs [AMD64_RDX];
580 new_ctx->rbp = regs [AMD64_RBP];
581 new_ctx->rsp = regs [AMD64_RSP];
582 new_ctx->rsi = regs [AMD64_RSI];
583 new_ctx->rdi = regs [AMD64_RDI];
584 new_ctx->rip = regs [AMD64_RIP];
585 new_ctx->r12 = regs [AMD64_R12];
586 new_ctx->r13 = regs [AMD64_R13];
587 new_ctx->r14 = regs [AMD64_R14];
588 new_ctx->r15 = regs [AMD64_R15];
590 /* The CFA becomes the new SP value */
591 new_ctx->rsp = (gssize)cfa;
/* Pop LMF entries belonging to frames we have already unwound past. */
596 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
597 /* remove any unused lmf */
/* Low two bits of previous_lmf are flag bits -- mask them off. */
598 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
601 #ifndef MONO_AMD64_NO_PUSHES
602 /* Pop arguments off the stack */
604 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
606 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
607 new_ctx->rsp += stack_to_pop;
/* --- LMF path: unwind through an unmanaged / transition frame. --- */
/* Bit 2 set: soft-debugger invoke marker (MonoLMFExt). */
615 if (((guint64)(*lmf)->previous_lmf) & 2) {
617 * This LMF entry is created by the soft debug code to mark transitions to
618 * managed code done during invokes.
620 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
622 g_assert (ext->debugger_invoke);
624 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
626 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
628 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
/* Bit 1 set: the rip field of the LMF was filled in directly. */
633 if (((guint64)(*lmf)->previous_lmf) & 1) {
634 /* This LMF has the rip field set */
636 } else if ((*lmf)->rsp == 0) {
641 * The rsp field is set just before the call which transitioned to native
642 * code. Obtain the rip from the stack.
644 rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
647 ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
649 // FIXME: This can happen with multiple appdomains (bug #444383)
654 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
/* Rebuild the context of the managed caller from the LMF snapshot. */
657 new_ctx->rbp = (*lmf)->rbp;
658 new_ctx->rsp = (*lmf)->rsp;
660 new_ctx->rbx = (*lmf)->rbx;
661 new_ctx->r12 = (*lmf)->r12;
662 new_ctx->r13 = (*lmf)->r13;
663 new_ctx->r14 = (*lmf)->r14;
664 new_ctx->r15 = (*lmf)->r15;
666 new_ctx->rdi = (*lmf)->rdi;
667 new_ctx->rsi = (*lmf)->rsi;
670 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
679 * mono_arch_handle_exception:
681 * @ctx: saved processor state
682 * @obj: the exception object
/* Converts the signal context to a MonoContext, lets the debugger
 * intercept first, then runs the generic exception machinery and writes
 * the (possibly modified) context back into @sigctx. */
685 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
689 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
691 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
694 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
696 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
701 #ifdef MONO_ARCH_USE_SIGACTION
/* Accessor for the general-purpose register array inside a ucontext_t;
 * UCONTEXT_GREGS hides the per-OS layout differences. */
702 static inline guint64*
703 gregs_from_ucontext (ucontext_t *ctx)
705 return (guint64 *) UCONTEXT_GREGS (ctx);
/*
 * Copy the register state out of a signal context into @mctx.
 * With sigaction support @sigctx is a ucontext_t; otherwise it is
 * already a MonoContext and the copy is field-for-field.
 */
709 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
711 #ifdef MONO_ARCH_USE_SIGACTION
712 ucontext_t *ctx = (ucontext_t*)sigctx;
714 guint64 *gregs = gregs_from_ucontext (ctx);
716 mctx->rax = gregs [REG_RAX];
717 mctx->rbx = gregs [REG_RBX];
718 mctx->rcx = gregs [REG_RCX];
719 mctx->rdx = gregs [REG_RDX];
720 mctx->rbp = gregs [REG_RBP];
721 mctx->rsp = gregs [REG_RSP];
722 mctx->rsi = gregs [REG_RSI];
723 mctx->rdi = gregs [REG_RDI];
724 mctx->rip = gregs [REG_RIP];
725 mctx->r12 = gregs [REG_R12];
726 mctx->r13 = gregs [REG_R13];
727 mctx->r14 = gregs [REG_R14];
728 mctx->r15 = gregs [REG_R15];
730 MonoContext *ctx = (MonoContext *)sigctx;
732 mctx->rax = ctx->rax;
733 mctx->rbx = ctx->rbx;
734 mctx->rcx = ctx->rcx;
735 mctx->rdx = ctx->rdx;
736 mctx->rbp = ctx->rbp;
737 mctx->rsp = ctx->rsp;
738 mctx->rsi = ctx->rsi;
739 mctx->rdi = ctx->rdi;
740 mctx->rip = ctx->rip;
741 mctx->r12 = ctx->r12;
742 mctx->r13 = ctx->r13;
743 mctx->r14 = ctx->r14;
744 mctx->r15 = ctx->r15;
/*
 * Inverse of mono_arch_sigctx_to_monoctx: write @mctx's register state
 * back into the signal context so execution resumes with it.
 */
749 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
751 #ifdef MONO_ARCH_USE_SIGACTION
752 ucontext_t *ctx = (ucontext_t*)sigctx;
754 guint64 *gregs = gregs_from_ucontext (ctx);
756 gregs [REG_RAX] = mctx->rax;
757 gregs [REG_RBX] = mctx->rbx;
758 gregs [REG_RCX] = mctx->rcx;
759 gregs [REG_RDX] = mctx->rdx;
760 gregs [REG_RBP] = mctx->rbp;
761 gregs [REG_RSP] = mctx->rsp;
762 gregs [REG_RSI] = mctx->rsi;
763 gregs [REG_RDI] = mctx->rdi;
764 gregs [REG_RIP] = mctx->rip;
765 gregs [REG_R12] = mctx->r12;
766 gregs [REG_R13] = mctx->r13;
767 gregs [REG_R14] = mctx->r14;
768 gregs [REG_R15] = mctx->r15;
770 MonoContext *ctx = (MonoContext *)sigctx;
772 ctx->rax = mctx->rax;
773 ctx->rbx = mctx->rbx;
774 ctx->rcx = mctx->rcx;
775 ctx->rdx = mctx->rdx;
776 ctx->rbp = mctx->rbp;
777 ctx->rsp = mctx->rsp;
778 ctx->rsi = mctx->rsi;
779 ctx->rdi = mctx->rdi;
780 ctx->rip = mctx->rip;
781 ctx->r12 = mctx->r12;
782 ctx->r13 = mctx->r13;
783 ctx->r14 = mctx->r14;
784 ctx->r15 = mctx->r15;
/* Extract the instruction pointer from a signal context. */
789 mono_arch_ip_from_context (void *sigctx)
792 #ifdef MONO_ARCH_USE_SIGACTION
794 ucontext_t *ctx = (ucontext_t*)sigctx;
796 guint64 *gregs = gregs_from_ucontext (ctx);
798 return (gpointer)gregs [REG_RIP];
800 MonoContext *ctx = sigctx;
801 return (gpointer)ctx->rip;
/* Re-protect the soft stack-overflow guard area (made accessible while
 * handling the overflow) so the next overflow faults again. Runs on the
 * faulting thread; see prepare_for_guard_pages below. */
806 restore_soft_guard_pages (void)
808 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
809 if (jit_tls->stack_ovf_guard_base)
810 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
814 * this function modifies mctx so that when it is restored, it
815 * won't execute starting at mctx.eip, but in a function that
816 * will restore the protection on the soft-guard pages and return back to
817 * continue at mctx.eip.
820 prepare_for_guard_pages (MonoContext *mctx)
823 sp = (gpointer)(mctx->rsp);
/* Push the original resume address so restore_soft_guard_pages's ret
 * lands back at the interrupted code. */
825 /* the return addr */
826 sp [0] = (gpointer)(mctx->rip);
827 mctx->rip = (guint64)restore_soft_guard_pages;
828 mctx->rsp = (guint64)sp;
/*
 * Entry point jumped to from the altstack SIGSEGV handler (see
 * mono_arch_handle_altstack_exception): runs exception handling on the
 * normal stack, re-arming the soft guard pages first when the fault was
 * a stack overflow (@stack_ovf).
 */
832 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
834 void (*restore_context) (MonoContext *);
837 restore_context = mono_get_restore_context ();
838 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
/* Give the debugger first refusal; if it handles the fault, just
 * restore the context (re-protecting guard pages on overflow). */
840 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
842 prepare_for_guard_pages (&mctx);
843 restore_context (&mctx);
846 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
848 prepare_for_guard_pages (&mctx);
849 restore_context (&mctx);
/*
 * Called from the SIGSEGV handler running on the signal altstack.
 * Builds a fake call frame on the faulting thread's real stack and
 * redirects the signal context so that, on return from the handler,
 * execution continues in altstack_handle_and_restore() with a copy of
 * the ucontext, the exception object and the stack_ovf flag as
 * arguments.
 */
853 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
855 #ifdef MONO_ARCH_USE_SIGACTION
856 MonoException *exc = NULL;
857 ucontext_t *ctx = (ucontext_t*)sigctx;
858 guint64 *gregs = gregs_from_ucontext (ctx);
859 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP], NULL);
/* Stack overflow uses the preallocated exception object (no allocation
 * is safe at this point). */
864 exc = mono_domain_get ()->stack_overflow_ex;
/* Fault outside managed code: treat as a native crash. */
866 mono_handle_native_sigsegv (SIGSEGV, sigctx);
868 /* setup a call frame on the real stack so that control is returned there
869 * and exception handling can continue.
870 * The frame looks like:
874 * 128 is the size of the red zone
876 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
/* 16-byte align, then reserve the frame below the red zone. */
879 sp = (gpointer)(gregs [REG_RSP] & ~15);
880 sp = (gpointer)((char*)sp - frame_size);
881 /* the arguments must be aligned */
882 sp [-1] = (gpointer)gregs [REG_RIP];
883 /* may need to adjust pointers in the new struct copy, depending on the OS */
884 memcpy (sp + 4, ctx, sizeof (ucontext_t));
885 /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
886 gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
887 gregs [REG_RSP] = (unsigned long)(sp - 1);
/* SysV argument registers: rdi = sigctx copy, rsi = exc, rdx = stack_ovf. */
888 gregs [REG_RDI] = (unsigned long)(sp + 4);
889 gregs [REG_RSI] = (guint64)exc;
890 gregs [REG_RDX] = stack_ovf;
/*
 * Returns the original return address saved in the current LMF by
 * mono_arch_notify_pending_exc, and clears the "rip is set" flag bit
 * in previous_lmf. Called from the throw_pending_exception trampoline.
 */
895 mono_amd64_get_original_ip (void)
897 MonoLMF *lmf = mono_get_lmf ();
901 /* Reset the change to previous_lmf */
902 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
/*
 * Emits the trampoline jumped to (via a hijacked return address, see
 * mono_arch_notify_pending_exc) when an async exception arrived while
 * the thread was in native code. It preserves the call's return value
 * registers, fetches the pending exception, and either throws it with
 * the caller's original IP on the stack, or — if none is pending —
 * restores the registers and resumes at the original return address.
 */
908 mono_arch_get_throw_pending_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
910 guint8 *code, *start;
912 gpointer throw_trampoline;
916 start = code = mono_global_codeman_reserve (128);
918 /* We are in the frame of a managed method after a call */
920 * We would like to throw the pending exception in such a way that it looks to
921 * be thrown from the managed method.
924 /* Save registers which might contain the return value of the call */
925 amd64_push_reg (code, AMD64_RAX);
926 amd64_push_reg (code, AMD64_RDX);
/* Also save the floating point return value in %xmm0. */
928 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
929 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
/* Align the stack for the upcoming calls. */
932 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
934 /* Obtain the pending exception */
/* AOT: icall address via patched RIP-relative slot; JIT: immediate. */
936 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
937 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
939 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
941 amd64_call_reg (code, AMD64_R11);
943 /* Check if it is NULL, and branch */
944 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
945 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
947 /* exc != NULL branch */
949 /* Save the exc on the stack */
950 amd64_push_reg (code, AMD64_RAX);
/* Re-align after the push. */
952 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
954 /* Obtain the original ip and clear the flag in previous_lmf */
956 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
957 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
959 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
961 amd64_call_reg (code, AMD64_R11);
/* Reload the saved exception object from the stack. */
964 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
966 /* Pop saved stuff from the stack */
967 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
969 /* Setup arguments for the throw trampoline */
/* arg1 = the exception object. */
971 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
972 /* The trampoline expects the caller ip to be pushed on the stack */
973 amd64_push_reg (code, AMD64_RAX);
975 /* Call the throw trampoline */
977 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
978 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
980 throw_trampoline = mono_get_throw_exception ();
981 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
983 /* We use a jump instead of a call so we can push the original ip on the stack */
984 amd64_jump_reg (code, AMD64_R11);
986 /* ex == NULL branch */
987 mono_amd64_patch (br [0], code);
989 /* Obtain the original ip and clear the flag in previous_lmf */
991 *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
992 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
994 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
996 amd64_call_reg (code, AMD64_R11);
/* Keep the original ip in %r11 while the return regs are restored. */
997 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
999 /* Restore registers */
1000 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1001 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1002 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1003 amd64_pop_reg (code, AMD64_RDX);
1004 amd64_pop_reg (code, AMD64_RAX);
1006 /* Return to original code */
1007 amd64_jump_reg (code, AMD64_R11);
1009 g_assert ((code - start) < 128);
1011 *code_size = code - start;
1016 static gpointer throw_pending_exception;
1019 * Called when a thread receives an async exception while executing unmanaged code.
1020 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1021 * the return address on the stack to point to a helper routine which throws the
/* The original return address is saved in lmf->rip and the low bit of
 * previous_lmf is set to signal the hijack (cleared again by
 * mono_amd64_get_original_ip). */
1025 mono_arch_notify_pending_exc (void)
1027 MonoLMF *lmf = mono_get_lmf ();
1030 /* Not yet started */
/* Low bit of previous_lmf doubles as the "rip already set" flag. */
1037 if ((guint64)lmf->previous_lmf & 1)
1038 /* Already hijacked or trampoline LMF entry */
1041 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1042 lmf->rip = *(guint64*)(lmf->rsp - 8);
1043 /* Signal that lmf->rip is set */
1044 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
/* Redirect the return address to the throw_pending_exception trampoline. */
1046 *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
/* One-time arch init: resolve the throw_pending_exception trampoline,
 * from the AOT image when running AOT-only, otherwise by emitting it. */
1050 mono_arch_exceptions_init (void)
1055 if (mono_aot_only) {
1056 throw_pending_exception = mono_aot_get_named_code ("throw_pending_exception");
1058 /* Call this to avoid initialization races */
1059 throw_pending_exception = mono_arch_get_throw_pending_exception_full (&code_size, &ji, FALSE);
1066 * The mono_arch_unwindinfo* methods are used to build and add
1067 * function table info for each emitted method from mono. On Winx64
1068 * the seh handler will not be called if the mono methods are not
1069 * added to the function table.
1071 * We should not need to add non-volatile register info to the
1072 * table since mono stores that info elsewhere. (Except for the register
1076 #define MONO_MAX_UNWIND_CODES 22
/* Mirror of the Win64 UNWIND_CODE slot: one prolog operation
 * (offset + opcode + operand), or a raw 16-bit frame offset. */
1078 typedef union _UNWIND_CODE {
1081 guchar UnwindOp : 4;
1084 gushort FrameOffset;
1085 } UNWIND_CODE, *PUNWIND_CODE;
/* Mirror of the Win64 UNWIND_INFO header, with a fixed-size code array
 * (MONO_MAX_UNWIND_CODES) instead of the spec's variable-length tail. */
1087 typedef struct _UNWIND_INFO {
1090 guchar SizeOfProlog;
1091 guchar CountOfCodes;
1092 guchar FrameRegister : 4;
1093 guchar FrameOffset : 4;
1094 /* custom size for mono, allowing for: */
1095 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1096 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1097 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1098 /*UWOP_PUSH_NONVOL offset = 15-0*/
1099 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1101 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1103 * OPTIONAL ULONG ExceptionHandler;
1104 * OPTIONAL ULONG FunctionEntry;
1106 * OPTIONAL ULONG ExceptionData[]; */
1107 } UNWIND_INFO, *PUNWIND_INFO;
/* Function-table entry plus its unwind info, kept together so both can
 * be copied after the method's code (see install_unwind_info below). */
1111 RUNTIME_FUNCTION runtimeFunction;
1112 UNWIND_INFO unwindInfo;
1113 } MonoUnwindInfo, *PMonoUnwindInfo;
/* Allocate a zeroed MonoUnwindInfo into *@monoui (caller owns it;
 * install_unwind_info frees it). Version 1 per the Win64 format. */
1116 mono_arch_unwindinfo_create (gpointer* monoui)
1118 PMonoUnwindInfo newunwindinfo;
1119 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1120 newunwindinfo->unwindInfo.Version = 1;
/*
 * Record a UWOP_PUSH_NONVOL prolog op for register @reg at prolog
 * offset @nextip - @codebegin. Codes are filled from the END of the
 * UnwindCode array backwards, matching the Win64 requirement that they
 * be sorted by descending CodeOffset; ops must therefore be added in
 * prolog order.
 */
1124 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1126 PMonoUnwindInfo unwindinfo;
1127 PUNWIND_CODE unwindcode;
1130 mono_arch_unwindinfo_create (monoui);
1132 unwindinfo = (MonoUnwindInfo*)*monoui;
1134 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1135 g_error ("Larger allocation needed for the unwind information.");
1137 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1138 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1139 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1140 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1141 unwindcode->OpInfo = reg;
1143 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1144 g_error ("Adding unwind info in wrong order.");
1146 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * Record a UWOP_SET_FPREG prolog op: @reg becomes the frame pointer.
 * Consumes two code slots (the op plus its frame-offset operand);
 * the frame offset is assumed to be zero for mono.
 */
1150 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1152 PMonoUnwindInfo unwindinfo;
1153 PUNWIND_CODE unwindcode;
1156 mono_arch_unwindinfo_create (monoui);
1158 unwindinfo = (MonoUnwindInfo*)*monoui;
1160 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1161 g_error ("Larger allocation needed for the unwind information.");
1163 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1164 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1165 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1167 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1168 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1169 unwindcode->OpInfo = reg;
1171 unwindinfo->unwindInfo.FrameRegister = reg;
1173 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1174 g_error ("Adding unwind info in wrong order.");
1176 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * Record a stack allocation of @size bytes in the prolog. Encoding
 * follows the Win64 rules: UWOP_ALLOC_SMALL (1 slot) for sizes up to
 * 128 bytes, UWOP_ALLOC_LARGE with a scaled 16-bit operand (2 slots)
 * up to 0x7FFF8, and UWOP_ALLOC_LARGE with an unscaled 32-bit operand
 * (3 slots) beyond that.
 */
1180 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1182 PMonoUnwindInfo unwindinfo;
1183 PUNWIND_CODE unwindcode;
1187 mono_arch_unwindinfo_create (monoui);
1189 unwindinfo = (MonoUnwindInfo*)*monoui;
1192 g_error ("Stack allocation must be equal to or greater than 0x8.");
1196 else if (size <= 0x7FFF8)
1201 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1202 g_error ("Larger allocation needed for the unwind information.");
1204 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1205 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1207 if (codesneeded == 1) {
1208 /*The size of the allocation is
1209 (the number in the OpInfo member) times 8 plus 8*/
1210 unwindcode->OpInfo = (size - 8)/8;
1211 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1214 if (codesneeded == 3) {
1215 /*the unscaled size of the allocation is recorded
1216 in the next two slots in little-endian format*/
1217 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1219 unwindcode->OpInfo = 1;
1222 /*the size of the allocation divided by 8
1223 is recorded in the next slot*/
1224 unwindcode->FrameOffset = size/8;
1226 unwindcode->OpInfo = 0;
1229 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1232 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1234 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1235 g_error ("Adding unwind info in wrong order.");
1237 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
/*
 * Returns the number of bytes needed to store the serialized unwind info for
 * this method: the full MonoUnwindInfo minus the unused tail of the fixed
 * MONO_MAX_UNWIND_CODES UnwindCode array (only CountOfCodes entries are
 * live). The extra 8 bytes are presumably slack so the copy target can be
 * 8-byte aligned past the code end (see ALIGN_TO in the install path) —
 * NOTE(review): confirm against mono_arch_unwindinfo_install_unwind_info.
 *
 * NOTE(review): the return-type line and braces were lost in this extract
 * (leading numbers are line-number residue); code left untouched.
 */
1241 mono_arch_unwindinfo_get_size (gpointer monoui)
1243 PMonoUnwindInfo unwindinfo;
1247 unwindinfo = (MonoUnwindInfo*)monoui;
1248 return (8 + sizeof (MonoUnwindInfo)) -
1249 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
/*
 * Function-table callback handed to RtlInstallFunctionTableCallback: given a
 * control PC inside a JITted method, find that method's JIT info and return
 * the RUNTIME_FUNCTION that was appended (8-byte aligned) after the method's
 * code by mono_arch_unwindinfo_install_unwind_info.
 *
 * Context is the BaseAddress the table was registered with (the code start —
 * see the install function's last argument), so UnwindData is rewritten here
 * to be relative to it, as the OS unwinder requires image-relative offsets.
 *
 * NOTE(review): lines are missing from this extract (leading numbers are
 * line-number residue) — most likely a `if (!ji) return NULL;` guard after
 * the table lookup and local declarations; code left untouched.
 */
1253 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1257 PMonoUnwindInfo targetinfo;
1258 MonoDomain *domain = mono_domain_get ();
1260 ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
/* The serialized MonoUnwindInfo lives just past the method's code,
 * rounded up to 8-byte alignment. */
1264 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
1266 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
1268 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1270 return &targetinfo->runtimeFunction;
/*
 * Finalizes the unwind info for a freshly JITted method: serializes the
 * build-time MonoUnwindInfo to the 8-byte-aligned slot just past the method's
 * code (the caller must have reserved room there — see
 * mono_arch_unwindinfo_get_size), frees the build buffer, and registers a
 * callback-based function table so the OS unwinder can walk the method.
 *
 * The unwind codes were accumulated from the TAIL of the fixed-size array
 * (see mono_arch_unwindinfo_add_alloc_stack), so only the live tail entries
 * are compacted to the front of the target's UnwindCode array.
 *
 * NOTE(review): lines are missing from this extract (leading numbers are
 * line-number residue — e.g. local declarations, the BeginAddress setup and
 * braces); code left untouched.
 */
1274 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1276 PMonoUnwindInfo unwindinfo, targetinfo;
1278 guint64 targetlocation;
1282 unwindinfo = (MonoUnwindInfo*)*monoui;
1283 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1284 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
/* RUNTIME_FUNCTION offsets are relative to the registered base (code). */
1286 unwindinfo->runtimeFunction.EndAddress = code_size;
1287 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
/* Copy the fixed header first (everything except the UnwindCode array)... */
1289 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1291 codecount = unwindinfo->unwindInfo.CountOfCodes;
/* ...then compact the live codes from the tail of the build array to the
 * front of the target array, as the UNWIND_INFO layout expects. */
1293 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1294 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1297 g_free (unwindinfo);
/* Per the Win64 API, the low two bits of the table identifier must be set
 * for a callback-based table, hence the | 0x3. */
1300 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
1305 #if MONO_SUPPORT_TASKLETS
1306 MonoContinuationRestore
1307 mono_tasklets_arch_restore (void)
1309 static guint8* saved = NULL;
1310 guint8 *code, *start;
1311 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1314 return (MonoContinuationRestore)saved;
1315 code = start = mono_global_codeman_reserve (64);
1316 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1317 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1318 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1319 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1320 * We move cont to cont_reg since we need both rcx and rdi for the copy
1321 * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
1323 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1324 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1325 /* setup the copy of the stack */
1326 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1327 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1329 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1330 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1331 amd64_prefix (code, X86_REP_PREFIX);
1334 /* now restore the registers from the LMF */
1335 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1336 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
1337 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
1338 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
1339 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
1340 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
1341 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
1343 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
1344 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
1346 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);
1348 /* restore the lmf chain */
1349 /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
1350 x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/
1352 /* state is already in rax */
1353 amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
1354 g_assert ((code - start) <= 64);
1356 return (MonoContinuationRestore)saved;