2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #ifdef HAVE_UCONTEXT_H
18 #include <mono/arch/amd64/amd64-codegen.h>
19 #include <mono/metadata/appdomain.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/threads.h>
22 #include <mono/metadata/threads-types.h>
23 #include <mono/metadata/debug-helpers.h>
24 #include <mono/metadata/exception.h>
25 #include <mono/metadata/gc-internal.h>
26 #include <mono/metadata/mono-debug.h>
27 #include <mono/utils/mono-mmap.h>
30 #include "mini-amd64.h"
32 #include "debug-mini.h"
34 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
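/*
 * Illustrative only (not in the original file): ALIGN_TO rounds 'val' up to the
 * next multiple of 'align', where 'align' must be a power of two. It is used
 * below to place the Win64 UNWIND_INFO on an 8-byte boundary.
 */
#if 0
g_assert (ALIGN_TO (13, 8) == 16);
g_assert (ALIGN_TO (16, 8) == 16);
g_assert (ALIGN_TO (1, 16) == 16);
#endif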
37 static MonoW32ExceptionHandler fpe_handler;
38 static MonoW32ExceptionHandler ill_handler;
39 static MonoW32ExceptionHandler segv_handler;
41 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
43 #define W32_SEH_HANDLE_EX(_ex) \
44 if (_ex##_handler) _ex##_handler(0, er, sctx)
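/*
 * For reference, W32_SEH_HANDLE_EX (segv) expands to:
 *
 *   if (segv_handler) segv_handler (0, er, sctx);
 *
 * i.e. the per-signal handler is invoked only if one was registered through
 * win32_seh_set_handler () below.
 */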
47 * Unhandled Exception Filter
48 * Top-level per-process exception handler.
50 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
57 res = EXCEPTION_CONTINUE_EXECUTION;
59 er = ep->ExceptionRecord;
60 ctx = ep->ContextRecord;
61 sctx = g_malloc(sizeof(MonoContext));
63 /* Copy Win32 context to UNIX style context */
78 switch (er->ExceptionCode) {
79 case EXCEPTION_ACCESS_VIOLATION:
80 W32_SEH_HANDLE_EX(segv);
82 case EXCEPTION_ILLEGAL_INSTRUCTION:
83 W32_SEH_HANDLE_EX(ill);
85 case EXCEPTION_INT_DIVIDE_BY_ZERO:
86 case EXCEPTION_INT_OVERFLOW:
87 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
88 case EXCEPTION_FLT_OVERFLOW:
89 case EXCEPTION_FLT_UNDERFLOW:
90 case EXCEPTION_FLT_INEXACT_RESULT:
91 W32_SEH_HANDLE_EX(fpe);
97 /* Copy context back */
100 ctx->Rdi = sctx->rdi;
101 ctx->Rsi = sctx->rsi;
102 ctx->Rbx = sctx->rbx;
103 ctx->Rbp = sctx->rbp;
104 ctx->R12 = sctx->r12;
105 ctx->R13 = sctx->r13;
106 ctx->R14 = sctx->r14;
107 ctx->R15 = sctx->r15;
108 ctx->Rip = sctx->rip;
110 /* Volatile, but should not matter? */
111 ctx->Rax = sctx->rax;
112 ctx->Rcx = sctx->rcx;
113 ctx->Rdx = sctx->rdx;
120 void win32_seh_init()
122 old_handler = SetUnhandledExceptionFilter(seh_handler);
125 void win32_seh_cleanup()
127 if (old_handler) SetUnhandledExceptionFilter(old_handler);
130 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
134 fpe_handler = handler;
137 ill_handler = handler;
140 segv_handler = handler;
147 #endif /* TARGET_WIN32 */
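/*
 * Minimal sketch (not part of the original file): how the generic runtime setup
 * code might wire its signal handlers to the SEH glue above. The SIGFPE/SIGILL/
 * SIGSEGV 'type' values and the *_cb handler names are assumptions here; the
 * switch cases inside win32_seh_set_handler () are elided from this excerpt.
 */
#if 0
win32_seh_init ();
win32_seh_set_handler (SIGFPE, fpe_cb);
win32_seh_set_handler (SIGILL, ill_cb);
win32_seh_set_handler (SIGSEGV, segv_cb);
/* ... run ... */
win32_seh_cleanup ();
#endif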
150 * mono_arch_get_restore_context:
152 * Returns a pointer to a method which restores a previously saved sigcontext.
155 mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
157 guint8 *start = NULL;
159 MonoJumpInfo *ji = NULL;
160 GSList *unwind_ops = NULL;
162 /* restore_context (MonoContext *ctx) */
164 start = code = mono_global_codeman_reserve (256);
166 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
168 /* Restore all registers except %rip and %r11 */
169 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
170 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
171 amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
172 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
173 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
174 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
175 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
176 //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
177 //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
178 //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
179 amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
180 amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
181 amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
182 #if !defined(__native_client_codegen__)
183 amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
186 if (mono_running_on_valgrind ()) {
187 /* Prevent 'Address 0x... is just below the stack ptr.' errors */
188 amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
189 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
190 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
192 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
193 /* get return address */
194 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
197 /* jump to the saved IP */
198 amd64_jump_reg (code, AMD64_R11);
200 nacl_global_codeman_validate(&start, 256, &code);
202 mono_arch_flush_icache (start, code - start);
205 *info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops);
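/*
 * Usage sketch (hypothetical helper): callers obtain the generated routine
 * through mono_get_restore_context () and invoke it with a filled-in
 * MonoContext; it never returns, execution continues at ctx->rip with the
 * saved register state.
 */
#if 0
static void
resume_at (MonoContext *ctx)
{
	void (*restore_context) (MonoContext *) = mono_get_restore_context ();

	restore_context (ctx);
	g_assert_not_reached ();
}
#endif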
211 * mono_arch_get_call_filter:
213 * Returns a pointer to a method which calls an exception filter. We
214 * also use this function to call finally handlers (we pass NULL as
215 * @exc object in this case).
218 mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
224 MonoJumpInfo *ji = NULL;
225 GSList *unwind_ops = NULL;
226 const guint kMaxCodeSize = NACL_SIZE (128, 256);
228 start = code = mono_global_codeman_reserve (kMaxCodeSize);
230 /* call_filter (MonoContext *ctx, unsigned long eip) */
233 /* Alloc new frame */
234 amd64_push_reg (code, AMD64_RBP);
235 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
237 /* Save callee saved regs */
239 for (i = 0; i < AMD64_NREG; ++i)
240 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
241 amd64_push_reg (code, i);
247 amd64_push_reg (code, AMD64_RBP);
249 /* Make the stack misaligned; the call will make it aligned again */
251 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
254 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
255 /* load callee saved regs */
256 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
257 amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
258 amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
259 amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
260 #if !defined(__native_client_codegen__)
261 amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
264 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
265 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
268 /* call the handler */
269 amd64_call_reg (code, AMD64_ARG_REG2);
272 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
275 amd64_pop_reg (code, AMD64_RBP);
277 /* Restore callee saved regs */
278 for (i = AMD64_NREG; i >= 0; --i)
279 if (AMD64_IS_CALLEE_SAVED_REG (i))
280 amd64_pop_reg (code, i);
285 g_assert ((code - start) < kMaxCodeSize);
287 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
289 mono_arch_flush_icache (start, code - start);
292 *info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops);
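/*
 * Usage sketch (hypothetical wrapper, assuming the usual mini accessor
 * mono_get_call_filter ()): the generated routine is used both for filter
 * clauses, whose result it returns, and for finally handlers, in which case
 * the exception object handled by the caller is NULL.
 */
#if 0
static int
run_filter (MonoContext *ctx, gpointer handler_ip)
{
	int (*call_filter) (MonoContext *, gpointer) = mono_get_call_filter ();

	return call_filter (ctx, handler_ip);
}
#endif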
298 * The first few arguments are dummies, used to force the real arguments to be passed on
299 * the stack; this avoids overwriting the argument registers in the throw trampoline.
302 mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
303 guint64 dummy5, guint64 dummy6,
304 mgreg_t *regs, mgreg_t rip,
305 MonoObject *exc, gboolean rethrow)
307 static void (*restore_context) (MonoContext *);
310 if (!restore_context)
311 restore_context = mono_get_restore_context ();
313 ctx.rsp = regs [AMD64_RSP];
315 ctx.rbx = regs [AMD64_RBX];
316 ctx.rbp = regs [AMD64_RBP];
317 ctx.r12 = regs [AMD64_R12];
318 ctx.r13 = regs [AMD64_R13];
319 ctx.r14 = regs [AMD64_R14];
320 ctx.r15 = regs [AMD64_R15];
321 ctx.rdi = regs [AMD64_RDI];
322 ctx.rsi = regs [AMD64_RSI];
323 ctx.rax = regs [AMD64_RAX];
324 ctx.rcx = regs [AMD64_RCX];
325 ctx.rdx = regs [AMD64_RDX];
327 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
328 MonoException *mono_ex = (MonoException*)exc;
330 mono_ex->stack_trace = NULL;
333 if (mono_debug_using_mono_debugger ()) {
334 guint8 buf [16], *code;
336 mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
339 if (buf [3] == 0xe8) {
340 MonoContext ctx_cp = ctx;
341 ctx_cp.rip = rip - 5;
343 if (mono_debugger_handle_exception (&ctx_cp, exc)) {
344 restore_context (&ctx_cp);
345 g_assert_not_reached ();
350 /* adjust eip so that it points into the call instruction */
353 mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
354 restore_context (&ctx);
356 g_assert_not_reached ();
360 mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
361 guint64 dummy5, guint64 dummy6,
362 mgreg_t *regs, mgreg_t rip,
363 guint32 ex_token_index, gint64 pc_offset)
365 guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
368 ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
372 /* Negate the ip adjustment done in mono_amd64_throw_exception () */
375 mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject*)ex, FALSE);
379 mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
380 guint64 dummy5, guint64 dummy6,
381 mgreg_t *regs, mgreg_t rip,
382 guint32 dummy7, gint64 dummy8)
384 /* Only the register parameters are valid */
387 ctx.rsp = regs [AMD64_RSP];
389 ctx.rbx = regs [AMD64_RBX];
390 ctx.rbp = regs [AMD64_RBP];
391 ctx.r12 = regs [AMD64_R12];
392 ctx.r13 = regs [AMD64_R13];
393 ctx.r14 = regs [AMD64_R14];
394 ctx.r15 = regs [AMD64_R15];
395 ctx.rdi = regs [AMD64_RDI];
396 ctx.rsi = regs [AMD64_RSI];
397 ctx.rax = regs [AMD64_RAX];
398 ctx.rcx = regs [AMD64_RCX];
399 ctx.rdx = regs [AMD64_RDX];
401 mono_resume_unwind (&ctx);
405 * get_throw_trampoline:
407 * Generate a call to mono_amd64_throw_exception/
408 * mono_amd64_throw_corlib_exception.
411 get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
415 MonoJumpInfo *ji = NULL;
416 GSList *unwind_ops = NULL;
417 int i, stack_size, arg_offsets [16], regs_offset;
418 const guint kMaxCodeSize = NACL_SIZE (256, 512);
420 start = code = mono_global_codeman_reserve (kMaxCodeSize);
422 /* The stack is unaligned on entry */
423 stack_size = 192 + 8;
428 unwind_ops = mono_arch_get_cie_program ();
431 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
433 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
436 * To hide linux/windows calling convention differences, we pass all real arguments on
437 * the stack, filling the 6 register argument slots with dummy values.
441 arg_offsets [1] = sizeof(mgreg_t);
442 arg_offsets [2] = sizeof(mgreg_t) * 2;
443 arg_offsets [3] = sizeof(mgreg_t) * 3;
444 regs_offset = sizeof(mgreg_t) * 4;
447 for (i = 0; i < AMD64_NREG; ++i)
449 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
451 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
452 amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
453 /* Set arg1 == regs */
454 amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
455 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
456 /* Set arg2 == eip */
458 amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
460 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
461 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
462 /* Set arg3 == exc/ex_token_index */
464 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
466 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
467 /* Set arg4 == rethrow/pc offset */
469 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
471 amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
474 * The caller is LLVM code which passes the absolute address, not a pc offset,
475 * so compensate by passing 0 as 'rip' and passing the negated abs address as
478 amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
480 amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
484 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
485 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
487 amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? (mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
489 amd64_call_reg (code, AMD64_R11);
490 amd64_breakpoint (code);
492 mono_arch_flush_icache (start, code - start);
494 g_assert ((code - start) < kMaxCodeSize);
496 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
499 *info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops);
505 * mono_arch_get_throw_exception:
507 * Returns a function pointer which can be used to raise
508 * exceptions. The returned function has the following
509 * signature: void (*func) (MonoException *exc);
513 mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
515 return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
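/*
 * Illustrative sketch (not from the original source): the JIT emits direct
 * calls to this trampoline, but it can also be reached from C through
 * mono_get_throw_exception (), as done elsewhere in this file.
 */
#if 0
static void
throw_from_c (MonoException *exc)
{
	void (*throw_exception) (MonoObject *) = mono_get_throw_exception ();

	throw_exception ((MonoObject*)exc);
	g_assert_not_reached ();
}
#endif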
519 mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
521 return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
525 * mono_arch_get_throw_corlib_exception:
527 * Returns a function pointer which can be used to raise
528 * corlib exceptions. The returned function has the following
529 * signature: void (*func) (guint32 ex_token, guint32 offset);
530 * Here, offset is the offset which needs to be subtracted from the caller IP
531 * to get the IP of the throw. Passing the offset has the advantage that it
532 * needs no relocations in the caller.
535 mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
537 return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
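/*
 * Worked example of the offset convention (illustrative numbers): if the call
 * to the trampoline is emitted at offset 0x10 of a method, the return address
 * pushed by the 5-byte call is 0x15, so the JIT passes offset == 5 and the
 * trampoline recovers the throw site as caller_ip - offset, without needing
 * any relocation in the caller.
 */
#if 0
guint8 *throw_ip = caller_ip - offset;	/* hypothetical variable names */
#endif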
541 * mono_arch_find_jit_info:
543 * This function is used to gather information from @ctx and store it in @frame.
544 * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
545 * is modified if needed.
546 * Returns TRUE on success, FALSE otherwise.
549 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
550 MonoJitInfo *ji, MonoContext *ctx,
551 MonoContext *new_ctx, MonoLMF **lmf,
552 mgreg_t **save_locations,
553 StackFrameInfo *frame)
555 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
557 memset (frame, 0, sizeof (StackFrameInfo));
563 mgreg_t regs [MONO_MAX_IREGS + 1];
565 guint32 unwind_info_len;
568 frame->type = FRAME_TYPE_MANAGED;
571 unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
573 unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
575 frame->unwind_info = unwind_info;
576 frame->unwind_info_len = unwind_info_len;
578 regs [AMD64_RAX] = new_ctx->rax;
579 regs [AMD64_RBX] = new_ctx->rbx;
580 regs [AMD64_RCX] = new_ctx->rcx;
581 regs [AMD64_RDX] = new_ctx->rdx;
582 regs [AMD64_RBP] = new_ctx->rbp;
583 regs [AMD64_RSP] = new_ctx->rsp;
584 regs [AMD64_RSI] = new_ctx->rsi;
585 regs [AMD64_RDI] = new_ctx->rdi;
586 regs [AMD64_RIP] = new_ctx->rip;
587 regs [AMD64_R12] = new_ctx->r12;
588 regs [AMD64_R13] = new_ctx->r13;
589 regs [AMD64_R14] = new_ctx->r14;
590 regs [AMD64_R15] = new_ctx->r15;
592 mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
593 (guint8*)ji->code_start + ji->code_size,
594 ip, regs, MONO_MAX_IREGS + 1,
595 save_locations, MONO_MAX_IREGS, &cfa);
597 new_ctx->rax = regs [AMD64_RAX];
598 new_ctx->rbx = regs [AMD64_RBX];
599 new_ctx->rcx = regs [AMD64_RCX];
600 new_ctx->rdx = regs [AMD64_RDX];
601 new_ctx->rbp = regs [AMD64_RBP];
602 new_ctx->rsp = regs [AMD64_RSP];
603 new_ctx->rsi = regs [AMD64_RSI];
604 new_ctx->rdi = regs [AMD64_RDI];
605 new_ctx->rip = regs [AMD64_RIP];
606 new_ctx->r12 = regs [AMD64_R12];
607 new_ctx->r13 = regs [AMD64_R13];
608 new_ctx->r14 = regs [AMD64_R14];
609 new_ctx->r15 = regs [AMD64_R15];
611 /* The CFA becomes the new SP value */
612 new_ctx->rsp = (mgreg_t)cfa;
617 if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
618 /* remove any unused lmf */
619 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
622 #ifndef MONO_AMD64_NO_PUSHES
623 /* Pop arguments off the stack */
625 MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
627 guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
628 new_ctx->rsp += stack_to_pop;
636 if (((guint64)(*lmf)->previous_lmf) & 2) {
638 * This LMF entry is created by the soft debug code to mark transitions to
639 * managed code done during invokes.
641 MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
643 g_assert (ext->debugger_invoke);
645 memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
647 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
649 frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
654 if (((guint64)(*lmf)->previous_lmf) & 1) {
655 /* This LMF has the rip field set */
657 } else if ((*lmf)->rsp == 0) {
662 * The rsp field is set just before the call which transitioned to native
663 * code. Obtain the rip from the stack.
665 rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
668 ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
670 * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
671 * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
682 frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
685 new_ctx->rbp = (*lmf)->rbp;
686 new_ctx->rsp = (*lmf)->rsp;
688 new_ctx->rbx = (*lmf)->rbx;
689 new_ctx->r12 = (*lmf)->r12;
690 new_ctx->r13 = (*lmf)->r13;
691 new_ctx->r14 = (*lmf)->r14;
692 new_ctx->r15 = (*lmf)->r15;
694 new_ctx->rdi = (*lmf)->rdi;
695 new_ctx->rsi = (*lmf)->rsi;
698 *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
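/*
 * Sketch of a typical caller (hypothetical, heavily simplified): the generic
 * stack walker repeatedly asks this function to unwind one frame until it
 * fails or the first LMF entry is reached.
 */
#if 0
MonoContext cur = *start_ctx, next;
StackFrameInfo frame;

for (;;) {
	MonoJitInfo *fji = mini_jit_info_table_find (domain, MONO_CONTEXT_GET_IP (&cur), NULL);

	if (!mono_arch_find_jit_info (domain, jit_tls, fji, &cur, &next, &lmf, NULL, &frame))
		break;
	/* ... inspect 'frame' ... */
	cur = next;
}
#endif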
709 * Called upon resuming from a signal handler.
712 handle_signal_exception (gpointer obj, gboolean test_only)
714 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
716 static void (*restore_context) (MonoContext *);
718 if (!restore_context)
719 restore_context = mono_get_restore_context ();
721 memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
723 if (mono_debugger_handle_exception (&ctx, (MonoObject *)obj))
726 mono_handle_exception (&ctx, obj, MONO_CONTEXT_GET_IP (&ctx), test_only);
728 restore_context (&ctx);
732 * mono_arch_handle_exception:
734 * @ctx: saved processor state
735 * @obj: the exception object
738 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
740 #if defined(MONO_ARCH_USE_SIGACTION)
741 ucontext_t *ctx = (ucontext_t*)sigctx;
744 * Handling the exception in the signal handler is problematic, since the original
745 * signal is disabled, and we could run arbitrary code through the debugger. So
746 * resume onto the normal stack and do most of the work there if possible.
748 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
749 guint64 sp = UCONTEXT_REG_RSP (ctx);
751 /* Pass the ctx parameter in TLS */
752 mono_arch_sigctx_to_monoctx (ctx, &jit_tls->ex_ctx);
753 /* The others in registers */
754 UCONTEXT_REG_RDI (ctx) = (guint64)obj;
755 UCONTEXT_REG_RSI (ctx) = test_only;
757 /* Allocate a stack frame below the red zone */
759 /* The stack should be unaligned */
762 UCONTEXT_REG_RSP (ctx) = sp;
764 UCONTEXT_REG_RIP (ctx) = (guint64)handle_signal_exception;
770 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
772 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
775 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
777 mono_arch_monoctx_to_sigctx (&mctx, sigctx);
784 mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
786 #if defined(__native_client_codegen__) || defined(__native_client__)
787 printf("WARNING: mono_arch_sigctx_to_monoctx() called!\n");
790 #if defined(MONO_ARCH_USE_SIGACTION)
791 ucontext_t *ctx = (ucontext_t*)sigctx;
793 mctx->rax = UCONTEXT_REG_RAX (ctx);
794 mctx->rbx = UCONTEXT_REG_RBX (ctx);
795 mctx->rcx = UCONTEXT_REG_RCX (ctx);
796 mctx->rdx = UCONTEXT_REG_RDX (ctx);
797 mctx->rbp = UCONTEXT_REG_RBP (ctx);
798 mctx->rsp = UCONTEXT_REG_RSP (ctx);
799 mctx->rsi = UCONTEXT_REG_RSI (ctx);
800 mctx->rdi = UCONTEXT_REG_RDI (ctx);
801 mctx->rip = UCONTEXT_REG_RIP (ctx);
802 mctx->r12 = UCONTEXT_REG_R12 (ctx);
803 mctx->r13 = UCONTEXT_REG_R13 (ctx);
804 mctx->r14 = UCONTEXT_REG_R14 (ctx);
805 mctx->r15 = UCONTEXT_REG_R15 (ctx);
807 MonoContext *ctx = (MonoContext *)sigctx;
809 mctx->rax = ctx->rax;
810 mctx->rbx = ctx->rbx;
811 mctx->rcx = ctx->rcx;
812 mctx->rdx = ctx->rdx;
813 mctx->rbp = ctx->rbp;
814 mctx->rsp = ctx->rsp;
815 mctx->rsi = ctx->rsi;
816 mctx->rdi = ctx->rdi;
817 mctx->rip = ctx->rip;
818 mctx->r12 = ctx->r12;
819 mctx->r13 = ctx->r13;
820 mctx->r14 = ctx->r14;
821 mctx->r15 = ctx->r15;
826 mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
828 #if defined(__native_client__) || defined(__native_client_codegen__)
829 printf("WARNING: mono_arch_monoctx_to_sigctx() called!\n");
832 #if defined(MONO_ARCH_USE_SIGACTION)
833 ucontext_t *ctx = (ucontext_t*)sigctx;
835 UCONTEXT_REG_RAX (ctx) = mctx->rax;
836 UCONTEXT_REG_RBX (ctx) = mctx->rbx;
837 UCONTEXT_REG_RCX (ctx) = mctx->rcx;
838 UCONTEXT_REG_RDX (ctx) = mctx->rdx;
839 UCONTEXT_REG_RBP (ctx) = mctx->rbp;
840 UCONTEXT_REG_RSP (ctx) = mctx->rsp;
841 UCONTEXT_REG_RSI (ctx) = mctx->rsi;
842 UCONTEXT_REG_RDI (ctx) = mctx->rdi;
843 UCONTEXT_REG_RIP (ctx) = mctx->rip;
844 UCONTEXT_REG_R12 (ctx) = mctx->r12;
845 UCONTEXT_REG_R13 (ctx) = mctx->r13;
846 UCONTEXT_REG_R14 (ctx) = mctx->r14;
847 UCONTEXT_REG_R15 (ctx) = mctx->r15;
849 MonoContext *ctx = (MonoContext *)sigctx;
851 ctx->rax = mctx->rax;
852 ctx->rbx = mctx->rbx;
853 ctx->rcx = mctx->rcx;
854 ctx->rdx = mctx->rdx;
855 ctx->rbp = mctx->rbp;
856 ctx->rsp = mctx->rsp;
857 ctx->rsi = mctx->rsi;
858 ctx->rdi = mctx->rdi;
859 ctx->rip = mctx->rip;
860 ctx->r12 = mctx->r12;
861 ctx->r13 = mctx->r13;
862 ctx->r14 = mctx->r14;
863 ctx->r15 = mctx->r15;
868 mono_arch_ip_from_context (void *sigctx)
870 #if defined(MONO_ARCH_USE_SIGACTION)
871 ucontext_t *ctx = (ucontext_t*)sigctx;
873 return (gpointer)UCONTEXT_REG_RIP (ctx);
875 MonoContext *ctx = sigctx;
876 return (gpointer)ctx->rip;
881 restore_soft_guard_pages (void)
883 MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
884 if (jit_tls->stack_ovf_guard_base)
885 mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
889 * This function modifies mctx so that when it is restored, execution
890 * won't continue at mctx.rip, but in a function that
891 * will restore the protection on the soft-guard pages and then return to
892 * continue at mctx.rip.
895 prepare_for_guard_pages (MonoContext *mctx)
898 sp = (gpointer)(mctx->rsp);
900 /* the return addr */
901 sp [0] = (gpointer)(mctx->rip);
902 mctx->rip = (guint64)restore_soft_guard_pages;
903 mctx->rsp = (guint64)sp;
907 altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
909 void (*restore_context) (MonoContext *);
912 restore_context = mono_get_restore_context ();
913 mono_arch_sigctx_to_monoctx (sigctx, &mctx);
915 if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
917 prepare_for_guard_pages (&mctx);
918 restore_context (&mctx);
921 mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
923 prepare_for_guard_pages (&mctx);
924 restore_context (&mctx);
928 mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
930 #if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
931 MonoException *exc = NULL;
932 ucontext_t *ctx = (ucontext_t*)sigctx;
933 MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL);
938 exc = mono_domain_get ()->stack_overflow_ex;
940 mono_handle_native_sigsegv (SIGSEGV, sigctx);
942 /* set up a call frame on the real stack so that control is returned there
943 * and exception handling can continue.
944 * The frame looks like:
948 * 128 is the size of the red zone
950 frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
953 sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15);
954 sp = (gpointer)((char*)sp - frame_size);
955 /* the arguments must be aligned */
956 sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
957 /* may need to adjust pointers in the new struct copy, depending on the OS */
958 memcpy (sp + 4, ctx, sizeof (ucontext_t));
959 /* on return from the signal handler, execution starts in altstack_handle_and_restore() */
960 UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
961 UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
962 UCONTEXT_REG_RDI (sigctx) = (unsigned long)(sp + 4);
963 UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
964 UCONTEXT_REG_RDX (sigctx) = stack_ovf;
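/*
 * For reference (derived from the register assignments above): when the signal
 * handler returns, execution resumes in altstack_handle_and_restore () on the
 * real stack, with arguments equivalent to the call below; sp[-1] holds the
 * interrupted RIP as a fake return address.
 */
#if 0
altstack_handle_and_restore ((void*)(sp + 4), exc, stack_ovf);
#endif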
969 mono_amd64_get_original_ip (void)
971 MonoLMF *lmf = mono_get_lmf ();
975 /* Reset the change to previous_lmf */
976 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
982 mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
984 guint8 *code, *start;
986 gpointer throw_trampoline;
987 MonoJumpInfo *ji = NULL;
988 GSList *unwind_ops = NULL;
989 const guint kMaxCodeSize = NACL_SIZE (128, 256);
991 start = code = mono_global_codeman_reserve (kMaxCodeSize);
993 /* We are in the frame of a managed method after a call */
995 * We would like to throw the pending exception in such a way that it looks to
996 * be thrown from the managed method.
999 /* Save registers which might contain the return value of the call */
1000 amd64_push_reg (code, AMD64_RAX);
1001 amd64_push_reg (code, AMD64_RDX);
1003 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
1004 amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
1007 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
1009 /* Obtain the pending exception */
1011 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
1012 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1014 amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
1016 amd64_call_reg (code, AMD64_R11);
1018 /* Check if it is NULL, and branch */
1019 amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
1020 br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
1022 /* exc != NULL branch */
1024 /* Save the exc on the stack */
1025 amd64_push_reg (code, AMD64_RAX);
1027 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
1029 /* Obtain the original ip and clear the flag in previous_lmf */
1031 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
1032 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1034 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
1036 amd64_call_reg (code, AMD64_R11);
1039 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
1041 /* Pop saved stuff from the stack */
1042 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
1044 /* Setup arguments for the throw trampoline */
1046 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
1047 /* The trampoline expects the caller ip to be pushed on the stack */
1048 amd64_push_reg (code, AMD64_RAX);
1050 /* Call the throw trampoline */
1052 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
1053 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1055 throw_trampoline = mono_get_throw_exception ();
1056 amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
1058 /* We use a jump instead of a call so we can push the original ip on the stack */
1059 amd64_jump_reg (code, AMD64_R11);
1061 /* ex == NULL branch */
1062 mono_amd64_patch (br [0], code);
1064 /* Obtain the original ip and clear the flag in previous_lmf */
1066 ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
1067 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
1069 amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
1071 amd64_call_reg (code, AMD64_R11);
1072 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
1074 /* Restore registers */
1075 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1076 amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
1077 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
1078 amd64_pop_reg (code, AMD64_RDX);
1079 amd64_pop_reg (code, AMD64_RAX);
1081 /* Return to original code */
1082 amd64_jump_reg (code, AMD64_R11);
1084 g_assert ((code - start) < kMaxCodeSize);
1086 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
1089 *info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);
1094 static gpointer throw_pending_exception;
1097 * Called when a thread receives an async exception while executing unmanaged code.
1098 * Instead of checking for this exception in the managed-to-native wrapper, we hijack
1099 * the return address on the stack to point to a helper routine which throws the
1103 mono_arch_notify_pending_exc (void)
1105 MonoLMF *lmf = mono_get_lmf ();
1108 /* Not yet started */
1115 if ((guint64)lmf->previous_lmf & 1)
1116 /* Already hijacked or trampoline LMF entry */
1119 /* lmf->rsp is set just before making the call which transitions to unmanaged code */
1120 lmf->rip = *(guint64*)(lmf->rsp - 8);
1121 /* Signal that lmf->rip is set */
1122 lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
1124 *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
1128 mono_amd64_get_exception_trampolines (gboolean aot)
1130 MonoTrampInfo *info;
1131 GSList *tramps = NULL;
1133 mono_arch_get_throw_pending_exception (&info, aot);
1134 tramps = g_slist_prepend (tramps, info);
1136 /* LLVM needs different throw trampolines */
1137 get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
1138 tramps = g_slist_prepend (tramps, info);
1140 get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
1141 tramps = g_slist_prepend (tramps, info);
1143 get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
1144 tramps = g_slist_prepend (tramps, info);
1150 mono_arch_exceptions_init (void)
1155 if (mono_aot_only) {
1156 throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
1157 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
1158 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
1159 tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
1160 mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
1161 tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
1162 mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
1164 /* Call this to avoid initialization races */
1165 throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);
1167 tramps = mono_amd64_get_exception_trampolines (FALSE);
1168 for (l = tramps; l; l = l->next) {
1169 MonoTrampInfo *info = l->data;
1171 mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
1172 mono_save_trampoline_xdebug_info (info);
1173 mono_tramp_info_free (info);
1175 g_slist_free (tramps);
1182 * The mono_arch_unwindinfo* methods are used to build and add
1183 * function table info for each method emitted by mono. On Win64
1184 * the SEH handler will not be called if the mono methods are not
1185 * added to the function table (a usage sketch follows the type definitions below).
1187 * We should not need to add non-volatile register info to the
1188 * table since mono stores that info elsewhere. (Except for the register
1192 #define MONO_MAX_UNWIND_CODES 22
1194 typedef union _UNWIND_CODE {
1197 guchar UnwindOp : 4;
1200 gushort FrameOffset;
1201 } UNWIND_CODE, *PUNWIND_CODE;
1203 typedef struct _UNWIND_INFO {
1206 guchar SizeOfProlog;
1207 guchar CountOfCodes;
1208 guchar FrameRegister : 4;
1209 guchar FrameOffset : 4;
1210 /* custom size for mono, allowing for: */
1211 /*UWOP_PUSH_NONVOL ebp offset = 21*/
1212 /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
1213 /*UWOP_SET_FPREG : requires 2 offset = 17*/
1214 /*UWOP_PUSH_NONVOL offset = 15-0*/
1215 UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
1217 /* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
1219 * OPTIONAL ULONG ExceptionHandler;
1220 * OPTIONAL ULONG FunctionEntry;
1222 * OPTIONAL ULONG ExceptionData[]; */
1223 } UNWIND_INFO, *PUNWIND_INFO;
1227 RUNTIME_FUNCTION runtimeFunction;
1228 UNWIND_INFO unwindInfo;
1229 } MonoUnwindInfo, *PMonoUnwindInfo;
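/*
 * Usage sketch (hypothetical emitter sequence): unwind codes must be added in
 * prolog order, and the resulting info is installed once the whole method body
 * has been emitted.
 */
#if 0
gpointer ui = NULL;
guint8 *buf = code;

amd64_push_reg (code, AMD64_RBP);
mono_arch_unwindinfo_add_push_nonvol (&ui, buf, code, AMD64_RBP);
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
mono_arch_unwindinfo_add_set_fpreg (&ui, buf, code, AMD64_RBP);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x20);
mono_arch_unwindinfo_add_alloc_stack (&ui, buf, code, 0x20);
/* ... emit the body ... */
mono_arch_unwindinfo_install_unwind_info (&ui, buf, code - buf);
#endif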
1232 mono_arch_unwindinfo_create (gpointer* monoui)
1234 PMonoUnwindInfo newunwindinfo;
1235 *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
1236 newunwindinfo->unwindInfo.Version = 1;
1240 mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1242 PMonoUnwindInfo unwindinfo;
1243 PUNWIND_CODE unwindcode;
1246 mono_arch_unwindinfo_create (monoui);
1248 unwindinfo = (MonoUnwindInfo*)*monoui;
1250 if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
1251 g_error ("Larger allocation needed for the unwind information.");
1253 codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
1254 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1255 unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
1256 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1257 unwindcode->OpInfo = reg;
1259 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1260 g_error ("Adding unwind info in wrong order.");
1262 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
1266 mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
1268 PMonoUnwindInfo unwindinfo;
1269 PUNWIND_CODE unwindcode;
1272 mono_arch_unwindinfo_create (monoui);
1274 unwindinfo = (MonoUnwindInfo*)*monoui;
1276 if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
1277 g_error ("Larger allocation needed for the unwind information.");
1279 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
1280 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1281 unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
1283 unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
1284 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1285 unwindcode->OpInfo = reg;
1287 unwindinfo->unwindInfo.FrameRegister = reg;
1289 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1290 g_error ("Adding unwind info in wrong order.");
1292 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
1296 mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
1298 PMonoUnwindInfo unwindinfo;
1299 PUNWIND_CODE unwindcode;
1303 mono_arch_unwindinfo_create (monoui);
1305 unwindinfo = (MonoUnwindInfo*)*monoui;
1308 g_error ("Stack allocation must be equal to or greater than 0x8.");
1312 else if (size <= 0x7FFF8)
1317 if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
1318 g_error ("Larger allocation needed for the unwind information.");
1320 codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
1321 unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
1323 if (codesneeded == 1) {
1324 /*The size of the allocation is
1325 (the number in the OpInfo member) times 8 plus 8*/
1326 unwindcode->OpInfo = (size - 8)/8;
1327 unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
1330 if (codesneeded == 3) {
1331 /*the unscaled size of the allocation is recorded
1332 in the next two slots in little-endian format*/
1333 *((unsigned int*)(&unwindcode->FrameOffset)) = size;
1335 unwindcode->OpInfo = 1;
1338 /*the size of the allocation divided by 8
1339 is recorded in the next slot*/
1340 unwindcode->FrameOffset = size/8;
1342 unwindcode->OpInfo = 0;
1345 unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
1348 unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
1350 if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
1351 g_error ("Adding unwind info in wrong order.");
1353 unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
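/*
 * Worked examples of the encoding above (assuming the elided branches choose
 * codesneeded = 1 for sizes up to 128 bytes, 2 for sizes up to 0x7FFF8 and 3
 * otherwise, matching the standard Win64 UWOP_ALLOC_SMALL/UWOP_ALLOC_LARGE scheme):
 *
 *   size 0x28    -> 1 slot,  UWOP_ALLOC_SMALL, OpInfo = (0x28 - 8) / 8 = 4
 *   size 0x1000  -> 2 slots, UWOP_ALLOC_LARGE, OpInfo = 0, next slot = 0x1000 / 8 = 0x200
 *   size 0x80000 -> 3 slots, UWOP_ALLOC_LARGE, OpInfo = 1, next two slots hold 0x80000
 */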
1357 mono_arch_unwindinfo_get_size (gpointer monoui)
1359 PMonoUnwindInfo unwindinfo;
1363 unwindinfo = (MonoUnwindInfo*)monoui;
1364 return (8 + sizeof (MonoUnwindInfo)) -
1365 (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
1369 MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
1373 PMonoUnwindInfo targetinfo;
1374 MonoDomain *domain = mono_domain_get ();
1376 ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
1380 pos = (guint64)(((char*)ji->code_start) + ji->code_size);
1382 targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
1384 targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
1386 return &targetinfo->runtimeFunction;
1390 mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
1392 PMonoUnwindInfo unwindinfo, targetinfo;
1394 guint64 targetlocation;
1398 unwindinfo = (MonoUnwindInfo*)*monoui;
1399 targetlocation = (guint64)&(((guchar*)code)[code_size]);
1400 targetinfo = (PMonoUnwindInfo) ALIGN_TO(targetlocation, 8);
1402 unwindinfo->runtimeFunction.EndAddress = code_size;
1403 unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
1405 memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
1407 codecount = unwindinfo->unwindInfo.CountOfCodes;
1409 memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
1410 sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
1413 g_free (unwindinfo);
1416 RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
1421 #if MONO_SUPPORT_TASKLETS
1422 MonoContinuationRestore
1423 mono_tasklets_arch_restore (void)
1425 static guint8* saved = NULL;
1426 guint8 *code, *start;
1427 int cont_reg = AMD64_R9; /* register usable on both call conventions */
1428 const guint kMaxCodeSize = NACL_SIZE (64, 128);
1432 return (MonoContinuationRestore)saved;
1433 code = start = mono_global_codeman_reserve (kMaxCodeSize);
1434 /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
1435 /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
1436 * state is in AMD64_ARG_REG2 ($rdx or $rsi)
1437 * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
1438 * We move cont to cont_reg since we need both rcx and rdi for the copy
1439 * state is moved to $rax so it is set up as the return value and we can overwrite $rsi
1441 amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
1442 amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
1443 /* setup the copy of the stack */
1444 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
1445 amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
1447 amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
1448 amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
1449 amd64_prefix (code, X86_REP_PREFIX);
1452 /* now restore the registers from the LMF */
1453 amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
1454 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
1455 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
1456 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
1457 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
1458 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
1459 #if !defined(__native_client_codegen__)
1460 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
1463 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
1464 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
1466 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);
1468 /* restore the lmf chain */
1469 /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
1470 x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/
1472 /* state is already in rax */
1473 amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
1474 g_assert ((code - start) <= kMaxCodeSize);
1476 nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
1479 return (MonoContinuationRestore)saved;
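/*
 * Usage sketch (hypothetical names): the tasklet code fetches the generated
 * routine once and jumps back into a saved continuation with it. The call does
 * not return; 'state' becomes the value seen at the original capture point,
 * since it is moved to %rax above.
 */
#if 0
MonoContinuationRestore restore = mono_tasklets_arch_restore ();

restore (cont, state, lmf_addr);
#endif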
1484 * mono_arch_setup_resume_sighandler_ctx:
1486 * Setup CTX so execution continues at FUNC.
1489 mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
1492 * When resuming from a signal handler, the stack should be misaligned, just like right after
1495 if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
1496 MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
1497 MONO_CONTEXT_SET_IP (ctx, func);
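/*
 * Worked example: the amd64 ABI keeps RSP 16-byte aligned at call sites, so
 * immediately after a 'call' instruction RSP % 16 == 8. The adjustment above
 * recreates that state, e.g. an aligned SP of 0x7ffffffe000 becomes
 * 0x7ffffffdff8 before FUNC starts executing.
 */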