X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Fmini%2Fexceptions-amd64.c;h=e683bf2a11fcf6080ab66869911056d967764151;hb=489ec324d3aad619b93a5100879245bff88ef5db;hp=d56962d12b09ae7baec775c51ffd35a0dbcd7175;hpb=a5e40870bd3bb18e1681afed6c71e7edfdb80534;p=mono.git diff --git a/mono/mini/exceptions-amd64.c b/mono/mini/exceptions-amd64.c index d56962d12b0..e683bf2a11f 100644 --- a/mono/mini/exceptions-amd64.c +++ b/mono/mini/exceptions-amd64.c @@ -11,8 +11,8 @@ #include #include #include -#ifndef PLATFORM_WIN32 -#include +#ifdef HAVE_UCONTEXT_H +#include #endif #include @@ -28,10 +28,12 @@ #include "mini.h" #include "mini-amd64.h" +#include "tasklets.h" +#include "debug-mini.h" #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1)) -#ifdef PLATFORM_WIN32 +#ifdef TARGET_WIN32 static MonoW32ExceptionHandler fpe_handler; static MonoW32ExceptionHandler ill_handler; static MonoW32ExceptionHandler segv_handler; @@ -39,7 +41,7 @@ static MonoW32ExceptionHandler segv_handler; static LPTOP_LEVEL_EXCEPTION_FILTER old_handler; #define W32_SEH_HANDLE_EX(_ex) \ - if (_ex##_handler) _ex##_handler((int)sctx) + if (_ex##_handler) _ex##_handler(0, er, sctx) /* * Unhandled Exception Filter @@ -142,7 +144,7 @@ void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler) } } -#endif /* PLATFORM_WIN32 */ +#endif /* TARGET_WIN32 */ /* * mono_arch_get_restore_context: @@ -150,15 +152,15 @@ void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler) * Returns a pointer to a method which restores a previously saved sigcontext. */ gpointer -mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) +mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot) { guint8 *start = NULL; guint8 *code; + MonoJumpInfo *ji = NULL; + GSList *unwind_ops = NULL; /* restore_contect (MonoContext *ctx) */ - *ji = NULL; - start = code = mono_global_codeman_reserve (256); amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8); @@ -195,7 +197,8 @@ mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gbool mono_arch_flush_icache (start, code - start); - *code_size = code - start; + if (info) + *info = mono_tramp_info_create (g_strdup_printf ("restore_context"), start, code - start, ji, unwind_ops); return start; } @@ -208,14 +211,14 @@ mono_arch_get_restore_context_full (guint32 *code_size, MonoJumpInfo **ji, gbool * @exc object in this case). 
*/ gpointer -mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) +mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot) { guint8 *start; int i; guint8 *code; guint32 pos; - - *ji = NULL; + MonoJumpInfo *ji = NULL; + GSList *unwind_ops = NULL; start = code = mono_global_codeman_reserve (128); @@ -250,7 +253,7 @@ mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8); amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8); amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8); -#ifdef PLATFORM_WIN32 +#ifdef TARGET_WIN32 amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8); amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8); #endif @@ -276,7 +279,8 @@ mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean mono_arch_flush_icache (start, code - start); - *code_size = code - start; + if (info) + *info = mono_tramp_info_create (g_strdup_printf ("call_filter"), start, code - start, ji, unwind_ops); return start; } @@ -288,11 +292,8 @@ mono_arch_get_call_filter_full (guint32 *code_size, MonoJumpInfo **ji, gboolean void mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4, guint64 dummy5, guint64 dummy6, - MonoObject *exc, guint64 rip, guint64 rsp, - guint64 rbx, guint64 rbp, guint64 r12, guint64 r13, - guint64 r14, guint64 r15, guint64 rdi, guint64 rsi, - guint64 rax, guint64 rcx, guint64 rdx, - guint64 rethrow) + mgreg_t *regs, mgreg_t rip, + MonoObject *exc, gboolean rethrow) { static void (*restore_context) (MonoContext *); MonoContext ctx; @@ -300,119 +301,191 @@ mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guin if (!restore_context) restore_context = mono_get_restore_context (); - ctx.rsp = rsp; + ctx.rsp = regs [AMD64_RSP]; ctx.rip = rip; - ctx.rbx = rbx; - ctx.rbp = rbp; - ctx.r12 = r12; - ctx.r13 = r13; - ctx.r14 = r14; - ctx.r15 = r15; - ctx.rdi = rdi; - ctx.rsi = rsi; - ctx.rax = rax; - ctx.rcx = rcx; - ctx.rdx = rdx; - - if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) { - /* - * The debugger wants us to stop on the `throw' instruction. - * By the time we get here, it already inserted a breakpoint on - * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw'). - */ - - /* FIXME FIXME - * - * In case of a rethrow, the JIT is emitting code like this: - * - * mov 0xffffffffffffffd0(%rbp),%rax' - * mov %rax,%rdi - * callq throw - * - * Here, restore_context() wouldn't restore the %rax register correctly. 
- */ - ctx.rip = rip - 8; - ctx.rsp = rsp + 8; - restore_context (&ctx); - g_assert_not_reached (); - } - - /* adjust eip so that it point into the call instruction */ - ctx.rip -= 1; + ctx.rbx = regs [AMD64_RBX]; + ctx.rbp = regs [AMD64_RBP]; + ctx.r12 = regs [AMD64_R12]; + ctx.r13 = regs [AMD64_R13]; + ctx.r14 = regs [AMD64_R14]; + ctx.r15 = regs [AMD64_R15]; + ctx.rdi = regs [AMD64_RDI]; + ctx.rsi = regs [AMD64_RSI]; + ctx.rax = regs [AMD64_RAX]; + ctx.rcx = regs [AMD64_RCX]; + ctx.rdx = regs [AMD64_RDX]; if (mono_object_isinst (exc, mono_defaults.exception_class)) { MonoException *mono_ex = (MonoException*)exc; if (!rethrow) mono_ex->stack_trace = NULL; } + + if (mono_debug_using_mono_debugger ()) { + guint8 buf [16], *code; + + mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf)); + code = buf + 8; + + if (buf [3] == 0xe8) { + MonoContext ctx_cp = ctx; + ctx_cp.rip = rip - 5; + + if (mono_debugger_handle_exception (&ctx_cp, exc)) { + restore_context (&ctx_cp); + g_assert_not_reached (); + } + } + } + + /* adjust eip so that it point into the call instruction */ + ctx.rip -= 1; + mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE); restore_context (&ctx); g_assert_not_reached (); } +void +mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4, + guint64 dummy5, guint64 dummy6, + mgreg_t *regs, mgreg_t rip, + guint32 ex_token_index, gint64 pc_offset) +{ + guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index; + MonoException *ex; + + ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token); + + rip -= pc_offset; + + /* Negate the ip adjustment done in mono_amd64_throw_exception () */ + rip += 1; + + mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject*)ex, FALSE); +} + +static void +mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4, + guint64 dummy5, guint64 dummy6, + mgreg_t *regs, mgreg_t rip, + guint32 dummy7, gint64 dummy8) +{ + /* Only the register parameters are valid */ + MonoContext ctx; + + ctx.rsp = regs [AMD64_RSP]; + ctx.rip = rip; + ctx.rbx = regs [AMD64_RBX]; + ctx.rbp = regs [AMD64_RBP]; + ctx.r12 = regs [AMD64_R12]; + ctx.r13 = regs [AMD64_R13]; + ctx.r14 = regs [AMD64_R14]; + ctx.r15 = regs [AMD64_R15]; + ctx.rdi = regs [AMD64_RDI]; + ctx.rsi = regs [AMD64_RSI]; + ctx.rax = regs [AMD64_RAX]; + ctx.rcx = regs [AMD64_RCX]; + ctx.rdx = regs [AMD64_RDX]; + + mono_resume_unwind (&ctx); +} + +/* + * get_throw_trampoline: + * + * Generate a call to mono_amd64_throw_exception/ + * mono_amd64_throw_corlib_exception. 
+ */ static gpointer -get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, gboolean aot) +get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot) { guint8* start; guint8 *code; + MonoJumpInfo *ji = NULL; + GSList *unwind_ops = NULL; + int i, buf_size, stack_size, arg_offsets [16], regs_offset; - start = code = mono_global_codeman_reserve (64); + buf_size = 256; + start = code = mono_global_codeman_reserve (buf_size); - code = start; + /* The stack is unaligned on entry */ + stack_size = 192 + 8; - *ji = NULL; + code = start; - amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8); + if (info) + unwind_ops = mono_arch_get_cie_program (); - /* reverse order */ - amd64_push_imm (code, rethrow); - amd64_push_reg (code, AMD64_RDX); - amd64_push_reg (code, AMD64_RCX); - amd64_push_reg (code, AMD64_RAX); - amd64_push_reg (code, AMD64_RSI); - amd64_push_reg (code, AMD64_RDI); - amd64_push_reg (code, AMD64_R15); - amd64_push_reg (code, AMD64_R14); - amd64_push_reg (code, AMD64_R13); - amd64_push_reg (code, AMD64_R12); - amd64_push_reg (code, AMD64_RBP); - amd64_push_reg (code, AMD64_RBX); + /* Alloc frame */ + amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size); + if (info) + mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8); - /* SP */ - amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8); - amd64_push_reg (code, AMD64_RAX); + /* + * To hide linux/windows calling convention differences, we pass all arguments on + * the stack by passing 6 dummy values in registers. + */ - /* IP */ - amd64_push_membase (code, AMD64_R11, 0); + arg_offsets [0] = 0; + arg_offsets [1] = sizeof (gpointer); + arg_offsets [2] = sizeof (gpointer) * 2; + arg_offsets [3] = sizeof (gpointer) * 3; + regs_offset = sizeof (gpointer) * 4; - /* Exception */ - amd64_push_reg (code, AMD64_ARG_REG1); - -#ifdef PLATFORM_WIN32 - /* align stack */ - amd64_push_imm (code, 0); - amd64_push_imm (code, 0); - amd64_push_imm (code, 0); - amd64_push_imm (code, 0); - amd64_push_imm (code, 0); - amd64_push_imm (code, 0); -#endif + /* Save registers */ + for (i = 0; i < AMD64_NREG; ++i) + if (i != AMD64_RSP) + amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof (gpointer)), i, 8); + /* Save RSP */ + amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof (gpointer)); + amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof (gpointer)), X86_EAX, 8); + /* Set arg1 == regs */ + amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset); + amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, 8); + /* Set arg2 == eip */ + if (llvm_abs) + amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX); + else + amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, 8); + amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, 8); + /* Set arg3 == exc/ex_token_index */ + if (resume_unwind) + amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, 8); + else + amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, 8); + /* Set arg4 == rethrow/pc offset */ + if (resume_unwind) { + amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, 8); + } else if (corlib) { + amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, 8); + if (llvm_abs) + /* + * The caller is LLVM code which passes the absolute address not a pc offset, + * so compensate by passing 0 as 'rip' and passing the negated abs 
address as + * the pc offset. + */ + amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]); + } else { + amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, 8); + } if (aot) { - *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception"); + ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception"); amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); } else { - amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_throw_exception); + amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? (mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception)); } amd64_call_reg (code, AMD64_R11); amd64_breakpoint (code); mono_arch_flush_icache (start, code - start); - g_assert ((code - start) < 64); + g_assert ((code - start) < buf_size); - *code_size = code - start; + if (info) + *info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops); return start; } @@ -425,36 +498,16 @@ get_throw_trampoline (gboolean rethrow, guint32 *code_size, MonoJumpInfo **ji, g * signature: void (*func) (MonoException *exc); * */ -gpointer -mono_arch_get_throw_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) +gpointer +mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot) { - return get_throw_trampoline (FALSE, code_size, ji, aot); + return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot); } gpointer -mono_arch_get_rethrow_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) +mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot) { - return get_throw_trampoline (TRUE, code_size, ji, aot); -} - -gpointer -mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) -{ - guint8* start; - guint8 *code; - - start = code = mono_global_codeman_reserve (64); - - *ji = NULL; - - /* Not used on amd64 */ - amd64_breakpoint (code); - - mono_arch_flush_icache (start, code - start); - - *code_size = code - start; - - return start; + return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot); } /** @@ -468,189 +521,98 @@ mono_arch_get_throw_exception_by_name_full (guint32 *code_size, MonoJumpInfo **j * needs no relocations in the caller. 
*/ gpointer -mono_arch_get_throw_corlib_exception_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot) +mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot) { - static guint8* start; - guint8 *code; - guint64 throw_ex; - - start = code = mono_global_codeman_reserve (64); - - *ji = NULL; - - /* Push throw_ip */ - amd64_push_reg (code, AMD64_ARG_REG2); - - /* Call exception_from_token */ - amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8); - if (aot) { - *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_IMAGE, mono_defaults.exception_class->image); - amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RIP, 0, 8); - *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_exception_from_token"); - amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); - } else { - amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image); - amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token); - } -#ifdef PLATFORM_WIN32 - amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32); -#endif - amd64_call_reg (code, AMD64_R11); -#ifdef PLATFORM_WIN32 - amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32); -#endif - - /* Compute throw_ip */ - amd64_pop_reg (code, AMD64_ARG_REG2); - /* return addr */ - amd64_pop_reg (code, AMD64_ARG_REG3); - amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2); - - /* Put the throw_ip at the top of the misaligned stack */ - amd64_push_reg (code, AMD64_ARG_REG3); - - throw_ex = (guint64)mono_get_throw_exception (); - - /* Call throw_exception */ - amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8); - if (aot) { - *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_throw_exception"); - amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); - } else { - amd64_mov_reg_imm (code, AMD64_R11, throw_ex); - } - /* The original IP is on the stack */ - amd64_jump_reg (code, AMD64_R11); - - g_assert ((code - start) < 64); - - mono_arch_flush_icache (start, code - start); - - *code_size = code - start; - - return start; + return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot); } -/* mono_arch_find_jit_info: +/* + * mono_arch_find_jit_info: * - * This function is used to gather information from @ctx. It return the - * MonoJitInfo of the corresponding function, unwinds one stack frame and - * stores the resulting context into @new_ctx. It also stores a string - * describing the stack location into @trace (if not NULL), and modifies - * the @lmf if necessary. @native_offset return the IP offset from the - * start of the function or -1 if that info is not available. + * This function is used to gather information from @ctx, and store it in @frame_info. + * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf + * is modified if needed. + * Returns TRUE on success, FALSE otherwise. 
*/ -MonoJitInfo * -mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx, - MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed) +gboolean +mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, + MonoJitInfo *ji, MonoContext *ctx, + MonoContext *new_ctx, MonoLMF **lmf, + mgreg_t **save_locations, + StackFrameInfo *frame) { - MonoJitInfo *ji; - int i; gpointer ip = MONO_CONTEXT_GET_IP (ctx); - /* Avoid costly table lookup during stack overflow */ - if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size))) - ji = prev_ji; - else - ji = mono_jit_info_table_find (domain, ip); - - if (managed) - *managed = FALSE; + memset (frame, 0, sizeof (StackFrameInfo)); + frame->ji = ji; + frame->managed = FALSE; *new_ctx = *ctx; if (ji != NULL) { - int offset; - gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0; - - if (managed) - if (!ji->method->wrapper_type) - *managed = TRUE; - - /* - * If a method has save_lmf set, then register save/restore code is not generated - * by the JIT, so we have to restore callee saved registers from the lmf. - */ - if (ji->method->save_lmf) { - MonoLMF *lmf_addr; - - /* - * *lmf might not point to the LMF pushed by this method, so compute the LMF - * address instead. - */ - if (omit_fp) - lmf_addr = (MonoLMF*)ctx->rsp; - else - lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF)); - - new_ctx->rbp = lmf_addr->rbp; - new_ctx->rbx = lmf_addr->rbx; - new_ctx->r12 = lmf_addr->r12; - new_ctx->r13 = lmf_addr->r13; - new_ctx->r14 = lmf_addr->r14; - new_ctx->r15 = lmf_addr->r15; - } - else { - offset = omit_fp ? 0 : -1; - /* restore caller saved registers */ - for (i = 0; i < AMD64_NREG; i ++) - if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) { - guint64 reg; - - if (omit_fp) { - reg = *((guint64*)ctx->rsp + offset); - offset ++; - } - else { - reg = *((guint64 *)ctx->rbp + offset); - offset --; - } - - switch (i) { - case AMD64_RBX: - new_ctx->rbx = reg; - break; - case AMD64_R12: - new_ctx->r12 = reg; - break; - case AMD64_R13: - new_ctx->r13 = reg; - break; - case AMD64_R14: - new_ctx->r14 = reg; - break; - case AMD64_R15: - new_ctx->r15 = reg; - break; - case AMD64_RBP: - new_ctx->rbp = reg; - break; - default: - g_assert_not_reached (); - } - } - } + gssize regs [MONO_MAX_IREGS + 1]; + guint8 *cfa; + guint32 unwind_info_len; + guint8 *unwind_info; + + frame->type = FRAME_TYPE_MANAGED; + + if (!ji->method->wrapper_type || ji->method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) + frame->managed = TRUE; + + if (ji->from_aot) + unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len); + else + unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len); + + frame->unwind_info = unwind_info; + frame->unwind_info_len = unwind_info_len; + + regs [AMD64_RAX] = new_ctx->rax; + regs [AMD64_RBX] = new_ctx->rbx; + regs [AMD64_RCX] = new_ctx->rcx; + regs [AMD64_RDX] = new_ctx->rdx; + regs [AMD64_RBP] = new_ctx->rbp; + regs [AMD64_RSP] = new_ctx->rsp; + regs [AMD64_RSI] = new_ctx->rsi; + regs [AMD64_RDI] = new_ctx->rdi; + regs [AMD64_RIP] = new_ctx->rip; + regs [AMD64_R12] = new_ctx->r12; + regs [AMD64_R13] = new_ctx->r13; + regs [AMD64_R14] = new_ctx->r14; + regs [AMD64_R15] = new_ctx->r15; + + mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start, + (guint8*)ji->code_start + ji->code_size, + ip, regs, MONO_MAX_IREGS + 1, + save_locations, MONO_MAX_IREGS, &cfa); + + new_ctx->rax = regs 
[AMD64_RAX]; + new_ctx->rbx = regs [AMD64_RBX]; + new_ctx->rcx = regs [AMD64_RCX]; + new_ctx->rdx = regs [AMD64_RDX]; + new_ctx->rbp = regs [AMD64_RBP]; + new_ctx->rsp = regs [AMD64_RSP]; + new_ctx->rsi = regs [AMD64_RSI]; + new_ctx->rdi = regs [AMD64_RDI]; + new_ctx->rip = regs [AMD64_RIP]; + new_ctx->r12 = regs [AMD64_R12]; + new_ctx->r13 = regs [AMD64_R13]; + new_ctx->r14 = regs [AMD64_R14]; + new_ctx->r15 = regs [AMD64_R15]; + + /* The CFA becomes the new SP value */ + new_ctx->rsp = (gssize)cfa; + + /* Adjust IP */ + new_ctx->rip --; if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) { /* remove any unused lmf */ - *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1); - } - - if (omit_fp) { - /* Pop frame */ - new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff); - new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1; - /* Pop return address */ - new_ctx->rsp += 8; - } - else { - /* Pop EBP and the return address */ - new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer)); - /* we substract 1, so that the IP points into the call instruction */ - new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1; - new_ctx->rbp = *((guint64 *)ctx->rbp); + *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3); } +#ifndef MONO_AMD64_NO_PUSHES /* Pop arguments off the stack */ { MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1); @@ -658,17 +620,36 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info); new_ctx->rsp += stack_to_pop; } +#endif - return ji; + return TRUE; } else if (*lmf) { guint64 rip; + if (((guint64)(*lmf)->previous_lmf) & 2) { + /* + * This LMF entry is created by the soft debug code to mark transitions to + * managed code done during invokes. + */ + MonoLMFExt *ext = (MonoLMFExt*)(*lmf); + + g_assert (ext->debugger_invoke); + + memcpy (new_ctx, &ext->ctx, sizeof (MonoContext)); + + *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3); + + frame->type = FRAME_TYPE_DEBUGGER_INVOKE; + + return TRUE; + } + if (((guint64)(*lmf)->previous_lmf) & 1) { /* This LMF has the rip field set */ rip = (*lmf)->rip; } else if ((*lmf)->rsp == 0) { /* Top LMF entry */ - return (gpointer)-1; + return FALSE; } else { /* * The rsp field is set just before the call which transitioned to native @@ -677,11 +658,21 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer)); } - ji = mono_jit_info_table_find (domain, (gpointer)rip); - if (!ji) { - // FIXME: This can happen with multiple appdomains (bug #444383) - return (gpointer)-1; - } + ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL); + /* + * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted + * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the + * return address. 
+ */ + //g_assert (ji); + if (!ji) + return FALSE; + + /* Adjust IP */ + rip --; + + frame->ji = ji; + frame->type = FRAME_TYPE_MANAGED_TO_NATIVE; new_ctx->rip = rip; new_ctx->rbp = (*lmf)->rbp; @@ -692,13 +683,42 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf new_ctx->r13 = (*lmf)->r13; new_ctx->r14 = (*lmf)->r14; new_ctx->r15 = (*lmf)->r15; +#ifdef TARGET_WIN32 + new_ctx->rdi = (*lmf)->rdi; + new_ctx->rsi = (*lmf)->rsi; +#endif - *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1); + *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3); - return ji ? ji : res; + return TRUE; } - return NULL; + return FALSE; +} + +/* + * handle_exception: + * + * Called by resuming from a signal handler. + */ +static void +handle_signal_exception (gpointer obj, gboolean test_only) +{ + MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id); + MonoContext ctx; + static void (*restore_context) (MonoContext *); + + if (!restore_context) + restore_context = mono_get_restore_context (); + + memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext)); + + if (mono_debugger_handle_exception (&ctx, (MonoObject *)obj)) + return; + + mono_handle_exception (&ctx, obj, MONO_CONTEXT_GET_IP (&ctx), test_only); + + restore_context (&ctx); } /** @@ -710,51 +730,68 @@ mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInf gboolean mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only) { +#if defined(MONO_ARCH_USE_SIGACTION) + ucontext_t *ctx = (ucontext_t*)sigctx; + + /* + * Handling the exception in the signal handler is problematic, since the original + * signal is disabled, and we could run arbitrary code though the debugger. So + * resume into the normal stack and do most work there if possible. 
+ */ + MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id); + guint64 sp = UCONTEXT_REG_RSP (ctx); + + /* Pass the ctx parameter in TLS */ + mono_arch_sigctx_to_monoctx (ctx, &jit_tls->ex_ctx); + /* The others in registers */ + UCONTEXT_REG_RDI (ctx) = (guint64)obj; + UCONTEXT_REG_RSI (ctx) = test_only; + + /* Allocate a stack frame below the red zone */ + sp -= 128; + /* The stack should be unaligned */ + if (sp % 8 == 0) + sp -= 8; + UCONTEXT_REG_RSP (ctx) = sp; + + UCONTEXT_REG_RIP (ctx) = (guint64)handle_signal_exception; + + return TRUE; +#else MonoContext mctx; mono_arch_sigctx_to_monoctx (sigctx, &mctx); + if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) + return TRUE; + mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only); mono_arch_monoctx_to_sigctx (&mctx, sigctx); return TRUE; -} - -#ifdef MONO_ARCH_USE_SIGACTION -static inline guint64* -gregs_from_ucontext (ucontext_t *ctx) -{ -#ifdef __FreeBSD__ - guint64 *gregs = (guint64 *) &ctx->uc_mcontext; -#else - guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs; #endif - - return gregs; } -#endif + void mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx) { -#ifdef MONO_ARCH_USE_SIGACTION +#if defined(MONO_ARCH_USE_SIGACTION) ucontext_t *ctx = (ucontext_t*)sigctx; - guint64 *gregs = gregs_from_ucontext (ctx); - - mctx->rax = gregs [REG_RAX]; - mctx->rbx = gregs [REG_RBX]; - mctx->rcx = gregs [REG_RCX]; - mctx->rdx = gregs [REG_RDX]; - mctx->rbp = gregs [REG_RBP]; - mctx->rsp = gregs [REG_RSP]; - mctx->rsi = gregs [REG_RSI]; - mctx->rdi = gregs [REG_RDI]; - mctx->rip = gregs [REG_RIP]; - mctx->r12 = gregs [REG_R12]; - mctx->r13 = gregs [REG_R13]; - mctx->r14 = gregs [REG_R14]; - mctx->r15 = gregs [REG_R15]; + mctx->rax = UCONTEXT_REG_RAX (ctx); + mctx->rbx = UCONTEXT_REG_RBX (ctx); + mctx->rcx = UCONTEXT_REG_RCX (ctx); + mctx->rdx = UCONTEXT_REG_RDX (ctx); + mctx->rbp = UCONTEXT_REG_RBP (ctx); + mctx->rsp = UCONTEXT_REG_RSP (ctx); + mctx->rsi = UCONTEXT_REG_RSI (ctx); + mctx->rdi = UCONTEXT_REG_RDI (ctx); + mctx->rip = UCONTEXT_REG_RIP (ctx); + mctx->r12 = UCONTEXT_REG_R12 (ctx); + mctx->r13 = UCONTEXT_REG_R13 (ctx); + mctx->r14 = UCONTEXT_REG_R14 (ctx); + mctx->r15 = UCONTEXT_REG_R15 (ctx); #else MonoContext *ctx = (MonoContext *)sigctx; @@ -777,24 +814,22 @@ mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx) void mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx) { -#ifdef MONO_ARCH_USE_SIGACTION +#if defined(MONO_ARCH_USE_SIGACTION) ucontext_t *ctx = (ucontext_t*)sigctx; - guint64 *gregs = gregs_from_ucontext (ctx); - - gregs [REG_RAX] = mctx->rax; - gregs [REG_RBX] = mctx->rbx; - gregs [REG_RCX] = mctx->rcx; - gregs [REG_RDX] = mctx->rdx; - gregs [REG_RBP] = mctx->rbp; - gregs [REG_RSP] = mctx->rsp; - gregs [REG_RSI] = mctx->rsi; - gregs [REG_RDI] = mctx->rdi; - gregs [REG_RIP] = mctx->rip; - gregs [REG_R12] = mctx->r12; - gregs [REG_R13] = mctx->r13; - gregs [REG_R14] = mctx->r14; - gregs [REG_R15] = mctx->r15; + UCONTEXT_REG_RAX (ctx) = mctx->rax; + UCONTEXT_REG_RBX (ctx) = mctx->rbx; + UCONTEXT_REG_RCX (ctx) = mctx->rcx; + UCONTEXT_REG_RDX (ctx) = mctx->rdx; + UCONTEXT_REG_RBP (ctx) = mctx->rbp; + UCONTEXT_REG_RSP (ctx) = mctx->rsp; + UCONTEXT_REG_RSI (ctx) = mctx->rsi; + UCONTEXT_REG_RDI (ctx) = mctx->rdi; + UCONTEXT_REG_RIP (ctx) = mctx->rip; + UCONTEXT_REG_R12 (ctx) = mctx->r12; + UCONTEXT_REG_R13 (ctx) = mctx->r13; + UCONTEXT_REG_R14 (ctx) = mctx->r14; + UCONTEXT_REG_R15 (ctx) = mctx->r15; #else MonoContext *ctx = (MonoContext *)sigctx; @@ -817,14 
+852,10 @@ mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx) gpointer mono_arch_ip_from_context (void *sigctx) { - -#ifdef MONO_ARCH_USE_SIGACTION - +#if defined(MONO_ARCH_USE_SIGACTION) ucontext_t *ctx = (ucontext_t*)sigctx; - guint64 *gregs = gregs_from_ucontext (ctx); - - return (gpointer)gregs [REG_RIP]; + return (gpointer)UCONTEXT_REG_RIP (ctx); #else MonoContext *ctx = sigctx; return (gpointer)ctx->rip; @@ -865,6 +896,13 @@ altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf) restore_context = mono_get_restore_context (); mono_arch_sigctx_to_monoctx (sigctx, &mctx); + + if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) { + if (stack_ovf) + prepare_for_guard_pages (&mctx); + restore_context (&mctx); + } + mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE); if (stack_ovf) prepare_for_guard_pages (&mctx); @@ -874,11 +912,10 @@ altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf) void mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf) { -#ifdef MONO_ARCH_USE_SIGACTION +#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS) MonoException *exc = NULL; ucontext_t *ctx = (ucontext_t*)sigctx; - guint64 *gregs = gregs_from_ucontext (ctx); - MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]); + MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL); gpointer *sp; int frame_size; @@ -898,23 +935,23 @@ mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128; frame_size += 15; frame_size &= ~15; - sp = (gpointer)(gregs [REG_RSP] & ~15); + sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15); sp = (gpointer)((char*)sp - frame_size); /* the arguments must be aligned */ - sp [-1] = (gpointer)gregs [REG_RIP]; + sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx); /* may need to adjust pointers in the new struct copy, depending on the OS */ memcpy (sp + 4, ctx, sizeof (ucontext_t)); /* at the return form the signal handler execution starts in altstack_handle_and_restore() */ - gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore; - gregs [REG_RSP] = (unsigned long)(sp - 1); - gregs [REG_RDI] = (unsigned long)(sp + 4); - gregs [REG_RSI] = (guint64)exc; - gregs [REG_RDX] = stack_ovf; + UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore; + UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1); + UCONTEXT_REG_RDI (sigctx) = (unsigned long)(sp + 4); + UCONTEXT_REG_RSI (sigctx) = (guint64)exc; + UCONTEXT_REG_RDX (sigctx) = stack_ovf; #endif } -static guint64 -get_original_ip (void) +guint64 +mono_amd64_get_original_ip (void) { MonoLMF *lmf = mono_get_lmf (); @@ -926,17 +963,14 @@ get_original_ip (void) return lmf->rip; } -static gpointer -get_throw_pending_exception (void) +gpointer +mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot) { - static guint8* start; - static gboolean inited = FALSE; - guint8 *code; + guint8 *code, *start; guint8 *br[1]; gpointer throw_trampoline; - - if (inited) - return start; + MonoJumpInfo *ji = NULL; + GSList *unwind_ops = NULL; start = code = mono_global_codeman_reserve (128); @@ -957,7 +991,12 @@ get_throw_pending_exception (void) amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); /* Obtain the pending exception */ - amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception); + if (aot) { + ji = 
mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception); + } amd64_call_reg (code, AMD64_R11); /* Check if it is NULL, and branch */ @@ -972,7 +1011,12 @@ get_throw_pending_exception (void) amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8); /* Obtain the original ip and clear the flag in previous_lmf */ - amd64_mov_reg_imm (code, AMD64_R11, get_original_ip); + if (aot) { + ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip); + } amd64_call_reg (code, AMD64_R11); /* Load exc */ @@ -988,8 +1032,13 @@ get_throw_pending_exception (void) amd64_push_reg (code, AMD64_RAX); /* Call the throw trampoline */ - throw_trampoline = mono_get_throw_exception (); - amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline); + if (aot) { + ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + throw_trampoline = mono_get_throw_exception (); + amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline); + } /* We use a jump instead of a call so we can push the original ip on the stack */ amd64_jump_reg (code, AMD64_R11); @@ -997,7 +1046,12 @@ get_throw_pending_exception (void) mono_amd64_patch (br [0], code); /* Obtain the original ip and clear the flag in previous_lmf */ - amd64_mov_reg_imm (code, AMD64_R11, get_original_ip); + if (aot) { + ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip"); + amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8); + } else { + amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip); + } amd64_call_reg (code, AMD64_R11); amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8); @@ -1013,11 +1067,14 @@ get_throw_pending_exception (void) g_assert ((code - start) < 128); - inited = TRUE; + if (info) + *info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops); return start; } +static gpointer throw_pending_exception; + /* * Called when a thread receives an async exception while executing unmanaged code. 
* Instead of checking for this exception in the managed-to-native wrapper, we hijack @@ -1029,6 +1086,10 @@ mono_arch_notify_pending_exc (void) { MonoLMF *lmf = mono_get_lmf (); + if (!lmf) + /* Not yet started */ + return; + if (lmf->rsp == 0) /* Initial LMF */ return; @@ -1042,10 +1103,62 @@ mono_arch_notify_pending_exc (void) /* Signal that lmf->rip is set */ lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1); - *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception (); + *(gpointer*)(lmf->rsp - 8) = throw_pending_exception; } -#ifdef PLATFORM_WIN32 +GSList* +mono_amd64_get_exception_trampolines (gboolean aot) +{ + MonoTrampInfo *info; + GSList *tramps = NULL; + + mono_arch_get_throw_pending_exception (&info, aot); + tramps = g_slist_prepend (tramps, info); + + /* LLVM needs different throw trampolines */ + get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot); + tramps = g_slist_prepend (tramps, info); + + get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot); + tramps = g_slist_prepend (tramps, info); + + get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE); + tramps = g_slist_prepend (tramps, info); + + return tramps; +} + +void +mono_arch_exceptions_init (void) +{ + GSList *tramps, *l; + gpointer tramp; + + if (mono_aot_only) { + throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception"); + tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline"); + mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE); + tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline"); + mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE); + tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline"); + mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE); + } else { + /* Call this to avoid initialization races */ + throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE); + + tramps = mono_amd64_get_exception_trampolines (FALSE); + for (l = tramps; l; l = l->next) { + MonoTrampInfo *info = l->data; + + mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE); + mono_save_trampoline_xdebug_info (info); + mono_tramp_info_free (info); + } + g_slist_free (tramps); + } +} + +#ifdef TARGET_WIN32 /* * The mono_arch_unwindinfo* methods are used to build and add @@ -1242,7 +1355,7 @@ MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context ) PMonoUnwindInfo targetinfo; MonoDomain *domain = mono_domain_get (); - ji = mono_jit_info_table_find (domain, (char*)ControlPc); + ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL); if (!ji) return 0; @@ -1287,5 +1400,74 @@ mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint #endif +#if MONO_SUPPORT_TASKLETS +MonoContinuationRestore +mono_tasklets_arch_restore (void) +{ + static guint8* saved = NULL; + guint8 *code, *start; + int cont_reg = AMD64_R9; /* register usable on both call conventions */ + + if (saved) + return (MonoContinuationRestore)saved; + code = start = mono_global_codeman_reserve (64); + /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */ + /* cont is in AMD64_ARG_REG1 ($rcx or $rdi) + * state is in AMD64_ARG_REG2 ($rdx or $rsi) + * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx) + * We move cont to cont_reg since we need both 
rcx and rdi for the copy + * state is moved to $rax so it's setup as the return value and we can overwrite $rsi + */ + amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8); + amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8); + /* setup the copy of the stack */ + amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int)); + amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3); + x86_cld (code); + amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer)); + amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer)); + amd64_prefix (code, X86_REP_PREFIX); + amd64_movsl (code); + + /* now restore the registers from the LMF */ + amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8); + amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8); + amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8); + amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8); + amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8); + amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8); + amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8); +#ifdef TARGET_WIN32 + amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8); + amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8); +#endif + amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8); + /* restore the lmf chain */ + /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4); + x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/ + /* state is already in rax */ + amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip)); + g_assert ((code - start) <= 64); + saved = start; + return (MonoContinuationRestore)saved; +} +#endif + +/* + * mono_arch_setup_resume_sighandler_ctx: + * + * Setup CTX so execution continues at FUNC. + */ +void +mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func) +{ + /* + * When resuming from a signal handler, the stack should be misaligned, just like right after + * a call. + */ + if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0) + MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8); + MONO_CONTEXT_SET_IP (ctx, func); +}
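
A note on the new throw-path calling convention introduced above: mono_amd64_throw_exception / mono_amd64_throw_corlib_exception now receive a single mgreg_t *regs array indexed by the AMD64_* register constants (filled in by get_throw_trampoline at regs_offset) instead of a dozen scalar arguments, and mono_arch_find_jit_info feeds the same kind of array to mono_unwind_frame. The standalone C sketch below only illustrates that pattern; the REG_* enum, the mgreg_t typedef and the MiniContext struct are stand-ins for illustration, not Mono's actual definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t mgreg_t;

/* Stand-in register indices; the real AMD64_* values come from amd64-codegen.h. */
enum { REG_RAX, REG_RCX, REG_RDX, REG_RBX, REG_RSP, REG_RBP,
       REG_RSI, REG_RDI, REG_R12, REG_R13, REG_R14, REG_R15, REG_NREG };

typedef struct { mgreg_t rsp, rbp, rip; } MiniContext;

/* One array argument replaces many scalar parameters: the trampoline stores
   every register at regs_offset + i * sizeof (gpointer) and passes the base
   address, so the C side simply indexes into it. */
static void
build_context_from_regs (MiniContext *ctx, const mgreg_t *regs, mgreg_t rip)
{
	ctx->rsp = regs [REG_RSP];
	ctx->rbp = regs [REG_RBP];
	ctx->rip = rip - 1;	/* point back into the call instruction, as the patch does */
}

int
main (void)
{
	mgreg_t regs [REG_NREG] = { 0 };
	MiniContext ctx;

	regs [REG_RSP] = 0x7fffffffe008ULL;
	regs [REG_RBP] = 0x7fffffffe040ULL;
	build_context_from_regs (&ctx, regs, 0x400123);
	printf ("rip=%#llx rsp=%#llx\n",
	        (unsigned long long) ctx.rip, (unsigned long long) ctx.rsp);
	return 0;
}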
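
The patch also leans on one piece of alignment arithmetic in several places: the ALIGN_TO macro, the "stack is unaligned on entry" frame size of 192 + 8 in get_throw_trampoline, and the "subtract 8 if SP is 16-byte aligned" adjustments in mono_arch_handle_exception and mono_arch_setup_resume_sighandler_ctx. The ABI keeps RSP 16-byte aligned at a call instruction, so a callee always sees RSP offset by 8 (the pushed return address). This throw-away C check, which is not Mono code, just verifies that arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same rounding expression as the ALIGN_TO macro in the patch. */
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))

int
main (void)
{
	uint64_t sp_at_call = 0x7fffffffe000ULL;	/* 16-byte aligned at the call site */
	uint64_t sp_on_entry = sp_at_call - 8;		/* return address pushed */
	uint64_t sp_in_frame = sp_on_entry - (192 + 8);	/* trampoline frame */

	assert (sp_at_call % 16 == 0);
	assert (sp_on_entry % 16 == 8);	/* "the stack is unaligned on entry" */
	assert (sp_in_frame % 16 == 0);	/* 192 is a multiple of 16, +8 cancels the entry skew */

	/* Faking a resume re-creates the "just after a call" shape. */
	uint64_t resume_sp = sp_at_call;
	if (resume_sp % 16 == 0)
		resume_sp -= 8;
	assert (resume_sp % 16 == 8);

	printf ("ALIGN_TO (100, 16) = %u\n", (unsigned) ALIGN_TO (100, 16));
	return 0;
}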
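
Finally, the most interesting change is in mono_arch_handle_exception: on MONO_ARCH_USE_SIGACTION targets it no longer handles the exception inside the signal handler; it rewrites the saved ucontext (new RIP, RSP moved below the red zone, arguments placed in RDI/RSI) so that the real work happens in handle_signal_exception on the normal stack once the handler returns. The sketch below reproduces only that control-flow trick for Linux/glibc on x86-64; it is a hypothetical standalone demo, not Mono code, and the 128-byte red-zone skip and alignment fix follow the patch.

#define _GNU_SOURCE		/* for REG_RIP / REG_RSP in <sys/ucontext.h> */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>

static void
resumed_on_normal_stack (void)
{
	/* Runs after the handler returned, outside signal context. */
	printf ("resumed on the normal stack\n");
	exit (0);	/* must not return: there is no real return address */
}

static void
handler (int sig, siginfo_t *info, void *uc_raw)
{
	ucontext_t *uc = (ucontext_t *) uc_raw;
	greg_t sp = uc->uc_mcontext.gregs [REG_RSP];

	sp -= 128;		/* stay below the interrupted frame's red zone */
	if (sp % 16 == 0)
		sp -= 8;	/* mimic the "just after a call" misalignment */

	uc->uc_mcontext.gregs [REG_RSP] = sp;
	uc->uc_mcontext.gregs [REG_RIP] = (greg_t) resumed_on_normal_stack;
	/* returning from the handler resumes at resumed_on_normal_stack () */
}

int
main (void)
{
	struct sigaction sa;

	memset (&sa, 0, sizeof (sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset (&sa.sa_mask);
	sigaction (SIGUSR1, &sa, NULL);

	raise (SIGUSR1);
	printf ("not reached\n");
	return 1;
}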