#include <glib.h>
#include <signal.h>
#include <string.h>
+#ifndef PLATFORM_WIN32
#include <sys/ucontext.h>
+#endif
#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
+#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-debug.h>
-#include <mono/metadata/mono-debug-debugger.h>
+#include <mono/utils/mono-mmap.h>
#include "mini.h"
#include "mini-amd64.h"
sctx = g_malloc(sizeof(MonoContext));
/* Copy Win32 context to UNIX style context */
- sctx->eax = ctx->Eax;
- sctx->ebx = ctx->Ebx;
- sctx->ecx = ctx->Ecx;
- sctx->edx = ctx->Edx;
- sctx->ebp = ctx->Ebp;
- sctx->esp = ctx->Esp;
- sctx->esi = ctx->Esi;
- sctx->edi = ctx->Edi;
- sctx->eip = ctx->Eip;
+ sctx->rax = ctx->Rax;
+ sctx->rbx = ctx->Rbx;
+ sctx->rcx = ctx->Rcx;
+ sctx->rdx = ctx->Rdx;
+ sctx->rbp = ctx->Rbp;
+ sctx->rsp = ctx->Rsp;
+ sctx->rsi = ctx->Rsi;
+ sctx->rdi = ctx->Rdi;
+ sctx->rip = ctx->Rip;
+ sctx->r12 = ctx->R12;
+ sctx->r13 = ctx->R13;
+ sctx->r14 = ctx->R14;
+ sctx->r15 = ctx->R15;
switch (er->ExceptionCode) {
case EXCEPTION_ACCESS_VIOLATION:
}
/* Copy context back */
- ctx->Eax = sctx->eax;
- ctx->Ebx = sctx->ebx;
- ctx->Ecx = sctx->ecx;
- ctx->Edx = sctx->edx;
- ctx->Ebp = sctx->ebp;
- ctx->Esp = sctx->esp;
- ctx->Esi = sctx->esi;
- ctx->Edi = sctx->edi;
- ctx->Eip = sctx->eip;
+ ctx->Rax = sctx->rax;
+ ctx->Rbx = sctx->rbx;
+ ctx->Rcx = sctx->rcx;
+ ctx->Rdx = sctx->rdx;
+ ctx->Rbp = sctx->rbp;
+ ctx->Rsp = sctx->rsp;
+ ctx->Rsi = sctx->rsi;
+ ctx->Rdi = sctx->rdi;
+ ctx->Rip = sctx->rip;
+
+ g_free (sctx);
return res;
}
start = code = mono_global_codeman_reserve (256);
- /* get return address */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rip), 8);
-
- /* Restore registers */
- amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8);
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8);
- amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8);
- amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8);
- amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8);
- amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8);
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
+
+ /* Restore all registers except %rip and %r11 */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
+ amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
+ amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
+ amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
+ amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
+ //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
+ //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
+ //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
+ amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
+ amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
+ amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
+ amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
+
- amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rsp), 8);
+ amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
+ /* get return address */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
/* jump to the saved IP */
- amd64_jump_reg (code, AMD64_RAX);
+ amd64_jump_reg (code, AMD64_R11);
inited = TRUE;
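/*
 * Illustrative usage, not part of the patch: callers such as throw_exception ()
 * and altstack_handle_and_restore () below treat the returned stub as a
 * noreturn function taking the context to resume:
 *
 *     void (*restore_context) (MonoContext *);
 *     restore_context = mono_arch_get_restore_context ();
 *     restore_context (&mctx);    -- jumps to mctx.rip and never returns
 */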
if (inited)
return start;
- start = code = mono_global_codeman_reserve (64);
+ start = code = mono_global_codeman_reserve (128);
/* call_filter (MonoContext *ctx, unsigned long eip) */
code = start;
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* set new EBP */
- amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8);
+ amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
/* load callee saved regs */
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8);
- amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8);
- amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8);
- amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8);
- amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8);
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
+ amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
+ amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
+ amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
+ amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
+#ifdef PLATFORM_WIN32
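+ /* RDI and RSI are callee-saved in the Microsoft x64 calling convention,
+ * unlike in the SysV AMD64 ABI, so they have to be reloaded from the
+ * context as well */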
+ amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
+ amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
+#endif
/* call the handler */
- amd64_call_reg (code, AMD64_RSI);
+ amd64_call_reg (code, AMD64_ARG_REG2);
if (! (pos & 8))
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
amd64_leave (code);
amd64_ret (code);
- g_assert ((code - start) < 64);
+ g_assert ((code - start) < 128);
inited = TRUE;
return start;
}
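/*
 * Illustrative sketch, not part of the patch: under the SysV AMD64 ABI the
 * first six integer arguments are passed in %rdi, %rsi, %rdx, %rcx, %r8 and
 * %r9; only the remaining arguments go on the stack. The six dummy arguments
 * of throw_exception () below exploit this, so that every real argument ends
 * up on the stack, where the hand-written trampoline can simply push it.
 * The function and parameter names here are hypothetical.
 */
static guint64
example_stack_passing (guint64 dummy1, guint64 dummy2, guint64 dummy3,
                       guint64 dummy4, guint64 dummy5, guint64 dummy6,
                       guint64 first_stack_arg, guint64 second_stack_arg)
{
	/* first_stack_arg and second_stack_arg are read from the caller's stack */
	return first_stack_arg + second_stack_arg;
}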
+/*
+ * The first few arguments are dummy, to force the other arguments to be passed
+ * on the stack. This avoids overwriting the argument registers in the throw
+ * trampoline.
+ */
static void
-throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
- guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
- guint64 r14, guint64 r15, guint64 rethrow)
+throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
+ guint64 dummy5, guint64 dummy6,
+ MonoObject *exc, guint64 rip, guint64 rsp,
+ guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
+ guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
+ guint64 rax, guint64 rcx, guint64 rdx,
+ guint64 rethrow)
{
static void (*restore_context) (MonoContext *);
MonoContext ctx;
ctx.r13 = r13;
ctx.r14 = r14;
ctx.r15 = r15;
+ ctx.rdi = rdi;
+ ctx.rsi = rsi;
+ ctx.rax = rax;
+ ctx.rcx = rcx;
+ ctx.rdx = rdx;
if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
/*
code = start;
- /* Exception */
- amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 8);
- /* IP */
- amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, 0, 8);
- /* SP */
- amd64_lea_membase (code, AMD64_RDX, AMD64_RSP, 8);
- /* Callee saved regs */
- amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
- amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
- amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
- /* align stack */
- amd64_push_imm (code, 0);
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
+
/* reverse order */
amd64_push_imm (code, rethrow);
+ amd64_push_reg (code, AMD64_RDX);
+ amd64_push_reg (code, AMD64_RCX);
+ amd64_push_reg (code, AMD64_RAX);
+ amd64_push_reg (code, AMD64_RSI);
+ amd64_push_reg (code, AMD64_RDI);
amd64_push_reg (code, AMD64_R15);
amd64_push_reg (code, AMD64_R14);
amd64_push_reg (code, AMD64_R13);
+ amd64_push_reg (code, AMD64_R12);
+ amd64_push_reg (code, AMD64_RBP);
+ amd64_push_reg (code, AMD64_RBX);
+
+ /* SP */
+ amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
+ amd64_push_reg (code, AMD64_RAX);
+
+ /* IP */
+ amd64_push_membase (code, AMD64_R11, 0);
+
+ /* Exception */
+ amd64_push_reg (code, AMD64_ARG_REG1);
+
+#ifdef PLATFORM_WIN32
+ /* align stack */
+ amd64_push_imm (code, 0);
+ amd64_push_imm (code, 0);
+ amd64_push_imm (code, 0);
+ amd64_push_imm (code, 0);
+ amd64_push_imm (code, 0);
+ amd64_push_imm (code, 0);
+#endif
amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
amd64_call_reg (code, AMD64_R11);
start = code = mono_global_codeman_reserve (64);
/* Push throw_ip */
- amd64_push_reg (code, AMD64_RSI);
+ amd64_push_reg (code, AMD64_ARG_REG2);
/* Call exception_from_token */
- amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RDI, 8);
- amd64_mov_reg_imm (code, AMD64_RDI, mono_defaults.exception_class->image);
+ amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
+ amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
amd64_call_reg (code, AMD64_R11);
/* Compute throw_ip */
- amd64_pop_reg (code, AMD64_RSI);
+ amd64_pop_reg (code, AMD64_ARG_REG2);
/* return addr */
- amd64_pop_reg (code, AMD64_RDX);
- amd64_alu_reg_reg (code, X86_SUB, AMD64_RDX, AMD64_RSI);
+ amd64_pop_reg (code, AMD64_ARG_REG3);
+ amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
/* Put the throw_ip at the top of the misaligned stack */
- amd64_push_reg (code, AMD64_RDX);
+ amd64_push_reg (code, AMD64_ARG_REG3);
throw_ex = (guint64)mono_arch_get_throw_exception ();
/* Call throw_exception */
- amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RAX, 8);
+ amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
/* The original IP is on the stack */
amd64_jump_reg (code, AMD64_R11);
if (managed)
*managed = FALSE;
+ *new_ctx = *ctx;
+
if (ji != NULL) {
int offset;
gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
- *new_ctx = *ctx;
-
if (managed)
if (!ji->method->wrapper_type)
*managed = TRUE;
/*
- * Some managed methods like pinvoke wrappers might have save_lmf set.
- * In this case, register save/restore code is not generated by the
- * JIT, so we have to restore callee saved registers from the lmf.
+ * If a method has save_lmf set, then register save/restore code is not generated
+ * by the JIT, so we have to restore callee saved registers from the lmf.
*/
if (ji->method->save_lmf) {
+ MonoLMF *lmf_addr;
+
/*
- * We only need to do this if the exception was raised in managed
- * code, since otherwise the lmf was already popped of the stack.
+ * *lmf might not point to the LMF pushed by this method, so compute the LMF
+ * address instead.
*/
- /* This works because save_lmf prevents fp elimination */
- if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
- new_ctx->rbx = (*lmf)->rbx;
- new_ctx->r12 = (*lmf)->r12;
- new_ctx->r13 = (*lmf)->r13;
- new_ctx->r14 = (*lmf)->r14;
- new_ctx->r15 = (*lmf)->r15;
- }
+ if (omit_fp)
+ lmf_addr = (MonoLMF*)ctx->rsp;
+ else
+ lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
+
+ new_ctx->rbp = lmf_addr->rbp;
+ new_ctx->rbx = lmf_addr->rbx;
+ new_ctx->r12 = lmf_addr->r12;
+ new_ctx->r13 = lmf_addr->r13;
+ new_ctx->r14 = lmf_addr->r14;
+ new_ctx->r15 = lmf_addr->r15;
}
else {
offset = omit_fp ? 0 : -1;
offset ++;
}
else {
- reg = *((guint64 *)ctx->SC_EBP + offset);
+ reg = *((guint64 *)ctx->rbp + offset);
offset --;
}
if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
/* remove any unused lmf */
- *lmf = (*lmf)->previous_lmf;
+ *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
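+ /* the low bit of previous_lmf is used as a flag (see
+ * mono_arch_notify_pending_exc ()), so mask it off to recover the pointer */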
}
if (omit_fp) {
/* Pop frame */
new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
- new_ctx->SC_EIP = *((guint64 *)new_ctx->rsp) - 1;
+ new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
/* Pop return address */
new_ctx->rsp += 8;
}
else {
/* Pop EBP and the return address */
- new_ctx->SC_ESP = ctx->SC_EBP + (2 * sizeof (gpointer));
+ new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
/* we subtract 1, so that the IP points into the call instruction */
- new_ctx->SC_EIP = *((guint64 *)ctx->SC_EBP + 1) - 1;
- new_ctx->SC_EBP = *((guint64 *)ctx->SC_EBP);
+ new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
+ new_ctx->rbp = *((guint64 *)ctx->rbp);
}
/* Pop arguments off the stack */
MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
- new_ctx->SC_ESP += stack_to_pop;
+ new_ctx->rsp += stack_to_pop;
}
return ji;
} else if (*lmf) {
-
- *new_ctx = *ctx;
+ guint64 rip;
- if (!(*lmf)->method)
+ if (((guint64)(*lmf)->previous_lmf) & 1) {
+ /* This LMF has the rip field set */
+ rip = (*lmf)->rip;
+ } else if ((*lmf)->rsp == 0) {
+ /* Top LMF entry */
return (gpointer)-1;
-
- if ((ji = mono_jit_info_table_find (domain, (gpointer)(*lmf)->rip))) {
} else {
+ /*
+ * The rsp field is set just before the call which transitioned to native
+ * code. Obtain the rip from the stack.
+ */
+ rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
+ }
+
+ ji = mono_jit_info_table_find (domain, (gpointer)rip);
+ if (!ji) {
+ if (!(*lmf)->method)
+ /* Top LMF entry */
+ return (gpointer)-1;
+ /* Trampoline lmf frame */
memset (res, 0, sizeof (MonoJitInfo));
res->method = (*lmf)->method;
}
- new_ctx->SC_RIP = (*lmf)->rip;
- new_ctx->SC_RBP = (*lmf)->ebp;
- new_ctx->SC_ESP = (*lmf)->rsp;
+ new_ctx->rip = rip;
+ new_ctx->rbp = (*lmf)->rbp;
+ new_ctx->rsp = (*lmf)->rsp;
- new_ctx->SC_RBX = (*lmf)->rbx;
- new_ctx->SC_R12 = (*lmf)->r12;
- new_ctx->SC_R13 = (*lmf)->r13;
- new_ctx->SC_R14 = (*lmf)->r14;
- new_ctx->SC_R15 = (*lmf)->r15;
+ new_ctx->rbx = (*lmf)->rbx;
+ new_ctx->r12 = (*lmf)->r12;
+ new_ctx->r13 = (*lmf)->r13;
+ new_ctx->r14 = (*lmf)->r14;
+ new_ctx->r15 = (*lmf)->r15;
- *lmf = (*lmf)->previous_lmf;
+ *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
return ji ? ji : res;
}
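/*
 * Illustrative sketch, not part of the patch: the MonoLMF fields the unwinder
 * above relies on (the real definition lives in mini-amd64.h and contains
 * additional members):
 *
 *     struct MonoLMF {
 *         gpointer    previous_lmf;   -- low bit set means rip is valid
 *         MonoMethod *method;
 *         guint64     rip, rbx, rbp, rsp;
 *         guint64     r12, r13, r14, r15;
 *         ...
 *     };
 */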
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
- ucontext_t *ctx = (ucontext_t*)sigctx;
MonoContext mctx;
- mctx.rax = ctx->uc_mcontext.gregs [REG_RAX];
- mctx.rbx = ctx->uc_mcontext.gregs [REG_RBX];
- mctx.rcx = ctx->uc_mcontext.gregs [REG_RCX];
- mctx.rdx = ctx->uc_mcontext.gregs [REG_RDX];
- mctx.rbp = ctx->uc_mcontext.gregs [REG_RBP];
- mctx.rsp = ctx->uc_mcontext.gregs [REG_RSP];
- mctx.rsi = ctx->uc_mcontext.gregs [REG_RSI];
- mctx.rdi = ctx->uc_mcontext.gregs [REG_RDI];
- mctx.rip = ctx->uc_mcontext.gregs [REG_RIP];
- mctx.r12 = ctx->uc_mcontext.gregs [REG_R12];
- mctx.r13 = ctx->uc_mcontext.gregs [REG_R13];
- mctx.r14 = ctx->uc_mcontext.gregs [REG_R14];
- mctx.r15 = ctx->uc_mcontext.gregs [REG_R15];
-
- mono_handle_exception (&mctx, obj, (gpointer)mctx.rip, test_only);
-
- ctx->uc_mcontext.gregs [REG_RAX] = mctx.rax;
- ctx->uc_mcontext.gregs [REG_RBX] = mctx.rbx;
- ctx->uc_mcontext.gregs [REG_RCX] = mctx.rcx;
- ctx->uc_mcontext.gregs [REG_RDX] = mctx.rdx;
- ctx->uc_mcontext.gregs [REG_RBP] = mctx.rbp;
- ctx->uc_mcontext.gregs [REG_RSP] = mctx.rsp;
- ctx->uc_mcontext.gregs [REG_RSI] = mctx.rsi;
- ctx->uc_mcontext.gregs [REG_RDI] = mctx.rdi;
- ctx->uc_mcontext.gregs [REG_RIP] = mctx.rip;
- ctx->uc_mcontext.gregs [REG_R12] = mctx.r12;
- ctx->uc_mcontext.gregs [REG_R13] = mctx.r13;
- ctx->uc_mcontext.gregs [REG_R14] = mctx.r14;
- ctx->uc_mcontext.gregs [REG_R15] = mctx.r15;
+ mono_arch_sigctx_to_monoctx (sigctx, &mctx);
+
+ mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
+
+ mono_arch_monoctx_to_sigctx (&mctx, sigctx);
return TRUE;
}
+#ifdef MONO_ARCH_USE_SIGACTION
+static inline guint64*
+gregs_from_ucontext (ucontext_t *ctx)
+{
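+ /* glibc exposes the registers through the uc_mcontext.gregs array, while on
+ * FreeBSD the register state is embedded directly in uc_mcontext, so its
+ * address is used as the register array */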
+#ifdef __FreeBSD__
+ guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
+#else
+ guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
+#endif
+
+ return gregs;
+}
+#endif
+void
+mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
+{
+#ifdef MONO_ARCH_USE_SIGACTION
+ ucontext_t *ctx = (ucontext_t*)sigctx;
+
+ guint64 *gregs = gregs_from_ucontext (ctx);
+
+ mctx->rax = gregs [REG_RAX];
+ mctx->rbx = gregs [REG_RBX];
+ mctx->rcx = gregs [REG_RCX];
+ mctx->rdx = gregs [REG_RDX];
+ mctx->rbp = gregs [REG_RBP];
+ mctx->rsp = gregs [REG_RSP];
+ mctx->rsi = gregs [REG_RSI];
+ mctx->rdi = gregs [REG_RDI];
+ mctx->rip = gregs [REG_RIP];
+ mctx->r12 = gregs [REG_R12];
+ mctx->r13 = gregs [REG_R13];
+ mctx->r14 = gregs [REG_R14];
+ mctx->r15 = gregs [REG_R15];
+#else
+ MonoContext *ctx = (MonoContext *)sigctx;
+
+ mctx->rax = ctx->rax;
+ mctx->rbx = ctx->rbx;
+ mctx->rcx = ctx->rcx;
+ mctx->rdx = ctx->rdx;
+ mctx->rbp = ctx->rbp;
+ mctx->rsp = ctx->rsp;
+ mctx->rsi = ctx->rsi;
+ mctx->rdi = ctx->rdi;
+ mctx->rip = ctx->rip;
+ mctx->r12 = ctx->r12;
+ mctx->r13 = ctx->r13;
+ mctx->r14 = ctx->r14;
+ mctx->r15 = ctx->r15;
+#endif
+}
+
+void
+mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
+{
+#ifdef MONO_ARCH_USE_SIGACTION
+ ucontext_t *ctx = (ucontext_t*)sigctx;
+
+ guint64 *gregs = gregs_from_ucontext (ctx);
+
+ gregs [REG_RAX] = mctx->rax;
+ gregs [REG_RBX] = mctx->rbx;
+ gregs [REG_RCX] = mctx->rcx;
+ gregs [REG_RDX] = mctx->rdx;
+ gregs [REG_RBP] = mctx->rbp;
+ gregs [REG_RSP] = mctx->rsp;
+ gregs [REG_RSI] = mctx->rsi;
+ gregs [REG_RDI] = mctx->rdi;
+ gregs [REG_RIP] = mctx->rip;
+ gregs [REG_R12] = mctx->r12;
+ gregs [REG_R13] = mctx->r13;
+ gregs [REG_R14] = mctx->r14;
+ gregs [REG_R15] = mctx->r15;
+#else
+ MonoContext *ctx = (MonoContext *)sigctx;
+
+ ctx->rax = mctx->rax;
+ ctx->rbx = mctx->rbx;
+ ctx->rcx = mctx->rcx;
+ ctx->rdx = mctx->rdx;
+ ctx->rbp = mctx->rbp;
+ ctx->rsp = mctx->rsp;
+ ctx->rsi = mctx->rsi;
+ ctx->rdi = mctx->rdi;
+ ctx->rip = mctx->rip;
+ ctx->r12 = mctx->r12;
+ ctx->r13 = mctx->r13;
+ ctx->r14 = mctx->r14;
+ ctx->r15 = mctx->r15;
+#endif
+}
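/*
 * Illustrative usage, not part of the patch: a sigaction-style signal handler
 * (assuming MONO_ARCH_USE_SIGACTION) would round-trip the context through the
 * converters above; the handler name is hypothetical.
 *
 *     static void
 *     example_segv_handler (int signo, siginfo_t *info, void *sigctx)
 *     {
 *             MonoContext mctx;
 *
 *             mono_arch_sigctx_to_monoctx (sigctx, &mctx);
 *             ... inspect or rewrite mctx ...
 *             mono_arch_monoctx_to_sigctx (&mctx, sigctx);
 *     }
 */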
+
gpointer
mono_arch_ip_from_context (void *sigctx)
{
+#ifdef MONO_ARCH_USE_SIGACTION
+
+ ucontext_t *ctx = (ucontext_t*)sigctx;
+
+ guint64 *gregs = gregs_from_ucontext (ctx);
+
+ return (gpointer)gregs [REG_RIP];
+#else
+ MonoContext *ctx = sigctx;
+ return (gpointer)ctx->rip;
+#endif
+}
+
+static void
+restore_soft_guard_pages (void)
+{
+ MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+ if (jit_tls->stack_ovf_guard_base)
+ mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
+}
+
+/*
+ * This function modifies mctx so that, when it is restored, execution does
+ * not resume at mctx.rip directly, but in a function which restores the
+ * protection of the soft-guard pages and then returns to continue at
+ * mctx.rip.
+ */
+static void
+prepare_for_guard_pages (MonoContext *mctx)
+{
+ gpointer *sp;
+ sp = (gpointer)(mctx->rsp);
+ sp -= 1;
+ /* the return addr */
+ sp [0] = (gpointer)(mctx->rip);
+ mctx->rip = (guint64)restore_soft_guard_pages;
+ mctx->rsp = (guint64)sp;
+}
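/*
 * Illustrative control flow, not part of the patch: after the rewrite above,
 * restore_context () enters restore_soft_guard_pages () with the original
 * mctx.rip sitting on the stack as its return address, so re-protecting the
 * guard pages ends with a plain ret back to the interrupted code.
 */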
+
+static void
+altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
+{
+ void (*restore_context) (MonoContext *);
+ MonoContext mctx;
+
+ restore_context = mono_arch_get_restore_context ();
+ mono_arch_sigctx_to_monoctx (sigctx, &mctx);
+ mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
+ if (stack_ovf)
+ prepare_for_guard_pages (&mctx);
+ restore_context (&mctx);
+}
+
+void
+mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
+{
+#ifdef MONO_ARCH_USE_SIGACTION
+ MonoException *exc = NULL;
ucontext_t *ctx = (ucontext_t*)sigctx;
- return (gpointer)ctx->uc_mcontext.gregs [REG_RIP];
+ guint64 *gregs = gregs_from_ucontext (ctx);
+ MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
+ gpointer *sp;
+ int frame_size;
+
+ if (stack_ovf)
+ exc = mono_domain_get ()->stack_overflow_ex;
+ if (!ji)
+ mono_handle_native_sigsegv (SIGSEGV, sigctx);
+
+ /* set up a call frame on the real stack so that control is returned there
+ * and exception handling can continue.
+ * The frame looks like:
+ * ucontext struct
+ * ...
+ * return ip
+ * 128 is the size of the red zone below %rsp, which must be skipped over
+ */
+ frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
+ frame_size += 15;
+ frame_size &= ~15;
+ sp = (gpointer)(gregs [REG_RSP] & ~15);
+ sp = (gpointer)((char*)sp - frame_size);
+ /* the arguments must be aligned */
+ sp [-1] = (gpointer)gregs [REG_RIP];
+ /* may need to adjust pointers in the new struct copy, depending on the OS */
+ memcpy (sp + 4, ctx, sizeof (ucontext_t));
+ /* on return from the signal handler, execution starts in altstack_handle_and_restore() */
+ gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
+ gregs [REG_RSP] = (unsigned long)(sp - 1);
+ gregs [REG_RDI] = (unsigned long)(sp + 4);
+ gregs [REG_RSI] = (guint64)exc;
+ gregs [REG_RDX] = stack_ovf;
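+ /* %rdi, %rsi and %rdx set above are the first three SysV argument registers;
+ * they carry the (sigctx, exc, stack_ovf) arguments of
+ * altstack_handle_and_restore () */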
+#endif
+}
+
+static guint64
+get_original_ip (void)
+{
+ MonoLMF *lmf = mono_get_lmf ();
+
+ g_assert (lmf);
+
+ /* Reset the change to previous_lmf */
+ lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
+
+ return lmf->rip;
+}
+static gpointer
+get_throw_pending_exception (void)
+{
+ static guint8* start;
+ static gboolean inited = FALSE;
+ guint8 *code;
+ guint8 *br[1];
+ gpointer throw_trampoline;
+
+ if (inited)
+ return start;
+
+ start = code = mono_global_codeman_reserve (128);
+
+ /* We are in the frame of a managed method after a call */
+ /*
+ * We would like to throw the pending exception in such a way that it looks to
+ * be thrown from the managed method.
+ */
+
+ /* Save registers which might contain the return value of the call */
+ amd64_push_reg (code, AMD64_RAX);
+ amd64_push_reg (code, AMD64_RDX);
+
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+ amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
+
+ /* Align stack */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+
+ /* Obtain the pending exception */
+ amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
+ amd64_call_reg (code, AMD64_R11);
+
+ /* Check if it is NULL, and branch */
+ amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+
+ /* exc != NULL branch */
+
+ /* Save the exc on the stack */
+ amd64_push_reg (code, AMD64_RAX);
+ /* Align stack */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+
+ /* Obtain the original ip and clear the flag in previous_lmf */
+ amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
+ amd64_call_reg (code, AMD64_R11);
+
+ /* Load exc */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
+
+ /* Pop saved stuff from the stack */
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
+
+ /* Setup arguments for the throw trampoline */
+ /* Exception */
+ amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
+ /* The trampoline expects the caller ip to be pushed on the stack */
+ amd64_push_reg (code, AMD64_RAX);
+
+ /* Call the throw trampoline */
+ throw_trampoline = mono_arch_get_throw_exception ();
+ amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
+ /* We use a jump instead of a call so we can push the original ip on the stack */
+ amd64_jump_reg (code, AMD64_R11);
+
+ /* ex == NULL branch */
+ mono_amd64_patch (br [0], code);
+
+ /* Obtain the original ip and clear the flag in previous_lmf */
+ amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
+ amd64_call_reg (code, AMD64_R11);
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
+
+ /* Restore registers */
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
+ amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
+ amd64_pop_reg (code, AMD64_RDX);
+ amd64_pop_reg (code, AMD64_RAX);
+
+ /* Return to original code */
+ amd64_jump_reg (code, AMD64_R11);
+
+ g_assert ((code - start) < 128);
+
+ inited = TRUE;
+
+ return start;
+}
+
+/*
+ * Called when a thread receives an async exception while executing unmanaged code.
+ * Instead of checking for this exception in the managed-to-native wrapper, we hijack
+ * the return address on the stack to point to a helper routine which throws the
+ * exception.
+ */
+void
+mono_arch_notify_pending_exc (void)
+{
+ MonoLMF *lmf = mono_get_lmf ();
+
+ if (lmf->rsp == 0)
+ /* Initial LMF */
+ return;
+
+ if ((guint64)lmf->previous_lmf & 1)
+ /* Already hijacked or trampoline LMF entry */
+ return;
+
+ /* lmf->rsp is set just before making the call which transitions to unmanaged code */
+ lmf->rip = *(guint64*)(lmf->rsp - 8);
+ /* Signal that lmf->rip is set */
+ lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
+
+ *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
+}