#include <mono/metadata/appdomain.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/threads.h>
+#include <mono/metadata/threads-types.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/gc-internal.h>
start = code = mono_global_codeman_reserve (256);
- /* get return address */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rip), 8);
-
- /* Restore registers */
- amd64_mov_reg_membase (code, AMD64_RBP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbp), 8);
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rbx), 8);
- amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
- amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
- amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
- amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
-#ifdef PLATFORM_WIN32
- amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
- amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
-#endif
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG1, 8);
+
+ /* Restore all registers except %rip and %r11 */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rax), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rcx), 8);
+ amd64_mov_reg_membase (code, AMD64_RDX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdx), 8);
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbx), 8);
+ amd64_mov_reg_membase (code, AMD64_RBP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rbp), 8);
+ amd64_mov_reg_membase (code, AMD64_RSI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsi), 8);
+ amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rdi), 8);
+ //amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r8), 8);
+ //amd64_mov_reg_membase (code, AMD64_R9, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r9), 8);
+ //amd64_mov_reg_membase (code, AMD64_R10, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r10), 8);
+ amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
+ amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
+ amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
+ amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
+
+ amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
- amd64_mov_reg_membase (code, AMD64_RSP, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsp), 8);
+ /* get return address */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
/* jump to the saved IP */
- amd64_jump_reg (code, AMD64_RAX);
+ amd64_jump_reg (code, AMD64_R11);
inited = TRUE;
return start;
}
-#ifdef PLATFORM_WIN32
-static void
-throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
- guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
- guint64 r14, guint64 r15, guint64 rdi, guint64 rsi, guint64 rethrow)
-#else
+
+/*
+ * The first few arguments are dummy, to force the other arguments to be passed on
+ * the stack, this avoids overwriting the argument registers in the throw trampoline.
+ */
static void
-throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
- guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
- guint64 r14, guint64 r15, guint64 rethrow)
-#endif
+throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
+ guint64 dummy5, guint64 dummy6,
+ MonoObject *exc, guint64 rip, guint64 rsp,
+ guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
+ guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
+ guint64 rax, guint64 rcx, guint64 rdx,
+ guint64 rethrow)
{
static void (*restore_context) (MonoContext *);
MonoContext ctx;
ctx.r13 = r13;
ctx.r14 = r14;
ctx.r15 = r15;
-#ifdef PLATFORM_WIN32
ctx.rdi = rdi;
ctx.rsi = rsi;
-#endif
+ ctx.rax = rax;
+ ctx.rcx = rcx;
+ ctx.rdx = rdx;
if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
/*
code = start;
- /* Exception */
- amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_ARG_REG1, 8);
- /* IP */
- amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RSP, 0, 8);
- /* SP */
- amd64_lea_membase (code, AMD64_ARG_REG3, AMD64_RSP, 8);
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
-#ifdef PLATFORM_WIN32
- /* Callee saved regs */
- amd64_mov_reg_reg (code, AMD64_R9, AMD64_RBX, 8);
/* reverse order */
amd64_push_imm (code, rethrow);
+ amd64_push_reg (code, AMD64_RDX);
+ amd64_push_reg (code, AMD64_RCX);
+ amd64_push_reg (code, AMD64_RAX);
amd64_push_reg (code, AMD64_RSI);
amd64_push_reg (code, AMD64_RDI);
amd64_push_reg (code, AMD64_R15);
amd64_push_reg (code, AMD64_R13);
amd64_push_reg (code, AMD64_R12);
amd64_push_reg (code, AMD64_RBP);
+ amd64_push_reg (code, AMD64_RBX);
+
+ /* SP */
+ amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
+ amd64_push_reg (code, AMD64_RAX);
+
+ /* IP */
+ amd64_push_membase (code, AMD64_R11, 0);
+
+ /* Exception */
+ amd64_push_reg (code, AMD64_ARG_REG1);
+
+#ifdef PLATFORM_WIN32
/* align stack */
amd64_push_imm (code, 0);
amd64_push_imm (code, 0);
amd64_push_imm (code, 0);
amd64_push_imm (code, 0);
-#else
- /* Callee saved regs */
- amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
- amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
- amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
- /* align stack */
amd64_push_imm (code, 0);
- /* reverse order */
- amd64_push_imm (code, rethrow);
- amd64_push_reg (code, AMD64_R15);
- amd64_push_reg (code, AMD64_R14);
- amd64_push_reg (code, AMD64_R13);
+ amd64_push_imm (code, 0);
#endif
amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
sp -= 1;
/* the return addr */
sp [0] = (gpointer)(mctx->rip);
- mctx->rip = (unsigned long)restore_soft_guard_pages;
- mctx->rsp = (unsigned long)sp;
+ mctx->rip = (guint64)restore_soft_guard_pages;
+ mctx->rsp = (guint64)sp;
}
static void
#endif
}
+static guint64
+get_original_ip (void)
+{
+ MonoLMF *lmf = mono_get_lmf ();
+
+ g_assert (lmf);
+
+ /* Reset the change to previous_lmf */
+ lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf & ~1);
+
+ return lmf->rip;
+}
+
/*
 * get_throw_pending_exception:
 *
 *   Generate (once, then cache) a small trampoline which a hijacked managed
 * return address jumps to. The trampoline checks for a pending exception on
 * the current thread: if one exists it is thrown as if it came from the
 * managed caller; otherwise control returns transparently to the original
 * return address, with the call's return-value registers preserved.
 */
static gpointer
get_throw_pending_exception (void)
{
	static guint8* start;
	static gboolean inited = FALSE;
	guint8 *code;
	guint8 *br[1];
	gpointer throw_trampoline;

	/* Generated lazily and cached; later calls return the same code */
	if (inited)
		return start;

	start = code = mono_global_codeman_reserve (128);

	/* We are in the frame of a managed method after a call */
	/*
	 * We would like to throw the pending exception in such a way that it looks to
	 * be thrown from the managed method.
	 */

	/* Save registers which might contain the return value of the call */
	amd64_push_reg (code, AMD64_RAX);
	amd64_push_reg (code, AMD64_RDX);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
	amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);

	/* Align stack */
	/* Stack now holds 4 slots: rax, rdx, the xmm0 spill, and this pad */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the pending exception */
	amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
	amd64_call_reg (code, AMD64_R11);

	/* Check if it is NULL, and branch */
	amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
	br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);

	/* exc != NULL branch */

	/* Save the exc on the stack */
	amd64_push_reg (code, AMD64_RAX);
	/* Align stack */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);

	/* Obtain the original ip and clear the flag in previous_lmf */
	/* Leaves the original managed return address in %rax */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);

	/* Load exc */
	/* rsp+0 is the alignment pad pushed above, rsp+8 the saved exc */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);

	/* Pop saved stuff from the stack */
	/*
	 * 6 slots: the pad + exc pushed in this branch, plus the 4 entry slots
	 * (rax, rdx, xmm0 spill, pad). The saved return values are dead since
	 * we are about to throw.
	 */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);

	/* Setup arguments for the throw trampoline */
	/* Exception */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
	/* The trampoline expects the caller ip to be pushed on the stack */
	amd64_push_reg (code, AMD64_RAX);

	/* Call the throw trampoline */
	throw_trampoline = mono_arch_get_throw_exception ();
	amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
	/* We use a jump instead of a call so we can push the original ip on the stack */
	amd64_jump_reg (code, AMD64_R11);

	/* ex == NULL branch */
	mono_amd64_patch (br [0], code);

	/* Obtain the original ip and clear the flag in previous_lmf */
	amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
	amd64_call_reg (code, AMD64_R11);
	/* Stash the original ip in %r11 so %rax can be restored below */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);

	/* Restore registers */
	/* Unwind the 4 entry slots in reverse order: pad, xmm0, rdx, rax */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_RDX);
	amd64_pop_reg (code, AMD64_RAX);

	/* Return to original code */
	amd64_jump_reg (code, AMD64_R11);

	/* Make sure the emitted code fits the 128 byte reservation above */
	g_assert ((code - start) < 128);

	inited = TRUE;

	return start;
}
+
+/*
+ * Called when a thread receives an async exception while executing unmanaged code.
+ * Instead of checking for this exception in the managed-to-native wrapper, we hijack
+ * the return address on the stack to point to a helper routine which throws the
+ * exception.
+ */
+void
+mono_arch_notify_pending_exc (void)
+{
+ MonoLMF *lmf = mono_get_lmf ();
+
+ if (lmf->rsp == 0)
+ /* Initial LMF */
+ return;
+
+ if ((guint64)lmf->previous_lmf & 1)
+ /* Already hijacked or trampoline LMF entry */
+ return;
+
+ /* lmf->rsp is set just before making the call which transitions to unmanaged code */
+ lmf->rip = *(guint64*)(lmf->rsp - 8);
+ /* Signal that lmf->rip is set */
+ lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
+
+ *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
+}