+/*
+ * prepare_for_guard_pages:
+ *
+ * Modifies MCTX so that when it is restored, execution will not resume
+ * at mctx->rip but in restore_soft_guard_pages (), which re-enables the
+ * protection on the soft-guard pages and then returns to continue at the
+ * original mctx->rip.
+ */
+static void
+prepare_for_guard_pages (MonoContext *mctx)
+{
+ gpointer *sp;
+ /* push the original return address on the target stack so that
+  * restore_soft_guard_pages () can return to it */
+ sp = (gpointer*)(mctx->rsp);
+ sp -= 1;
+ /* the return addr */
+ sp [0] = (gpointer)(mctx->rip);
+ mctx->rip = (unsigned long)restore_soft_guard_pages;
+ mctx->rsp = (unsigned long)sp;
+}
+
+/*
+ * altstack_handle_and_restore:
+ *
+ * Runs on the thread's normal stack after mono_arch_handle_altstack_exception ()
+ * redirected control here from the alternate signal stack. Converts the saved
+ * signal context SIGCTX into a MonoContext, dispatches the exception OBJ and,
+ * for stack overflows, arranges for the soft-guard pages to be re-protected
+ * before restoring the (possibly modified) context.
+ */
+static void
+altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
+{
+ MonoContext ctx;
+ void (*restore) (MonoContext *) = mono_arch_get_restore_context ();
+
+ mono_arch_sigctx_to_monoctx (sigctx, &ctx);
+ mono_handle_exception (&ctx, obj, MONO_CONTEXT_GET_IP (&ctx), FALSE);
+ if (stack_ovf)
+  prepare_for_guard_pages (&ctx);
+ restore (&ctx);
+}
+
+/*
+ * mono_arch_handle_altstack_exception:
+ *
+ * Called from the signal handler while executing on the alternate signal
+ * stack. Builds a fake call frame on the thread's normal stack and rewrites
+ * the signal context so that, on return from the handler, execution
+ * continues in altstack_handle_and_restore () on that stack.
+ */
+void
+mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
+{
+#ifdef MONO_ARCH_USE_SIGACTION
+ MonoException *exc = NULL;
+ ucontext_t *ctx = (ucontext_t*)sigctx;
+ guint64 *gregs = gregs_from_ucontext (ctx);
+ MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
+ gpointer *sp;
+ int frame_size;
+
+ if (stack_ovf)
+  exc = mono_domain_get ()->stack_overflow_ex;
+ if (!ji)
+  /* the faulting ip is not in managed code: treat as a native crash */
+  mono_handle_native_sigsegv (SIGSEGV, sigctx);
+
+ /* setup a call frame on the real stack so that control is returned there
+  * and exception handling can continue.
+  * The frame looks like:
+  * ucontext struct
+  * ...
+  * return ip
+  * 128 is the size of the red zone
+  */
+ frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
+ frame_size += 15;
+ frame_size &= ~15;
+ sp = (gpointer*)(gregs [REG_RSP] & ~15);
+ sp = (gpointer*)((char*)sp - frame_size);
+ /* the arguments must be aligned */
+ sp [-1] = (gpointer)gregs [REG_RIP];
+ /* may need to adjust pointers in the new struct copy, depending on the OS */
+ memcpy (sp + 4, ctx, sizeof (ucontext_t));
+ /* at the return from the signal handler execution starts in altstack_handle_and_restore() */
+ gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
+ gregs [REG_RSP] = (unsigned long)(sp - 1);
+ /* arguments for altstack_handle_and_restore (): copied ucontext, exc, stack_ovf */
+ gregs [REG_RDI] = (unsigned long)(sp + 4);
+ gregs [REG_RSI] = (guint64)exc;
+ gregs [REG_RDX] = stack_ovf;
+#endif
+}
+
+/*
+ * get_original_ip:
+ *
+ * Returns the saved return address stored in the current LMF by
+ * mono_arch_notify_pending_exc () and clears the low-bit flag that was
+ * set in lmf->previous_lmf to mark the hijack.
+ */
+static guint64
+get_original_ip (void)
+{
+ MonoLMF *lmf = mono_get_lmf ();
+
+ g_assert (lmf != NULL);
+
+ /* Undo the hijack marker set in previous_lmf */
+ lmf->previous_lmf = (gpointer)(((guint64)lmf->previous_lmf) & ~(guint64)1);
+
+ return lmf->rip;
+}
+
+/*
+ * get_throw_pending_exception:
+ *
+ * Returns a cached trampoline, generated on first call, which is installed
+ * by mono_arch_notify_pending_exc () as the hijacked return address of a
+ * managed-to-native call. It throws any pending exception as if it were
+ * raised from the managed caller, or simply resumes at the original ip if
+ * no exception is pending.
+ *
+ * NOTE(review): not thread-safe — two threads racing on the first call can
+ * both see inited == FALSE and generate the code twice; verify callers
+ * serialize this or that duplicate generation is harmless here.
+ */
+static gpointer
+get_throw_pending_exception (void)
+{
+ static guint8* start;
+ static gboolean inited = FALSE;
+ guint8 *code;
+ guint8 *br[1];
+ gpointer throw_trampoline;
+
+ if (inited)
+  return start;
+
+ start = code = mono_global_codeman_reserve (128);
+
+ /* We are in the frame of a managed method after a call */
+ /*
+  * We would like to throw the pending exception in such a way that it looks to
+  * be thrown from the managed method.
+  */
+
+ /* Save registers which might contain the return value of the call */
+ amd64_push_reg (code, AMD64_RAX);
+ amd64_push_reg (code, AMD64_RDX);
+
+ /* Save the XMM0 return register in a stack slot */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+ amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
+
+ /* Align stack */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+
+ /* Obtain the pending exception */
+ amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
+ amd64_call_reg (code, AMD64_R11);
+
+ /* Check if it is NULL, and branch */
+ amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+
+ /* exc != NULL branch */
+
+ /* Save the exc on the stack */
+ amd64_push_reg (code, AMD64_RAX);
+ /* Align stack */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+
+ /* Obtain the original ip and clear the flag in previous_lmf */
+ amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
+ amd64_call_reg (code, AMD64_R11);
+
+ /* Load exc (saved two slots up, below the alignment slot) */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
+
+ /* Pop saved stuff from the stack: all 6 slots pushed above; the saved
+  * RAX/RDX/XMM0 are discarded since we are about to throw */
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
+
+ /* Setup arguments for the throw trampoline */
+ /* Exception */
+ amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
+ /* The trampoline expects the caller ip to be pushed on the stack
+  * (RAX holds the original ip returned by get_original_ip) */
+ amd64_push_reg (code, AMD64_RAX);
+
+ /* Call the throw trampoline */
+ throw_trampoline = mono_arch_get_throw_exception ();
+ amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
+ /* We use a jump instead of a call so we can push the original ip on the stack */
+ amd64_jump_reg (code, AMD64_R11);
+
+ /* ex == NULL branch */
+ mono_amd64_patch (br [0], code);
+
+ /* Obtain the original ip and clear the flag in previous_lmf */
+ amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
+ amd64_call_reg (code, AMD64_R11);
+ /* Keep the original ip in R11 across the register restores */
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
+
+ /* Restore registers (reverse of the saves above) */
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
+ amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
+ amd64_pop_reg (code, AMD64_RDX);
+ amd64_pop_reg (code, AMD64_RAX);
+
+ /* Return to original code */
+ amd64_jump_reg (code, AMD64_R11);
+
+ g_assert ((code - start) < 128);
+
+ inited = TRUE;
+
+ return start;
+}
+
+/*
+ * Called when a thread receives an async exception while executing unmanaged code.
+ * Instead of checking for this exception in the managed-to-native wrapper, we hijack
+ * the return address on the stack to point to a helper routine which throws the
+ * exception.
+ */
+void
+mono_arch_notify_pending_exc (void)
+{
+ MonoLMF *lmf = mono_get_lmf ();
+
+ if (lmf->rsp == 0)
+  /* Initial LMF */
+  return;
+
+ if ((guint64)lmf->previous_lmf & 1)
+  /* Already hijacked or trampoline LMF entry */
+  return;
+
+ /* lmf->rsp is set just before making the call which transitions to unmanaged code */
+ /* Save the original return address (the slot just below lmf->rsp; the
+  * arithmetic is on the raw guint64, so -8 is in bytes) */
+ lmf->rip = *(guint64*)(lmf->rsp - 8);
+ /* Signal that lmf->rip is set (low bit is cleared again by get_original_ip ()) */
+ lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
+
+ /* Redirect the return to the throw helper */
+ *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
+}