+ guint8 *code, *start;
+ guint8 *br[1];
+ gpointer throw_trampoline;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
+
+ start = code = mono_global_codeman_reserve (128);
+
+ /* We are in the frame of a managed method after a call */
+ /*
+ * We would like to throw the pending exception in such a way that it looks to
+ * be thrown from the managed method.
+ */
+
+ /* Save registers which might contain the return value of the call */
+ amd64_push_reg (code, AMD64_RAX);
+ amd64_push_reg (code, AMD64_RDX);
+
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+ amd64_movsd_membase_reg (code, AMD64_RSP, 0, AMD64_XMM0);
+
+ /* Align stack */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+
+ /* Obtain the pending exception */
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
+ }
+ amd64_call_reg (code, AMD64_R11);
+
+ /* Check if it is NULL, and branch */
+ amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+
+ /* exc != NULL branch */
+
+ /* Save the exc on the stack */
+ amd64_push_reg (code, AMD64_RAX);
+ /* Align stack */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+
+ /* Obtain the original ip and clear the flag in previous_lmf */
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
+ }
+ amd64_call_reg (code, AMD64_R11);
+
+ /* Load exc */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, 8, 8);
+
+ /* Pop saved stuff from the stack */
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 6 * 8);
+
+ /* Setup arguments for the throw trampoline */
+ /* Exception */
+ amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_R11, 8);
+ /* The trampoline expects the caller ip to be pushed on the stack */
+ amd64_push_reg (code, AMD64_RAX);
+
+ /* Call the throw trampoline */
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ throw_trampoline = mono_get_throw_exception ();
+ amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
+ }
+ /* We use a jump instead of a call so we can push the original ip on the stack */
+ amd64_jump_reg (code, AMD64_R11);
+
+ /* exc == NULL branch */
+ mono_amd64_patch (br [0], code);
+
+ /* Obtain the original ip and clear the flag in previous_lmf */
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
+ }
+ amd64_call_reg (code, AMD64_R11);
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
+
+ /* Restore registers */
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
+ amd64_movsd_reg_membase (code, AMD64_XMM0, AMD64_RSP, 0);
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
+ amd64_pop_reg (code, AMD64_RDX);
+ amd64_pop_reg (code, AMD64_RAX);
+
+ /* Return to original code */
+ amd64_jump_reg (code, AMD64_R11);
+
+ g_assert ((code - start) < 128);
+
+ if (info)
+ *info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);
+
+ return start;
+}
+
+static gpointer throw_pending_exception;
+
+/*
+ * Called when a thread receives an async exception while executing unmanaged code.
+ * Instead of checking for this exception in the managed-to-native wrapper, we hijack
+ * the return address on the stack to point to a helper routine which throws the
+ * exception.
+ */
+void
+mono_arch_notify_pending_exc (void)
+{
+ MonoLMF *lmf = mono_get_lmf ();
+
+ if (!lmf)
+ /* Not yet started */
+ return;
+
+ if (lmf->rsp == 0)
+ /* Initial LMF */
+ return;
+
+ if ((guint64)lmf->previous_lmf & 1)
+ /* Already hijacked or trampoline LMF entry */
+ return;
+
+ /* lmf->rsp is set just before making the call which transitions to unmanaged code */
+ /* Save the original return address (the slot at lmf->rsp - 8) so the throw code can resume there */
+ lmf->rip = *(guint64*)(lmf->rsp - 8);
+ /* Signal that lmf->rip is set by tagging the low bit of previous_lmf (checked above to avoid double hijacking) */
+ lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
+
+ /* Redirect the return address so unmanaged code "returns" into the throw_pending_exception trampoline */
+ *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
+}
+
+/*
+ * mono_arch_exceptions_init:
+ *
+ * Arch-specific exception support initialization: obtain the trampoline used
+ * to throw pending async exceptions (from the AOT image in full-AOT mode,
+ * otherwise by generating it), and register the extra throw/resume
+ * trampolines used by the LLVM backend.
+ */
+void
+mono_arch_exceptions_init (void)
+{
+ guint8 *tramp;
+
+ if (mono_aot_only) {
+ /* In full-AOT mode the trampoline must come from the AOT image */
+ throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
+ } else {
+ /* Call this to avoid initialization races */
+ throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);
+
+ /* LLVM needs different throw trampolines */
+ /* NOTE(review): the boolean args presumably select rethrow/corlib/abs/resume variants — confirm against get_throw_trampoline's signature */
+ tramp = get_throw_trampoline (NULL, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", FALSE);
+ mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
+
+ tramp = get_throw_trampoline (NULL, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", FALSE);
+ mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
+
+ tramp = get_throw_trampoline (NULL, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
+ mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
+ }
+}
+
+#ifdef TARGET_WIN32
+
+/*
+ * The mono_arch_unwindinfo* methods are used to build and add
+ * function table info for each emitted method from mono. On Winx64
+ * the seh handler will not be called if the mono methods are not
+ * added to the function table.
+ *
+ * We should not need to add non-volatile register info to the
+ * table since mono stores that info elsewhere. (Except for the register
+ * used for the fp.)
+ */
+
+/* Maximum number of UNWIND_CODE slots reserved per method (see the breakdown in UNWIND_INFO below) */
+#define MONO_MAX_UNWIND_CODES 22
+
+/*
+ * One Win64 unwind code slot. Multi-slot operations (e.g. UWOP_ALLOC_LARGE)
+ * store their extra data in following slot(s) through the raw FrameOffset
+ * view of the union.
+ */
+typedef union _UNWIND_CODE {
+ struct {
+ guchar CodeOffset; /* byte offset of the end of the prolog instruction (set from nextip - codebegin below) */
+ guchar UnwindOp : 4; /* UWOP_* operation code */
+ guchar OpInfo : 4; /* operation-specific info, e.g. the register number */
+ };
+ gushort FrameOffset; /* raw 16-bit view used for multi-slot operation data */
+} UNWIND_CODE, *PUNWIND_CODE;
+
+/* Win64 UNWIND_INFO header followed by a fixed-size unwind code array */
+typedef struct _UNWIND_INFO {
+ guchar Version : 3;
+ guchar Flags : 5;
+ guchar SizeOfProlog;
+ guchar CountOfCodes;
+ guchar FrameRegister : 4;
+ guchar FrameOffset : 4;
+ /* Custom fixed array size for mono, allowing for: */
+ /*UWOP_PUSH_NONVOL ebp offset = 21*/
+ /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
+ /*UWOP_SET_FPREG : requires 2 offset = 17*/
+ /*UWOP_PUSH_NONVOL offset = 15-0*/
+ UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
+
+/* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
+ * union {
+ * OPTIONAL ULONG ExceptionHandler;
+ * OPTIONAL ULONG FunctionEntry;
+ * };
+ * OPTIONAL ULONG ExceptionData[]; */
+} UNWIND_INFO, *PUNWIND_INFO;
+
+/* A function table entry and its unwind info, kept together as one allocation per method */
+typedef struct
+{
+ RUNTIME_FUNCTION runtimeFunction;
+ UNWIND_INFO unwindInfo;
+} MonoUnwindInfo, *PMonoUnwindInfo;
+
+/*
+ * Allocate a zero-initialized MonoUnwindInfo and store it in *monoui.
+ * Version 1 is the UNWIND_INFO format version.
+ */
+static void
+mono_arch_unwindinfo_create (gpointer* monoui)
+{
+ PMonoUnwindInfo newunwindinfo;
+ *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
+ newunwindinfo->unwindInfo.Version = 1;
+}
+
+/*
+ * Record a UWOP_PUSH_NONVOL unwind code for a 'push <reg>' prolog instruction.
+ * 'codebegin' is the start of the emitted code and 'nextip' the address just
+ * past the push; their difference becomes the code's CodeOffset. Codes must be
+ * added in prolog order (enforced by the SizeOfProlog check below); they are
+ * stored back-to-front in the UnwindCode array, so the newest (largest offset)
+ * code occupies the lowest used index.
+ */
+void
+mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
+{
+ PMonoUnwindInfo unwindinfo;
+ PUNWIND_CODE unwindcode;
+ guchar codeindex;
+ /* Lazily allocate the unwind info on first use */
+ if (!*monoui)
+ mono_arch_unwindinfo_create (monoui);
+
+ unwindinfo = (MonoUnwindInfo*)*monoui;
+
+ if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
+ g_error ("Larger allocation needed for the unwind information.");
+
+ /* Fill from the end of the array toward the front */
+ codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
+ unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
+ unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
+ unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
+ unwindcode->OpInfo = reg;
+
+ /* Each new code must lie strictly after the previously recorded prolog offset */
+ if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
+ g_error ("Adding unwind info in wrong order.");
+
+ unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
+}
+
+/*
+ * Record a UWOP_SET_FPREG unwind code establishing 'reg' as the frame pointer.
+ * Consumes two slots in the back-to-front UnwindCode array: the lower-indexed
+ * slot carries the raw frame offset data (0 here — mono uses no fp offset)
+ * and the higher-indexed slot is the UWOP_SET_FPREG code itself. As with the
+ * other add_* helpers, codes must be appended in prolog order.
+ */
+void
+mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
+{
+ PMonoUnwindInfo unwindinfo;
+ PUNWIND_CODE unwindcode;
+ guchar codeindex;
+ /* Lazily allocate the unwind info on first use */
+ if (!*monoui)
+ mono_arch_unwindinfo_create (monoui);
+
+ unwindinfo = (MonoUnwindInfo*)*monoui;
+
+ /* Needs two free slots */
+ if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
+ g_error ("Larger allocation needed for the unwind information.");
+
+ codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
+ unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
+ unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
+ unwindcode++;
+ unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
+ unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
+ unwindcode->OpInfo = reg;
+
+ /* Note: unwindInfo.FrameOffset stays 0, matching the zero offset recorded above */
+ unwindinfo->unwindInfo.FrameRegister = reg;
+
+ /* Each new code must lie strictly after the previously recorded prolog offset */
+ if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
+ g_error ("Adding unwind info in wrong order.");
+
+ unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
+}
+
+void
+mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
+{
+ PMonoUnwindInfo unwindinfo;
+ PUNWIND_CODE unwindcode;
+ guchar codeindex;
+ guchar codesneeded;
+ if (!*monoui)
+ mono_arch_unwindinfo_create (monoui);
+
+ unwindinfo = (MonoUnwindInfo*)*monoui;
+
+ if (size < 0x8)
+ g_error ("Stack allocation must be equal to or greater than 0x8.");
+
+ if (size <= 0x80)
+ codesneeded = 1;
+ else if (size <= 0x7FFF8)
+ codesneeded = 2;
+ else
+ codesneeded = 3;
+
+ if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
+ g_error ("Larger allocation needed for the unwind information.");
+
+ codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
+ unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
+
+ if (codesneeded == 1) {
+ /*The size of the allocation is
+ (the number in the OpInfo member) times 8 plus 8*/
+ unwindcode->OpInfo = (size - 8)/8;
+ unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
+ }
+ else {
+ if (codesneeded == 3) {
+ /*the unscaled size of the allocation is recorded
+ in the next two slots in little-endian format*/
+ *((unsigned int*)(&unwindcode->FrameOffset)) = size;
+ unwindcode += 2;
+ unwindcode->OpInfo = 1;
+ }
+ else {
+ /*the size of the allocation divided by 8
+ is recorded in the next slot*/
+ unwindcode->FrameOffset = size/8;
+ unwindcode++;
+ unwindcode->OpInfo = 0;
+
+ }
+ unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
+ }