#include <glib.h>
#include <signal.h>
#include <string.h>
-#ifndef PLATFORM_WIN32
-#include <sys/ucontext.h>
+#ifdef HAVE_UCONTEXT_H
+#include <ucontext.h>
#endif
#include <mono/arch/amd64/amd64-codegen.h>
#include "mini.h"
#include "mini-amd64.h"
+#include "tasklets.h"
+#include "debug-mini.h"
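+/* Round VAL up to the next multiple of ALIGN, which must be a power of two;
+ * e.g. ALIGN_TO (13, 8) == 16. Used to place the Win64 unwind info on an
+ * 8 byte boundary after the method code. */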
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;
static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
#define W32_SEH_HANDLE_EX(_ex) \
- if (_ex##_handler) _ex##_handler((int)sctx)
+ if (_ex##_handler) _ex##_handler(0, er, sctx)
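+/* Token pasting dispatch: W32_SEH_HANDLE_EX (segv) expands to
+ * 'if (segv_handler) segv_handler (0, er, sctx)'. */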
/*
* Unhandled Exception Filter
}
/* Copy context back */
- ctx->Rax = sctx->rax;
- ctx->Rbx = sctx->rbx;
- ctx->Rcx = sctx->rcx;
- ctx->Rdx = sctx->rdx;
+ /* Nonvolatile registers (callee-saved in the Win64 ABI) */
+ ctx->Rsp = sctx->rsp;
+ ctx->Rdi = sctx->rdi;
+ ctx->Rsi = sctx->rsi;
+ ctx->Rbx = sctx->rbx;
ctx->Rbp = sctx->rbp;
- ctx->Rsp = sctx->rsp;
- ctx->Rsi = sctx->rsi;
- ctx->Rdi = sctx->rdi;
- ctx->Rip = sctx->rip;
+ ctx->R12 = sctx->r12;
+ ctx->R13 = sctx->r13;
+ ctx->R14 = sctx->r14;
+ ctx->R15 = sctx->r15;
+ ctx->Rip = sctx->rip;
+
+ /* Volatile registers; restoring these should not matter */
+ ctx->Rax = sctx->rax;
+ ctx->Rcx = sctx->rcx;
+ ctx->Rdx = sctx->rdx;
g_free (sctx);
}
}
-#endif /* PLATFORM_WIN32 */
+#endif /* TARGET_WIN32 */
/*
* mono_arch_get_restore_context:
* Returns a pointer to a method which restores a previously saved sigcontext.
*/
gpointer
-mono_arch_get_restore_context (void)
+mono_arch_get_restore_context (MonoTrampInfo **info, gboolean aot)
{
- static guint8 *start = NULL;
- static gboolean inited = FALSE;
+ guint8 *start = NULL;
guint8 *code;
-
- if (inited)
- return start;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
/* restore_context (MonoContext *ctx) */
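+ /* The MonoContext pointer (ARG_REG1) is assumed to be in %r11 at this point
+ * (set up by code not shown here); each register is reloaded from its
+ * G_STRUCT_OFFSET slot below. */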
amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
+#endif
- amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
-
- /* get return address */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
+ if (mono_running_on_valgrind ()) {
+ /* Prevent 'Address 0x... is just below the stack ptr.' errors: load rip out
+ * of the context before switching rsp, since the context may lie below the
+ * new stack pointer. */
+ amd64_mov_reg_membase (code, AMD64_R8, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
+ amd64_mov_reg_reg (code, AMD64_RSP, AMD64_R8, 8);
+ } else {
+ amd64_mov_reg_membase (code, AMD64_RSP, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rsp), 8);
+ /* get return address */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (MonoContext, rip), 8);
+ }
/* jump to the saved IP */
amd64_jump_reg (code, AMD64_R11);
- inited = TRUE;
+ nacl_global_codeman_validate (&start, 256, &code);
+
+ mono_arch_flush_icache (start, code - start);
+
+ if (info)
+ *info = mono_tramp_info_create (g_strdup ("restore_context"), start, code - start, ji, unwind_ops);
return start;
}
* @exc object in this case).
*/
gpointer
-mono_arch_get_call_filter (void)
+mono_arch_get_call_filter (MonoTrampInfo **info, gboolean aot)
{
- static guint8 *start;
- static gboolean inited = FALSE;
+ guint8 *start;
int i;
guint8 *code;
guint32 pos;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
+ const guint kMaxCodeSize = NACL_SIZE (128, 256);
- if (inited)
- return start;
-
- start = code = mono_global_codeman_reserve (128);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
/* call_filter (MonoContext *ctx, unsigned long eip) */
code = start;
amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
-#ifdef PLATFORM_WIN32
+#endif
+#ifdef TARGET_WIN32
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
#endif
amd64_leave (code);
amd64_ret (code);
- g_assert ((code - start) < 128);
+ g_assert ((code - start) < kMaxCodeSize);
- inited = TRUE;
+ nacl_global_codeman_validate (&start, kMaxCodeSize, &code);
+
+ mono_arch_flush_icache (start, code - start);
+
+ if (info)
+ *info = mono_tramp_info_create (g_strdup ("call_filter"), start, code - start, ji, unwind_ops);
return start;
}
* The first few arguments are dummy, to force the other arguments to be passed on
* the stack; this avoids overwriting the argument registers in the throw trampoline.
*/
-static void
-throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
- guint64 dummy5, guint64 dummy6,
- MonoObject *exc, guint64 rip, guint64 rsp,
- guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
- guint64 r14, guint64 r15, guint64 rdi, guint64 rsi,
- guint64 rax, guint64 rcx, guint64 rdx,
- guint64 rethrow)
+void
+mono_amd64_throw_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
+ guint64 dummy5, guint64 dummy6,
+ mgreg_t *regs, mgreg_t rip,
+ MonoObject *exc, gboolean rethrow)
{
static void (*restore_context) (MonoContext *);
MonoContext ctx;
if (!restore_context)
- restore_context = mono_arch_get_restore_context ();
+ restore_context = mono_get_restore_context ();
- ctx.rsp = rsp;
+ ctx.rsp = regs [AMD64_RSP];
ctx.rip = rip;
- ctx.rbx = rbx;
- ctx.rbp = rbp;
- ctx.r12 = r12;
- ctx.r13 = r13;
- ctx.r14 = r14;
- ctx.r15 = r15;
- ctx.rdi = rdi;
- ctx.rsi = rsi;
- ctx.rax = rax;
- ctx.rcx = rcx;
- ctx.rdx = rdx;
-
- if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
- /*
- * The debugger wants us to stop on the `throw' instruction.
- * By the time we get here, it already inserted a breakpoint on
- * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
- */
-
- /* FIXME FIXME
- *
- * In case of a rethrow, the JIT is emitting code like this:
- *
- * mov 0xffffffffffffffd0(%rbp),%rax'
- * mov %rax,%rdi
- * callq throw
- *
- * Here, restore_context() wouldn't restore the %rax register correctly.
- */
- ctx.rip = rip - 8;
- ctx.rsp = rsp + 8;
- restore_context (&ctx);
- g_assert_not_reached ();
- }
-
- /* adjust eip so that it point into the call instruction */
- ctx.rip -= 1;
+ ctx.rbx = regs [AMD64_RBX];
+ ctx.rbp = regs [AMD64_RBP];
+ ctx.r12 = regs [AMD64_R12];
+ ctx.r13 = regs [AMD64_R13];
+ ctx.r14 = regs [AMD64_R14];
+ ctx.r15 = regs [AMD64_R15];
+ ctx.rdi = regs [AMD64_RDI];
+ ctx.rsi = regs [AMD64_RSI];
+ ctx.rax = regs [AMD64_RAX];
+ ctx.rcx = regs [AMD64_RCX];
+ ctx.rdx = regs [AMD64_RDX];
if (mono_object_isinst (exc, mono_defaults.exception_class)) {
MonoException *mono_ex = (MonoException*)exc;
if (!rethrow)
mono_ex->stack_trace = NULL;
}
+
+ if (mono_debug_using_mono_debugger ()) {
+ guint8 buf [16], *code;
+
+ mono_breakpoint_clean_code (NULL, (gpointer)rip, 8, buf, sizeof (buf));
+ code = buf + 8;
+
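+ /* 0xe8 is the opcode of a 5 byte 'call rel32'; buf holds the bytes around rip,
+ * so buf [3] is the byte at rip - 5. If the throw site was such a call, point
+ * rip back at it for the debugger. */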
+ if (buf [3] == 0xe8) {
+ MonoContext ctx_cp = ctx;
+ ctx_cp.rip = rip - 5;
+
+ if (mono_debugger_handle_exception (&ctx_cp, exc)) {
+ restore_context (&ctx_cp);
+ g_assert_not_reached ();
+ }
+ }
+ }
+
+ /* adjust the ip so that it points into the call instruction */
+ ctx.rip -= 1;
+
mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
restore_context (&ctx);
g_assert_not_reached ();
}
+void
+mono_amd64_throw_corlib_exception (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
+ guint64 dummy5, guint64 dummy6,
+ mgreg_t *regs, mgreg_t rip,
+ guint32 ex_token_index, gint64 pc_offset)
+{
+ guint32 ex_token = MONO_TOKEN_TYPE_DEF | ex_token_index;
+ MonoException *ex;
+
+ ex = mono_exception_from_token (mono_defaults.exception_class->image, ex_token);
+
+ rip -= pc_offset;
+
+ /* Negate the ip adjustment done in mono_amd64_throw_exception () */
+ rip += 1;
+
+ mono_amd64_throw_exception (dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, regs, rip, (MonoObject*)ex, FALSE);
+}
+
+static void
+mono_amd64_resume_unwind (guint64 dummy1, guint64 dummy2, guint64 dummy3, guint64 dummy4,
+ guint64 dummy5, guint64 dummy6,
+ mgreg_t *regs, mgreg_t rip,
+ guint32 dummy7, gint64 dummy8)
+{
+ /* Only the register parameters are valid */
+ MonoContext ctx;
+
+ ctx.rsp = regs [AMD64_RSP];
+ ctx.rip = rip;
+ ctx.rbx = regs [AMD64_RBX];
+ ctx.rbp = regs [AMD64_RBP];
+ ctx.r12 = regs [AMD64_R12];
+ ctx.r13 = regs [AMD64_R13];
+ ctx.r14 = regs [AMD64_R14];
+ ctx.r15 = regs [AMD64_R15];
+ ctx.rdi = regs [AMD64_RDI];
+ ctx.rsi = regs [AMD64_RSI];
+ ctx.rax = regs [AMD64_RAX];
+ ctx.rcx = regs [AMD64_RCX];
+ ctx.rdx = regs [AMD64_RDX];
+
+ mono_resume_unwind (&ctx);
+}
+
+/*
+ * get_throw_trampoline:
+ *
+ * Generate a call to mono_amd64_throw_exception/
+ * mono_amd64_throw_corlib_exception.
+ */
static gpointer
-get_throw_trampoline (gboolean rethrow)
+get_throw_trampoline (MonoTrampInfo **info, gboolean rethrow, gboolean corlib, gboolean llvm_abs, gboolean resume_unwind, const char *tramp_name, gboolean aot)
{
guint8* start;
guint8 *code;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
+ int i, stack_size, arg_offsets [16], regs_offset;
+ const guint kMaxCodeSize = NACL_SIZE (256, 512);
- start = code = mono_global_codeman_reserve (64);
-
- code = start;
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
- amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
+ /* The stack is unaligned on entry */
+ stack_size = 192 + 8;
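+ /* 4 argument slots plus the 16 register slots need 160 bytes; 192 leaves some
+ * slack, and the extra 8 makes rsp 16 byte aligned again after the subtraction,
+ * since it is misaligned on entry. */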
-#ifdef PLATFORM_WIN32
- // FIXME: Sync with the !WIN32 code below
- NOT_IMPLEMENTED;
+ code = start;
- /* align stack */
- amd64_push_imm (code, 0);
- amd64_push_imm (code, 0);
- amd64_push_imm (code, 0);
- amd64_push_imm (code, 0);
-#else
- /* No need to align stack */
- //amd64_push_imm (code, 0);
+ if (info)
+ unwind_ops = mono_arch_get_cie_program ();
- /* reverse order */
- amd64_push_imm (code, rethrow);
- amd64_push_reg (code, AMD64_RDX);
- amd64_push_reg (code, AMD64_RCX);
- amd64_push_reg (code, AMD64_RAX);
- amd64_push_reg (code, AMD64_RSI);
- amd64_push_reg (code, AMD64_RDI);
- amd64_push_reg (code, AMD64_R15);
- amd64_push_reg (code, AMD64_R14);
- amd64_push_reg (code, AMD64_R13);
- amd64_push_reg (code, AMD64_R12);
- amd64_push_reg (code, AMD64_RBP);
- amd64_push_reg (code, AMD64_RBX);
+ /* Alloc frame */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, stack_size);
+ if (info)
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, stack_size + 8);
- /* SP */
- amd64_lea_membase (code, AMD64_RAX, AMD64_R11, 8);
- amd64_push_reg (code, AMD64_RAX);
+ /*
+ * To hide linux/windows calling convention differences, we pass all arguments on
+ * the stack by passing 6 dummy values in registers.
+ */
- /* IP */
- amd64_push_membase (code, AMD64_R11, 0);
+ arg_offsets [0] = 0;
+ arg_offsets [1] = sizeof(mgreg_t);
+ arg_offsets [2] = sizeof(mgreg_t) * 2;
+ arg_offsets [3] = sizeof(mgreg_t) * 3;
+ regs_offset = sizeof(mgreg_t) * 4;
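+ /* Frame layout: [rsp+0 .. rsp+31] hold the four stack arguments of the C
+ * callee, [rsp+32 ...] holds the AMD64_NREG sized register dump passed as the
+ * 'regs' argument. */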
- /* Exception */
- amd64_push_reg (code, AMD64_ARG_REG1);
-#endif
+ /* Save registers */
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (i != AMD64_RSP)
+ amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
+ /* Save RSP */
+ amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
+ /* Set arg1 == regs */
+ amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
+ /* Set arg2 == eip */
+ if (llvm_abs)
+ amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
+ else
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
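+ /* [rsp + stack_size] holds the return address pushed by the call into this
+ * trampoline, i.e. the throwing rip. */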
+ /* Set arg3 == exc/ex_token_index */
+ if (resume_unwind)
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
+ else
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
+ /* Set arg4 == rethrow/pc offset */
+ if (resume_unwind) {
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
+ } else if (corlib) {
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
+ if (llvm_abs)
+ /*
+ * The caller is LLVM code which passes the absolute address not a pc offset,
+ * so compensate by passing 0 as 'rip' and passing the negated abs address as
+ * the pc offset.
+ */
+ amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
+ } else {
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
+ }
- amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? (mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
+ }
amd64_call_reg (code, AMD64_R11);
amd64_breakpoint (code);
- g_assert ((code - start) < 64);
+ mono_arch_flush_icache (start, code - start);
+
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate (&start, kMaxCodeSize, &code);
+
+ if (info)
+ *info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops);
return start;
}
* signature: void (*func) (MonoException *exc);
*
*/
-gpointer
-mono_arch_get_throw_exception (void)
+gpointer
+mono_arch_get_throw_exception (MonoTrampInfo **info, gboolean aot)
{
- static guint8* start;
- static gboolean inited = FALSE;
-
- if (inited)
- return start;
-
- start = get_throw_trampoline (FALSE);
-
- inited = TRUE;
-
- return start;
+ return get_throw_trampoline (info, FALSE, FALSE, FALSE, FALSE, "throw_exception", aot);
}
gpointer
-mono_arch_get_rethrow_exception (void)
+mono_arch_get_rethrow_exception (MonoTrampInfo **info, gboolean aot)
{
- static guint8* start;
- static gboolean inited = FALSE;
-
- if (inited)
- return start;
-
- start = get_throw_trampoline (TRUE);
-
- inited = TRUE;
-
- return start;
-}
-
-gpointer
-mono_arch_get_throw_exception_by_name (void)
-{
- static guint8* start;
- static gboolean inited = FALSE;
- guint8 *code;
-
- if (inited)
- return start;
-
- start = code = mono_global_codeman_reserve (64);
-
- /* Not used on amd64 */
- amd64_breakpoint (code);
-
- return start;
+ return get_throw_trampoline (info, TRUE, FALSE, FALSE, FALSE, "rethrow_exception", aot);
}
/**
* needs no relocations in the caller.
*/
gpointer
-mono_arch_get_throw_corlib_exception (void)
+mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
- static guint8* start;
- static gboolean inited = FALSE;
- guint8 *code;
- guint64 throw_ex;
-
- if (inited)
- return start;
-
- start = code = mono_global_codeman_reserve (64);
-
- /* Push throw_ip */
- amd64_push_reg (code, AMD64_ARG_REG2);
-
- /* Call exception_from_token */
- amd64_mov_reg_reg (code, AMD64_ARG_REG2, AMD64_ARG_REG1, 8);
- amd64_mov_reg_imm (code, AMD64_ARG_REG1, mono_defaults.exception_class->image);
- amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
- amd64_call_reg (code, AMD64_R11);
-
- /* Compute throw_ip */
- amd64_pop_reg (code, AMD64_ARG_REG2);
- /* return addr */
- amd64_pop_reg (code, AMD64_ARG_REG3);
- amd64_alu_reg_reg (code, X86_SUB, AMD64_ARG_REG3, AMD64_ARG_REG2);
-
- /* Put the throw_ip at the top of the misaligned stack */
- amd64_push_reg (code, AMD64_ARG_REG3);
-
- throw_ex = (guint64)mono_arch_get_throw_exception ();
-
- /* Call throw_exception */
- amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, 8);
- amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
- /* The original IP is on the stack */
- amd64_jump_reg (code, AMD64_R11);
-
- g_assert ((code - start) < 64);
-
- inited = TRUE;
-
- return start;
+ return get_throw_trampoline (info, FALSE, TRUE, FALSE, FALSE, "throw_corlib_exception", aot);
}
-/* mono_arch_find_jit_info:
+/*
+ * mono_arch_find_jit_info:
*
- * This function is used to gather information from @ctx. It return the
- * MonoJitInfo of the corresponding function, unwinds one stack frame and
- * stores the resulting context into @new_ctx. It also stores a string
- * describing the stack location into @trace (if not NULL), and modifies
- * the @lmf if necessary. @native_offset return the IP offset from the
- * start of the function or -1 if that info is not available.
+ * This function is used to gather information from @ctx and store it in @frame.
+ * It unwinds one stack frame, and stores the resulting context into @new_ctx. @lmf
+ * is modified if needed.
+ * Returns TRUE on success, FALSE otherwise.
*/
-MonoJitInfo *
-mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
- MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
- gboolean *managed)
+gboolean
+mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
+ MonoJitInfo *ji, MonoContext *ctx,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
+ StackFrameInfo *frame)
{
- MonoJitInfo *ji;
- int i;
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
- /* Avoid costly table lookup during stack overflow */
- if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
- ji = prev_ji;
- else
- ji = mono_jit_info_table_find (domain, ip);
-
- if (managed)
- *managed = FALSE;
+ memset (frame, 0, sizeof (StackFrameInfo));
+ frame->ji = ji;
+ frame->managed = FALSE;
*new_ctx = *ctx;
if (ji != NULL) {
- int offset;
- gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
-
- if (managed)
- if (!ji->method->wrapper_type)
- *managed = TRUE;
-
- /*
- * If a method has save_lmf set, then register save/restore code is not generated
- * by the JIT, so we have to restore callee saved registers from the lmf.
- */
- if (ji->method->save_lmf) {
- MonoLMF *lmf_addr;
-
- /*
- * *lmf might not point to the LMF pushed by this method, so compute the LMF
- * address instead.
- */
- if (omit_fp)
- lmf_addr = (MonoLMF*)ctx->rsp;
- else
- lmf_addr = (MonoLMF*)(ctx->rbp - sizeof (MonoLMF));
-
- new_ctx->rbp = lmf_addr->rbp;
- new_ctx->rbx = lmf_addr->rbx;
- new_ctx->r12 = lmf_addr->r12;
- new_ctx->r13 = lmf_addr->r13;
- new_ctx->r14 = lmf_addr->r14;
- new_ctx->r15 = lmf_addr->r15;
- }
- else {
- offset = omit_fp ? 0 : -1;
- /* restore caller saved registers */
- for (i = 0; i < AMD64_NREG; i ++)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
- guint64 reg;
-
- if (omit_fp) {
- reg = *((guint64*)ctx->rsp + offset);
- offset ++;
- }
- else {
- reg = *((guint64 *)ctx->rbp + offset);
- offset --;
- }
-
- switch (i) {
- case AMD64_RBX:
- new_ctx->rbx = reg;
- break;
- case AMD64_R12:
- new_ctx->r12 = reg;
- break;
- case AMD64_R13:
- new_ctx->r13 = reg;
- break;
- case AMD64_R14:
- new_ctx->r14 = reg;
- break;
- case AMD64_R15:
- new_ctx->r15 = reg;
- break;
- case AMD64_RBP:
- new_ctx->rbp = reg;
- break;
- default:
- g_assert_not_reached ();
- }
- }
- }
+ mgreg_t regs [MONO_MAX_IREGS + 1];
+ guint8 *cfa;
+ guint32 unwind_info_len;
+ guint8 *unwind_info;
+
+ frame->type = FRAME_TYPE_MANAGED;
+
+ if (!ji->method->wrapper_type || ji->method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD)
+ frame->managed = TRUE;
+
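+ /* NOTE: for JIT compiled methods, ji->used_regs is (re)used here as a key into
+ * the cached unwind info table rather than as a raw register mask. */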
+ if (ji->from_aot)
+ unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
+ else
+ unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
+
+ frame->unwind_info = unwind_info;
+ frame->unwind_info_len = unwind_info_len;
+
+ regs [AMD64_RAX] = new_ctx->rax;
+ regs [AMD64_RBX] = new_ctx->rbx;
+ regs [AMD64_RCX] = new_ctx->rcx;
+ regs [AMD64_RDX] = new_ctx->rdx;
+ regs [AMD64_RBP] = new_ctx->rbp;
+ regs [AMD64_RSP] = new_ctx->rsp;
+ regs [AMD64_RSI] = new_ctx->rsi;
+ regs [AMD64_RDI] = new_ctx->rdi;
+ regs [AMD64_RIP] = new_ctx->rip;
+ regs [AMD64_R12] = new_ctx->r12;
+ regs [AMD64_R13] = new_ctx->r13;
+ regs [AMD64_R14] = new_ctx->r14;
+ regs [AMD64_R15] = new_ctx->r15;
+
+ mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
+ (guint8*)ji->code_start + ji->code_size,
+ ip, regs, MONO_MAX_IREGS + 1,
+ save_locations, MONO_MAX_IREGS, &cfa);
+
+ new_ctx->rax = regs [AMD64_RAX];
+ new_ctx->rbx = regs [AMD64_RBX];
+ new_ctx->rcx = regs [AMD64_RCX];
+ new_ctx->rdx = regs [AMD64_RDX];
+ new_ctx->rbp = regs [AMD64_RBP];
+ new_ctx->rsp = regs [AMD64_RSP];
+ new_ctx->rsi = regs [AMD64_RSI];
+ new_ctx->rdi = regs [AMD64_RDI];
+ new_ctx->rip = regs [AMD64_RIP];
+ new_ctx->r12 = regs [AMD64_R12];
+ new_ctx->r13 = regs [AMD64_R13];
+ new_ctx->r14 = regs [AMD64_R14];
+ new_ctx->r15 = regs [AMD64_R15];
+
+ /* The CFA becomes the new SP value */
+ new_ctx->rsp = (mgreg_t)cfa;
+
+ /* Adjust IP */
+ new_ctx->rip --;
if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
/* remove any unused lmf */
- *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
- }
-
- if (omit_fp) {
- /* Pop frame */
- new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
- new_ctx->rip = *((guint64 *)new_ctx->rsp) - 1;
- /* Pop return address */
- new_ctx->rsp += 8;
- }
- else {
- /* Pop EBP and the return address */
- new_ctx->rsp = ctx->rbp + (2 * sizeof (gpointer));
- /* we substract 1, so that the IP points into the call instruction */
- new_ctx->rip = *((guint64 *)ctx->rbp + 1) - 1;
- new_ctx->rbp = *((guint64 *)ctx->rbp);
+ *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
}
+#ifndef MONO_AMD64_NO_PUSHES
/* Pop arguments off the stack */
{
MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
new_ctx->rsp += stack_to_pop;
}
+#endif
- return ji;
+ return TRUE;
} else if (*lmf) {
guint64 rip;
+ if (((guint64)(*lmf)->previous_lmf) & 2) {
+ /*
+ * This LMF entry is created by the soft debug code to mark transitions to
+ * managed code done during invokes.
+ */
+ MonoLMFExt *ext = (MonoLMFExt*)(*lmf);
+
+ g_assert (ext->debugger_invoke);
+
+ memcpy (new_ctx, &ext->ctx, sizeof (MonoContext));
+
+ *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
+
+ frame->type = FRAME_TYPE_DEBUGGER_INVOKE;
+
+ return TRUE;
+ }
+
if (((guint64)(*lmf)->previous_lmf) & 1) {
/* This LMF has the rip field set */
rip = (*lmf)->rip;
} else if ((*lmf)->rsp == 0) {
/* Top LMF entry */
- return (gpointer)-1;
+ return FALSE;
} else {
/*
* The rsp field is set just before the call which transitioned to native
* code. Obtain the rip from the stack.
*/
- rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
+ rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
}
- ji = mono_jit_info_table_find (domain, (gpointer)rip);
- if (!ji) {
- if (!(*lmf)->method)
- /* Top LMF entry */
- return (gpointer)-1;
- /* Trampoline lmf frame */
- memset (res, 0, sizeof (MonoJitInfo));
- res->method = (*lmf)->method;
- }
+ ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
+ /*
+ * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
+ * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
+ * return address.
+ */
+ //g_assert (ji);
+ if (!ji)
+ return FALSE;
+
+ /* Adjust IP */
+ rip --;
+
+ frame->ji = ji;
+ frame->type = FRAME_TYPE_MANAGED_TO_NATIVE;
new_ctx->rip = rip;
new_ctx->rbp = (*lmf)->rbp;
new_ctx->r13 = (*lmf)->r13;
new_ctx->r14 = (*lmf)->r14;
new_ctx->r15 = (*lmf)->r15;
+#ifdef TARGET_WIN32
+ new_ctx->rdi = (*lmf)->rdi;
+ new_ctx->rsi = (*lmf)->rsi;
+#endif
- *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~1);
+ *lmf = (gpointer)(((guint64)(*lmf)->previous_lmf) & ~3);
- return ji ? ji : res;
+ return TRUE;
}
- return NULL;
+ return FALSE;
+}
+
+/*
+ * handle_signal_exception:
+ *
+ * Called when execution resumes after a signal handler.
+ */
+static void
+handle_signal_exception (gpointer obj, gboolean test_only)
+{
+ MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+ MonoContext ctx;
+ static void (*restore_context) (MonoContext *);
+
+ if (!restore_context)
+ restore_context = mono_get_restore_context ();
+
+ memcpy (&ctx, &jit_tls->ex_ctx, sizeof (MonoContext));
+
+ if (mono_debugger_handle_exception (&ctx, (MonoObject *)obj))
+ return;
+
+ mono_handle_exception (&ctx, obj, MONO_CONTEXT_GET_IP (&ctx), test_only);
+
+ restore_context (&ctx);
}
/**
gboolean
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
{
+#if defined(MONO_ARCH_USE_SIGACTION)
+ ucontext_t *ctx = (ucontext_t*)sigctx;
+
+ /*
+ * Handling the exception in the signal handler is problematic, since the original
+ * signal is disabled, and we could run arbitrary code through the debugger. So
+ * resume into the normal stack and do most work there if possible.
+ */
+ MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+ guint64 sp = UCONTEXT_REG_RSP (ctx);
+
+ /* Pass the ctx parameter in TLS */
+ mono_arch_sigctx_to_monoctx (ctx, &jit_tls->ex_ctx);
+ /* The others in registers */
+ UCONTEXT_REG_RDI (ctx) = (guint64)obj;
+ UCONTEXT_REG_RSI (ctx) = test_only;
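+ /* On SysV the first two integer arguments are passed in rdi/rsi, so
+ * handle_signal_exception (obj, test_only) receives them when the signal
+ * handler returns and execution resumes at the new rip. */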
+
+ /* Allocate a stack frame below the red zone (the 128 bytes below rsp that the SysV ABI reserves for leaf functions) */
+ sp -= 128;
+ /* The stack should be unaligned */
+ if ((sp % 16) == 0)
+ sp -= 8;
+ UCONTEXT_REG_RSP (ctx) = sp;
+
+ UCONTEXT_REG_RIP (ctx) = (guint64)handle_signal_exception;
+
+ return TRUE;
+#else
MonoContext mctx;
mono_arch_sigctx_to_monoctx (sigctx, &mctx);
+ if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))
+ return TRUE;
+
mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), test_only);
mono_arch_monoctx_to_sigctx (&mctx, sigctx);
return TRUE;
-}
-
-#ifdef MONO_ARCH_USE_SIGACTION
-static inline guint64*
-gregs_from_ucontext (ucontext_t *ctx)
-{
-#ifdef __FreeBSD__
- guint64 *gregs = (guint64 *) &ctx->uc_mcontext;
-#else
- guint64 *gregs = (guint64 *) &ctx->uc_mcontext.gregs;
#endif
-
- return gregs;
}
-#endif
+
void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
-#ifdef MONO_ARCH_USE_SIGACTION
+#if defined(__native_client_codegen__) || defined(__native_client__)
+ printf ("WARNING: mono_arch_sigctx_to_monoctx() called!\n");
+#endif
+
+#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
- guint64 *gregs = gregs_from_ucontext (ctx);
-
- mctx->rax = gregs [REG_RAX];
- mctx->rbx = gregs [REG_RBX];
- mctx->rcx = gregs [REG_RCX];
- mctx->rdx = gregs [REG_RDX];
- mctx->rbp = gregs [REG_RBP];
- mctx->rsp = gregs [REG_RSP];
- mctx->rsi = gregs [REG_RSI];
- mctx->rdi = gregs [REG_RDI];
- mctx->rip = gregs [REG_RIP];
- mctx->r12 = gregs [REG_R12];
- mctx->r13 = gregs [REG_R13];
- mctx->r14 = gregs [REG_R14];
- mctx->r15 = gregs [REG_R15];
+ mctx->rax = UCONTEXT_REG_RAX (ctx);
+ mctx->rbx = UCONTEXT_REG_RBX (ctx);
+ mctx->rcx = UCONTEXT_REG_RCX (ctx);
+ mctx->rdx = UCONTEXT_REG_RDX (ctx);
+ mctx->rbp = UCONTEXT_REG_RBP (ctx);
+ mctx->rsp = UCONTEXT_REG_RSP (ctx);
+ mctx->rsi = UCONTEXT_REG_RSI (ctx);
+ mctx->rdi = UCONTEXT_REG_RDI (ctx);
+ mctx->rip = UCONTEXT_REG_RIP (ctx);
+ mctx->r12 = UCONTEXT_REG_R12 (ctx);
+ mctx->r13 = UCONTEXT_REG_R13 (ctx);
+ mctx->r14 = UCONTEXT_REG_R14 (ctx);
+ mctx->r15 = UCONTEXT_REG_R15 (ctx);
#else
MonoContext *ctx = (MonoContext *)sigctx;
void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
-#ifdef MONO_ARCH_USE_SIGACTION
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ printf ("WARNING: mono_arch_monoctx_to_sigctx() called!\n");
+#endif
+
+#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
- guint64 *gregs = gregs_from_ucontext (ctx);
-
- gregs [REG_RAX] = mctx->rax;
- gregs [REG_RBX] = mctx->rbx;
- gregs [REG_RCX] = mctx->rcx;
- gregs [REG_RDX] = mctx->rdx;
- gregs [REG_RBP] = mctx->rbp;
- gregs [REG_RSP] = mctx->rsp;
- gregs [REG_RSI] = mctx->rsi;
- gregs [REG_RDI] = mctx->rdi;
- gregs [REG_RIP] = mctx->rip;
- gregs [REG_R12] = mctx->r12;
- gregs [REG_R13] = mctx->r13;
- gregs [REG_R14] = mctx->r14;
- gregs [REG_R15] = mctx->r15;
+ UCONTEXT_REG_RAX (ctx) = mctx->rax;
+ UCONTEXT_REG_RBX (ctx) = mctx->rbx;
+ UCONTEXT_REG_RCX (ctx) = mctx->rcx;
+ UCONTEXT_REG_RDX (ctx) = mctx->rdx;
+ UCONTEXT_REG_RBP (ctx) = mctx->rbp;
+ UCONTEXT_REG_RSP (ctx) = mctx->rsp;
+ UCONTEXT_REG_RSI (ctx) = mctx->rsi;
+ UCONTEXT_REG_RDI (ctx) = mctx->rdi;
+ UCONTEXT_REG_RIP (ctx) = mctx->rip;
+ UCONTEXT_REG_R12 (ctx) = mctx->r12;
+ UCONTEXT_REG_R13 (ctx) = mctx->r13;
+ UCONTEXT_REG_R14 (ctx) = mctx->r14;
+ UCONTEXT_REG_R15 (ctx) = mctx->r15;
#else
MonoContext *ctx = (MonoContext *)sigctx;
gpointer
mono_arch_ip_from_context (void *sigctx)
{
-
-#ifdef MONO_ARCH_USE_SIGACTION
-
+#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
- guint64 *gregs = gregs_from_ucontext (ctx);
-
- return (gpointer)gregs [REG_RIP];
+ return (gpointer)UCONTEXT_REG_RIP (ctx);
#else
MonoContext *ctx = sigctx;
return (gpointer)ctx->rip;
sp -= 1;
/* the return addr */
sp [0] = (gpointer)(mctx->rip);
- mctx->rip = (unsigned long)restore_soft_guard_pages;
- mctx->rsp = (unsigned long)sp;
+ mctx->rip = (guint64)restore_soft_guard_pages;
+ mctx->rsp = (guint64)sp;
}
static void
void (*restore_context) (MonoContext *);
MonoContext mctx;
- restore_context = mono_arch_get_restore_context ();
+ restore_context = mono_get_restore_context ();
mono_arch_sigctx_to_monoctx (sigctx, &mctx);
+
+ if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
+ if (stack_ovf)
+ prepare_for_guard_pages (&mctx);
+ restore_context (&mctx);
+ }
+
mono_handle_exception (&mctx, obj, MONO_CONTEXT_GET_IP (&mctx), FALSE);
if (stack_ovf)
prepare_for_guard_pages (&mctx);
void
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
{
-#ifdef MONO_ARCH_USE_SIGACTION
+#if defined(MONO_ARCH_USE_SIGACTION) && defined(UCONTEXT_GREGS)
MonoException *exc = NULL;
ucontext_t *ctx = (ucontext_t*)sigctx;
- guint64 *gregs = gregs_from_ucontext (ctx);
- MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (gpointer)gregs [REG_RIP]);
+ MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_RIP (sigctx), NULL);
gpointer *sp;
int frame_size;
frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4 + 128;
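+ /* round the frame size up to a multiple of 16, keeping the new stack 16 byte aligned */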
frame_size += 15;
frame_size &= ~15;
- sp = (gpointer)(gregs [REG_RSP] & ~15);
+ sp = (gpointer)(UCONTEXT_REG_RSP (sigctx) & ~15);
sp = (gpointer)((char*)sp - frame_size);
/* the arguments must be aligned */
- sp [-1] = (gpointer)gregs [REG_RIP];
+ sp [-1] = (gpointer)UCONTEXT_REG_RIP (sigctx);
/* may need to adjust pointers in the new struct copy, depending on the OS */
memcpy (sp + 4, ctx, sizeof (ucontext_t));
/* on return from the signal handler, execution starts in altstack_handle_and_restore() */
- gregs [REG_RIP] = (unsigned long)altstack_handle_and_restore;
- gregs [REG_RSP] = (unsigned long)(sp - 1);
- gregs [REG_RDI] = (unsigned long)(sp + 4);
- gregs [REG_RSI] = (guint64)exc;
- gregs [REG_RDX] = stack_ovf;
+ UCONTEXT_REG_RIP (sigctx) = (unsigned long)altstack_handle_and_restore;
+ UCONTEXT_REG_RSP (sigctx) = (unsigned long)(sp - 1);
+ UCONTEXT_REG_RDI (sigctx) = (unsigned long)(sp + 4);
+ UCONTEXT_REG_RSI (sigctx) = (guint64)exc;
+ UCONTEXT_REG_RDX (sigctx) = stack_ovf;
#endif
}
-static guint64
-get_original_ip (void)
+guint64
+mono_amd64_get_original_ip (void)
{
MonoLMF *lmf = mono_get_lmf ();
return lmf->rip;
}
-static gpointer
-get_throw_pending_exception (void)
+gpointer
+mono_arch_get_throw_pending_exception (MonoTrampInfo **info, gboolean aot)
{
- static guint8* start;
- static gboolean inited = FALSE;
- guint8 *code;
+ guint8 *code, *start;
guint8 *br[1];
gpointer throw_trampoline;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
+ const guint kMaxCodeSize = NACL_SIZE (128, 256);
- if (inited)
- return start;
-
- start = code = mono_global_codeman_reserve (128);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
/* We are in the frame of a managed method after a call */
/*
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Obtain the pending exception */
- amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_get_and_clear_pending_exception");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, mono_thread_get_and_clear_pending_exception);
+ }
amd64_call_reg (code, AMD64_R11);
/* Check if it is NULL, and branch */
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Obtain the original ip and clear the flag in previous_lmf */
- amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
+ }
amd64_call_reg (code, AMD64_R11);
/* Load exc */
amd64_push_reg (code, AMD64_RAX);
/* Call the throw trampoline */
- throw_trampoline = mono_arch_get_throw_exception ();
- amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_throw_exception");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ throw_trampoline = mono_get_throw_exception ();
+ amd64_mov_reg_imm (code, AMD64_R11, throw_trampoline);
+ }
/* We use a jump instead of a call so we can push the original ip on the stack */
amd64_jump_reg (code, AMD64_R11);
mono_amd64_patch (br [0], code);
/* Obtain the original ip and clear the flag in previous_lmf */
- amd64_mov_reg_imm (code, AMD64_R11, get_original_ip);
+ if (aot) {
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_get_original_ip");
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, mono_amd64_get_original_ip);
+ }
amd64_call_reg (code, AMD64_R11);
amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
/* Return to original code */
amd64_jump_reg (code, AMD64_R11);
- g_assert ((code - start) < 128);
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate (&start, kMaxCodeSize, &code);
- inited = TRUE;
+ if (info)
+ *info = mono_tramp_info_create (g_strdup ("throw_pending_exception"), start, code - start, ji, unwind_ops);
return start;
}
+static gpointer throw_pending_exception;
+
/*
* Called when a thread receives an async exception while executing unmanaged code.
* Instead of checking for this exception in the managed-to-native wrapper, we hijack
{
MonoLMF *lmf = mono_get_lmf ();
+ if (!lmf)
+ /* Not yet started */
+ return;
+
if (lmf->rsp == 0)
/* Initial LMF */
return;
/* Signal that lmf->rip is set */
lmf->previous_lmf = (gpointer)((guint64)lmf->previous_lmf | 1);
- *(gpointer*)(lmf->rsp - 8) = get_throw_pending_exception ();
+ *(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
+}
+
+GSList*
+mono_amd64_get_exception_trampolines (gboolean aot)
+{
+ MonoTrampInfo *info;
+ GSList *tramps = NULL;
+
+ mono_arch_get_throw_pending_exception (&info, aot);
+ tramps = g_slist_prepend (tramps, info);
+
+ /* LLVM needs different throw trampolines */
+ get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
+ tramps = g_slist_prepend (tramps, info);
+
+ get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
+ tramps = g_slist_prepend (tramps, info);
+
+ get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
+ tramps = g_slist_prepend (tramps, info);
+
+ return tramps;
+}
+
+void
+mono_arch_exceptions_init (void)
+{
+ GSList *tramps, *l;
+ gpointer tramp;
+
+ if (mono_aot_only) {
+ throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
+ tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
+ mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
+ tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
+ mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
+ tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
+ mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
+ } else {
+ /* Call this to avoid initialization races */
+ throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);
+
+ tramps = mono_amd64_get_exception_trampolines (FALSE);
+ for (l = tramps; l; l = l->next) {
+ MonoTrampInfo *info = l->data;
+
+ mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
+ mono_save_trampoline_xdebug_info (info);
+ mono_tramp_info_free (info);
+ }
+ g_slist_free (tramps);
+ }
+}
+
+#ifdef TARGET_WIN32
+
+/*
+ * The mono_arch_unwindinfo* methods are used to build and add
+ * function table info for each emitted method from mono. On Winx64
+ * the seh handler will not be called if the mono methods are not
+ * added to the function table.
+ *
+ * We should not need to add non-volatile register info to the
+ * table since mono stores that info elsewhere. (Except for the register
+ * used for the fp.)
+ */
+
+#define MONO_MAX_UNWIND_CODES 22
+
+typedef union _UNWIND_CODE {
+ struct {
+ guchar CodeOffset;
+ guchar UnwindOp : 4;
+ guchar OpInfo : 4;
+ };
+ gushort FrameOffset;
+} UNWIND_CODE, *PUNWIND_CODE;
+
+typedef struct _UNWIND_INFO {
+ guchar Version : 3;
+ guchar Flags : 5;
+ guchar SizeOfProlog;
+ guchar CountOfCodes;
+ guchar FrameRegister : 4;
+ guchar FrameOffset : 4;
+ /* custom size for mono, allowing for: */
+ /*UWOP_PUSH_NONVOL ebp offset = 21*/
+ /*UWOP_ALLOC_LARGE : requires 2 or 3 offset = 20*/
+ /*UWOP_SET_FPREG : requires 2 offset = 17*/
+ /*UWOP_PUSH_NONVOL offset = 15-0*/
+ UNWIND_CODE UnwindCode[MONO_MAX_UNWIND_CODES];
+
+/* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
+ * union {
+ * OPTIONAL ULONG ExceptionHandler;
+ * OPTIONAL ULONG FunctionEntry;
+ * };
+ * OPTIONAL ULONG ExceptionData[]; */
+} UNWIND_INFO, *PUNWIND_INFO;
+
+typedef struct
+{
+ RUNTIME_FUNCTION runtimeFunction;
+ UNWIND_INFO unwindInfo;
+} MonoUnwindInfo, *PMonoUnwindInfo;
+
+static void
+mono_arch_unwindinfo_create (gpointer* monoui)
+{
+ PMonoUnwindInfo newunwindinfo;
+ *monoui = newunwindinfo = g_new0 (MonoUnwindInfo, 1);
+ newunwindinfo->unwindInfo.Version = 1;
+}
+
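+/* Unwind codes are appended from the end of the UnwindCode array towards the
+ * front (codeindex counts down from MONO_MAX_UNWIND_CODES): the Win64 ABI wants
+ * the codes sorted by descending prolog offset, and this back to front fill
+ * keeps the used entries contiguous for the copy in
+ * mono_arch_unwindinfo_install_unwind_info (). */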
+void
+mono_arch_unwindinfo_add_push_nonvol (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
+{
+ PMonoUnwindInfo unwindinfo;
+ PUNWIND_CODE unwindcode;
+ guchar codeindex;
+ if (!*monoui)
+ mono_arch_unwindinfo_create (monoui);
+
+ unwindinfo = (MonoUnwindInfo*)*monoui;
+
+ if (unwindinfo->unwindInfo.CountOfCodes >= MONO_MAX_UNWIND_CODES)
+ g_error ("Larger allocation needed for the unwind information.");
+
+ codeindex = MONO_MAX_UNWIND_CODES - (++unwindinfo->unwindInfo.CountOfCodes);
+ unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
+ unwindcode->UnwindOp = 0; /*UWOP_PUSH_NONVOL*/
+ unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
+ unwindcode->OpInfo = reg;
+
+ if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
+ g_error ("Adding unwind info in wrong order.");
+
+ unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
+}
+
+void
+mono_arch_unwindinfo_add_set_fpreg (gpointer* monoui, gpointer codebegin, gpointer nextip, guchar reg )
+{
+ PMonoUnwindInfo unwindinfo;
+ PUNWIND_CODE unwindcode;
+ guchar codeindex;
+ if (!*monoui)
+ mono_arch_unwindinfo_create (monoui);
+
+ unwindinfo = (MonoUnwindInfo*)*monoui;
+
+ if (unwindinfo->unwindInfo.CountOfCodes + 1 >= MONO_MAX_UNWIND_CODES)
+ g_error ("Larger allocation needed for the unwind information.");
+
+ codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += 2);
+ unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
+ unwindcode->FrameOffset = 0; /*Assuming no frame pointer offset for mono*/
+ unwindcode++;
+ unwindcode->UnwindOp = 3; /*UWOP_SET_FPREG*/
+ unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
+ unwindcode->OpInfo = reg;
+
+ unwindinfo->unwindInfo.FrameRegister = reg;
+
+ if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
+ g_error ("Adding unwind info in wrong order.");
+
+ unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
+}
+
+void
+mono_arch_unwindinfo_add_alloc_stack (gpointer* monoui, gpointer codebegin, gpointer nextip, guint size )
+{
+ PMonoUnwindInfo unwindinfo;
+ PUNWIND_CODE unwindcode;
+ guchar codeindex;
+ guchar codesneeded;
+ if (!*monoui)
+ mono_arch_unwindinfo_create (monoui);
+
+ unwindinfo = (MonoUnwindInfo*)*monoui;
+
+ if (size < 0x8)
+ g_error ("Stack allocation must be equal to or greater than 0x8.");
+
+ if (size <= 0x80)
+ codesneeded = 1;
+ else if (size <= 0x7FFF8)
+ codesneeded = 2;
+ else
+ codesneeded = 3;
+
+ if (unwindinfo->unwindInfo.CountOfCodes + codesneeded > MONO_MAX_UNWIND_CODES)
+ g_error ("Larger allocation needed for the unwind information.");
+
+ codeindex = MONO_MAX_UNWIND_CODES - (unwindinfo->unwindInfo.CountOfCodes += codesneeded);
+ unwindcode = &unwindinfo->unwindInfo.UnwindCode[codeindex];
+
+ if (codesneeded == 1) {
+ /*The size of the allocation is
+ (the number in the OpInfo member) times 8 plus 8*/
+ unwindcode->OpInfo = (size - 8)/8;
+ unwindcode->UnwindOp = 2; /*UWOP_ALLOC_SMALL*/
+ }
+ else {
+ if (codesneeded == 3) {
+ /*the unscaled size of the allocation is recorded
+ in the next two slots in little-endian format*/
+ *((unsigned int*)(&unwindcode->FrameOffset)) = size;
+ unwindcode += 2;
+ unwindcode->OpInfo = 1;
+ }
+ else {
+ /*the size of the allocation divided by 8
+ is recorded in the next slot*/
+ unwindcode->FrameOffset = size/8;
+ unwindcode++;
+ unwindcode->OpInfo = 0;
+
+ }
+ unwindcode->UnwindOp = 1; /*UWOP_ALLOC_LARGE*/
+ }
+
+ unwindcode->CodeOffset = (((guchar*)nextip)-((guchar*)codebegin));
+
+ if (unwindinfo->unwindInfo.SizeOfProlog >= unwindcode->CodeOffset)
+ g_error ("Adding unwind info in wrong order.");
+
+ unwindinfo->unwindInfo.SizeOfProlog = unwindcode->CodeOffset;
+}
+
+guint
+mono_arch_unwindinfo_get_size (gpointer monoui)
+{
+ PMonoUnwindInfo unwindinfo;
+ if (!monoui)
+ return 0;
+
+ unwindinfo = (MonoUnwindInfo*)monoui;
+ return (8 + sizeof (MonoUnwindInfo)) -
+ (sizeof (UNWIND_CODE) * (MONO_MAX_UNWIND_CODES - unwindinfo->unwindInfo.CountOfCodes));
+}
+
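+/* Function table callback, registered below via RtlInstallFunctionTableCallback ():
+ * given a faulting ControlPc inside JITted code, return the RUNTIME_FUNCTION whose
+ * unwind data install_unwind_info () placed after the method body. Context is the
+ * code start that was passed as the callback context. */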
+PRUNTIME_FUNCTION
+MONO_GET_RUNTIME_FUNCTION_CALLBACK ( DWORD64 ControlPc, IN PVOID Context )
+{
+ MonoJitInfo *ji;
+ guint64 pos;
+ PMonoUnwindInfo targetinfo;
+ MonoDomain *domain = mono_domain_get ();
+
+ ji = mini_jit_info_table_find (domain, (char*)ControlPc, NULL);
+ if (!ji)
+ return 0;
+
+ pos = (guint64)(((char*)ji->code_start) + ji->code_size);
+
+ targetinfo = (PMonoUnwindInfo)ALIGN_TO (pos, 8);
+
+ targetinfo->runtimeFunction.UnwindData = ((DWORD64)&targetinfo->unwindInfo) - ((DWORD64)Context);
+
+ return &targetinfo->runtimeFunction;
+}
+
+void
+mono_arch_unwindinfo_install_unwind_info (gpointer* monoui, gpointer code, guint code_size)
+{
+ PMonoUnwindInfo unwindinfo, targetinfo;
+ guchar codecount;
+ guint64 targetlocation;
+ if (!*monoui)
+ return;
+
+ unwindinfo = (MonoUnwindInfo*)*monoui;
+ targetlocation = (guint64)&(((guchar*)code)[code_size]);
+ targetinfo = (PMonoUnwindInfo) ALIGN_TO (targetlocation, 8);
+
+ unwindinfo->runtimeFunction.EndAddress = code_size;
+ unwindinfo->runtimeFunction.UnwindData = ((guchar*)&targetinfo->unwindInfo) - ((guchar*)code);
+
+ memcpy (targetinfo, unwindinfo, sizeof (MonoUnwindInfo) - (sizeof (UNWIND_CODE) * MONO_MAX_UNWIND_CODES));
+
+ codecount = unwindinfo->unwindInfo.CountOfCodes;
+ if (codecount) {
+ memcpy (&targetinfo->unwindInfo.UnwindCode[0], &unwindinfo->unwindInfo.UnwindCode[MONO_MAX_UNWIND_CODES-codecount],
+ sizeof (UNWIND_CODE) * unwindinfo->unwindInfo.CountOfCodes);
+ }
+
+ g_free (unwindinfo);
+ *monoui = 0;
+
+ RtlInstallFunctionTableCallback (((DWORD64)code) | 0x3, (DWORD64)code, code_size, MONO_GET_RUNTIME_FUNCTION_CALLBACK, code, NULL);
+}
+
+#endif
+
+#if MONO_SUPPORT_TASKLETS
+MonoContinuationRestore
+mono_tasklets_arch_restore (void)
+{
+ static guint8* saved = NULL;
+ guint8 *code, *start;
+ int cont_reg = AMD64_R9; /* register usable on both call conventions */
+ const guint kMaxCodeSize = NACL_SIZE (64, 128);
+
+ if (saved)
+ return (MonoContinuationRestore)saved;
+ code = start = mono_global_codeman_reserve (kMaxCodeSize);
+ /* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
+ /* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
+ * state is in AMD64_ARG_REG2 ($rdx or $rsi)
+ * lmf_addr is in AMD64_ARG_REG3 ($r8 or $rdx)
+ * We move cont to cont_reg since we need both rcx and rdi for the copy
+ * state is moved to $rax so it's setup as the return value and we can overwrite $rsi
+ */
+ amd64_mov_reg_reg (code, cont_reg, MONO_AMD64_ARG_REG1, 8);
+ amd64_mov_reg_reg (code, AMD64_RAX, MONO_AMD64_ARG_REG2, 8);
+ /* setup the copy of the stack: rep movs copies stack_used_size bytes (count = size >> 3, 8 bytes per iteration) from saved_stack back to return_sp */
+ amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), sizeof (int));
+ amd64_shift_reg_imm (code, X86_SHR, AMD64_RCX, 3);
+ x86_cld (code);
+ amd64_mov_reg_membase (code, AMD64_RSI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, saved_stack), sizeof (gpointer));
+ amd64_mov_reg_membase (code, AMD64_RDI, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_sp), sizeof (gpointer));
+ amd64_prefix (code, X86_REP_PREFIX);
+ amd64_movsl (code);
+
+ /* now restore the registers from the LMF */
+ amd64_mov_reg_membase (code, AMD64_RCX, cont_reg, G_STRUCT_OFFSET (MonoContinuation, lmf), 8);
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbx), 8);
+ amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rbp), 8);
+ amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
+ amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
+ amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
+#if !defined(__native_client_codegen__)
+ amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
+#endif
+#ifdef TARGET_WIN32
+ amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
+ amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
+#endif
+ amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsp), 8);
+
+ /* restore the lmf chain */
+ /*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
+ x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/
+
+ /* state is already in rax */
+ amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
+ g_assert ((code - start) <= kMaxCodeSize);
+
+ nacl_global_codeman_validate (&start, kMaxCodeSize, &code);
+
+ saved = start;
+ return (MonoContinuationRestore)saved;
+}
+#endif
+
+/*
+ * mono_arch_setup_resume_sighandler_ctx:
+ *
+ * Setup CTX so execution continues at FUNC.
+ */
+void
+mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
+{
+ /*
+ * When resuming from a signal handler, the stack should be misaligned, just like right after
+ * a call.
+ */
+ if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
+ MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
+ MONO_CONTEXT_SET_IP (ctx, func);
}