#endif /* PLATFORM_WIN32 */
-/*
- * Can't allocate the helper methods in static arrays as on other platforms.
- */
-static MonoCodeManager *code_manager = NULL;
-static CRITICAL_SECTION code_manager_mutex;
-
-void
-mono_amd64_exceptions_init ()
-{
- InitializeCriticalSection (&code_manager_mutex);
- code_manager = mono_code_manager_new ();
-}
-
/*
* mono_arch_get_restore_context:
*
/* restore_contect (MonoContext *ctx) */
- EnterCriticalSection (&code_manager_mutex);
- start = code = mono_code_manager_reserve (code_manager, 1024);
- LeaveCriticalSection (&code_manager_mutex);
+ start = code = mono_global_codeman_reserve (256);
/* get return address */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rip), 8);
if (inited)
return start;
- EnterCriticalSection (&code_manager_mutex);
- start = code = mono_code_manager_reserve (code_manager, 64);
- LeaveCriticalSection (&code_manager_mutex);
+ start = code = mono_global_codeman_reserve (64);
/* call_filter (MonoContext *ctx, unsigned long eip) */
code = start;
static void
throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
- guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
- guint64 r14, guint64 r15, guint64 rethrow)
+ guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
+ guint64 r14, guint64 r15, guint64 rethrow)
{
static void (*restore_context) (MonoContext *);
MonoContext ctx;
if (!restore_context)
restore_context = mono_arch_get_restore_context ();
- /* adjust eip so that it point into the call instruction */
- rip -= 1;
-
ctx.rsp = rsp;
ctx.rip = rip;
ctx.rbx = rbx;
ctx.r14 = r14;
ctx.r15 = r15;
+ if (!rethrow && mono_debugger_throw_exception ((gpointer)(rip - 8), (gpointer)rsp, exc)) {
+ /*
+ * The debugger wants us to stop on the `throw' instruction.
+ * By the time we get here, it already inserted a breakpoint on
+ * eip - 8 (which is the address of the `mov %r15,%rdi ; callq throw').
+ */
+
+ /* FIXME FIXME
+ *
+ * In case of a rethrow, the JIT is emitting code like this:
+ *
+ * mov 0xffffffffffffffd0(%rbp),%rax'
+ * mov %rax,%rdi
+ * callq throw
+ *
+ * Here, restore_context() wouldn't restore the %rax register correctly.
+ */
+ ctx.rip = rip - 8;
+ ctx.rsp = rsp + 8;
+ restore_context (&ctx);
+ g_assert_not_reached ();
+ }
+
+ /* adjust eip so that it points into the call instruction */
+ ctx.rip -= 1;
+
if (mono_object_isinst (exc, mono_defaults.exception_class)) {
MonoException *mono_ex = (MonoException*)exc;
if (!rethrow)
mono_ex->stack_trace = NULL;
}
- mono_handle_exception (&ctx, exc, (gpointer)(rip + 1), FALSE);
+ mono_handle_exception (&ctx, exc, (gpointer)rip, FALSE);
restore_context (&ctx);
g_assert_not_reached ();
guint8* start;
guint8 *code;
- EnterCriticalSection (&code_manager_mutex);
- start = code = mono_code_manager_reserve (code_manager, 64);
- LeaveCriticalSection (&code_manager_mutex);
+ start = code = mono_global_codeman_reserve (64);
code = start;
amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
+ /* align stack */
+ amd64_push_imm (code, 0);
/* reverse order */
amd64_push_imm (code, rethrow);
amd64_push_reg (code, AMD64_R15);
return start;
}
+gpointer
+mono_arch_get_throw_exception_by_name (void)
+{
+ static guint8* start;
+ static gboolean inited = FALSE;
+ guint8 *code;
+
+ if (inited)
+ return start;
+
+ start = code = mono_global_codeman_reserve (64);
+
+ /* Not used on amd64 */
+ amd64_breakpoint (code);
+
+ return start;
+}
+
/**
- * mono_arch_get_throw_exception_by_name:
+ * mono_arch_get_throw_corlib_exception:
*
* Returns a function pointer which can be used to raise
* corlib exceptions. The returned function has the following
- * signature: void (*func) (char *exc_name);
+ * signature: void (*func) (guint32 ex_token, guint32 offset);
+ * Here, offset is the offset which needs to be subtracted from the caller IP
+ * to get the IP of the throw. Passing the offset has the advantage that it
+ * needs no relocations in the caller.
*/
gpointer
-mono_arch_get_throw_exception_by_name (void)
+mono_arch_get_throw_corlib_exception (void)
{
static guint8* start;
static gboolean inited = FALSE;
if (inited)
return start;
- EnterCriticalSection (&code_manager_mutex);
- start = code = mono_code_manager_reserve (code_manager, 64);
- LeaveCriticalSection (&code_manager_mutex);
-
- code = start;
+ start = code = mono_global_codeman_reserve (64);
- /* Push return address */
+ /* Push throw_ip */
amd64_push_reg (code, AMD64_RSI);
- /* Call exception_from_name */
- amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RDI, 8);
- amd64_mov_reg_imm (code, AMD64_RSI, "System");
+ /* Call exception_from_token */
+ amd64_mov_reg_reg (code, AMD64_RSI, AMD64_RDI, 8);
amd64_mov_reg_imm (code, AMD64_RDI, mono_defaults.exception_class->image);
-
- amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_name);
+ amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_token);
amd64_call_reg (code, AMD64_R11);
- /* Put the original return address at the top of the misaligned stack */
+ /* Compute throw_ip */
amd64_pop_reg (code, AMD64_RSI);
- amd64_push_reg (code, AMD64_R11);
- amd64_push_reg (code, AMD64_RSI);
+ /* return addr */
+ amd64_pop_reg (code, AMD64_RDX);
+ amd64_alu_reg_reg (code, X86_SUB, AMD64_RDX, AMD64_RSI);
+
+ /* Put the throw_ip at the top of the misaligned stack */
+ amd64_push_reg (code, AMD64_RDX);
throw_ex = (guint64)mono_arch_get_throw_exception ();
if (ji != NULL) {
int offset;
+ gboolean omit_fp = (ji->used_regs & (1 << 31)) > 0;
*new_ctx = *ctx;
* We only need to do this if the exception was raised in managed
* code, since otherwise the lmf was already popped of the stack.
*/
- if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
+ if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
+ new_ctx->rbp = (*lmf)->ebp;
new_ctx->rbx = (*lmf)->rbx;
+ new_ctx->rsp = (*lmf)->rsp;
new_ctx->r12 = (*lmf)->r12;
new_ctx->r13 = (*lmf)->r13;
new_ctx->r14 = (*lmf)->r14;
}
}
else {
- offset = -1;
+ offset = omit_fp ? 0 : -1;
/* restore caller saved registers */
for (i = 0; i < AMD64_NREG; i ++)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
- guint64 reg = *((guint64 *)ctx->SC_EBP + offset);
- offset --;
+ guint64 reg;
+
+ if (omit_fp) {
+ reg = *((guint64*)ctx->rsp + offset);
+ offset ++;
+ }
+ else {
+ reg = *((guint64 *)ctx->SC_EBP + offset);
+ offset --;
+ }
+
switch (i) {
case AMD64_RBX:
new_ctx->rbx = reg;
case AMD64_R15:
new_ctx->r15 = reg;
break;
+ case AMD64_RBP:
+ new_ctx->rbp = reg;
+ break;
default:
g_assert_not_reached ();
}
}
}
- if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
+ if (*lmf && ((*lmf) != jit_tls->first_lmf) && (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
/* remove any unused lmf */
*lmf = (*lmf)->previous_lmf;
}
- /* Pop EBP and the return address */
- new_ctx->SC_ESP = ctx->SC_EBP + (2 * sizeof (gpointer));
- /* we substract 1, so that the IP points into the call instruction */
- new_ctx->SC_EIP = *((guint64 *)ctx->SC_EBP + 1) - 1;
- new_ctx->SC_EBP = *((guint64 *)ctx->SC_EBP);
+ if (omit_fp) {
+ /* Pop frame */
+ new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
+ new_ctx->SC_EIP = *((guint64 *)new_ctx->rsp) - 1;
+ /* Pop return address */
+ new_ctx->rsp += 8;
+ }
+ else {
+ /* Pop EBP and the return address */
+ new_ctx->SC_ESP = ctx->SC_EBP + (2 * sizeof (gpointer));
+ /* we subtract 1, so that the IP points into the call instruction */
+ new_ctx->SC_EIP = *((guint64 *)ctx->SC_EBP + 1) - 1;
+ new_ctx->SC_EBP = *((guint64 *)ctx->SC_EBP);
+ }
/* Pop arguments off the stack */
{
- MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (ji->method->signature->param_count + 1));
+ MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);
- guint32 stack_to_pop = mono_arch_get_argument_info (ji->method->signature, ji->method->signature->param_count, arg_info);
+ guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
new_ctx->SC_ESP += stack_to_pop;
}
- *res = *ji;
- return res;
+ return ji;
} else if (*lmf) {
*new_ctx = *ctx;
return (gpointer)-1;
if ((ji = mono_jit_info_table_find (domain, (gpointer)(*lmf)->rip))) {
- *res = *ji;
} else {
memset (res, 0, sizeof (MonoJitInfo));
res->method = (*lmf)->method;
new_ctx->SC_RIP = (*lmf)->rip;
new_ctx->SC_RBP = (*lmf)->ebp;
+ new_ctx->SC_ESP = (*lmf)->rsp;
new_ctx->SC_RBX = (*lmf)->rbx;
new_ctx->SC_R12 = (*lmf)->r12;
new_ctx->SC_R14 = (*lmf)->r14;
new_ctx->SC_R15 = (*lmf)->r15;
- /* the lmf is always stored on the stack, so the following
- * expression points to a stack location which can be used as ESP */
- new_ctx->SC_ESP = ALIGN_TO ((guint64)&((*lmf)->rip), 16);
-
*lmf = (*lmf)->previous_lmf;
- return res;
-
+ return ji ? ji : res;
}
return NULL;