-/*
- * tramp-amd64.c: JIT trampoline code for amd64
+/**
+ * \file
+ * JIT trampoline code for amd64
*
* Authors:
* Dietmar Maurer (dietmar@ximian.com)
* Zoltan Varga (vargaz@gmail.com)
+ * Johan Lorensson (lateralusx.github@gmail.com)
*
* (C) 2001 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
-#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>
#include "mini-amd64.h"
#include "debugger-agent.h"
-#if defined(__native_client_codegen__) && defined(__native_client__)
-#include <malloc.h>
-#include <nacl/nacl_dyncode.h>
+#ifdef ENABLE_INTERPRETER
+#include "interp/interp.h"
#endif
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
+#ifndef DISABLE_JIT
/*
* mono_arch_get_unbox_trampoline:
* @m: method pointer
{
guint8 *code, *start;
GSList *unwind_ops;
- int this_reg, size = NACL_SIZE (20, 32);
+ int this_reg, size = 20;
MonoDomain *domain = mono_domain_get ();
this_reg = mono_arch_get_this_arg_reg (NULL);
- start = code = (guint8 *)mono_domain_code_reserve (domain, size);
+ start = code = (guint8 *)mono_domain_code_reserve (domain, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
unwind_ops = mono_arch_get_cie_program ();
amd64_mov_reg_imm (code, AMD64_RAX, addr);
amd64_jump_reg (code, AMD64_RAX);
g_assert ((code - start) < size);
-
- nacl_domain_code_validate (domain, &start, size, &code);
+ g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
mono_arch_flush_icache (start, code - start);
- mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);
+ MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
/*
* mono_arch_get_static_rgctx_trampoline:
*
- * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
+ * Create a trampoline which sets RGCTX_REG to ARG, then jumps to ADDR.
*/
gpointer
-mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
+mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
guint8 *code, *start;
GSList *unwind_ops;
#else
/* AOTed code could still have a non-32 bit address */
if ((((guint64)addr) >> 32) == 0)
- buf_len = NACL_SIZE (16, 32);
+ buf_len = 16;
else
- buf_len = NACL_SIZE (30, 32);
+ buf_len = 30;
#endif
- start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len);
+ start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
unwind_ops = mono_arch_get_cie_program ();
- amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
+ amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, arg);
amd64_jump_code (code, addr);
g_assert ((code - start) < buf_len);
+ g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
- nacl_domain_code_validate (domain, &start, buf_len, &code);
mono_arch_flush_icache (start, code - start);
- mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
+ MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
return start;
}
+#endif /* !DISABLE_JIT */
#ifdef _WIN64
// Workaround lack of Valgrind support for 64-bit Windows
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
-#if defined(__default_codegen__)
guint8 *code;
guint8 buf [16];
gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));
addr = thunk_start;
g_assert ((((guint64)(addr)) >> 32) == 0);
mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
- mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
+ MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
}
if (can_write) {
InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
}
}
-#elif defined(__native_client__)
- /* These are essentially the same 2 cases as above, modified for NaCl*/
-
- /* Target must be bundle-aligned */
- g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
- /* Return target must be bundle-aligned */
- g_assert (((guint32)orig_code & kNaClAlignmentMask) == 0);
-
- if (orig_code[-5] == 0xe8) {
- /* Direct call */
- int ret;
- gint32 offset = (gint32)addr - (gint32)orig_code;
- guint8 buf[sizeof(gint32)];
- *((gint32*)(buf)) = offset;
- ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
- g_assert (ret == 0);
- }
-
- else if (is_nacl_call_reg_sequence (orig_code - 10) && orig_code[-16] == 0x41 && orig_code[-15] == 0xbb) {
- int ret;
- guint8 buf[sizeof(gint32)];
- *((gint32 *)(buf)) = addr;
- /* orig_code[-14] is the start of the immediate. */
- ret = nacl_dyncode_modify (orig_code - 14, buf, sizeof(gint32));
- g_assert (ret == 0);
- }
- else {
- g_assert_not_reached ();
- }
-
- return;
-#endif
}
+#ifndef DISABLE_JIT
guint8*
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
*(guint64*)thunk_code = (guint64)addr;
addr = thunk_start;
mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
- mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
+ MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
return addr;
}
+#endif /* !DISABLE_JIT */
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
gint32 disp;
gpointer *plt_jump_table_entry;
-#if defined(__default_codegen__)
/* A PLT entry: jmp *<DISP>(%rip) */
g_assert (code [0] == 0xff);
g_assert (code [1] == 0x25);
disp = *(gint32*)(code + 2);
plt_jump_table_entry = (gpointer*)(code + 6 + disp);
-#elif defined(__native_client_codegen__)
- /* A PLT entry: */
- /* mov <DISP>(%rip), %r11d */
- /* nacljmp *%r11 */
-
- /* Verify the 'mov' */
- g_assert (code [0] == 0x45);
- g_assert (code [1] == 0x8b);
- g_assert (code [2] == 0x1d);
-
- disp = *(gint32*)(code + 3);
-
- /* 7 = 3 (mov opcode) + 4 (disp) */
- /* This needs to resolve to the target of the RIP-relative offset */
- plt_jump_table_entry = (gpointer*)(code + 7 + disp);
-
-#endif /* __native_client_codegen__ */
InterlockedExchangePointer (plt_jump_table_entry, addr);
}
+#ifndef DISABLE_JIT
static void
stack_unaligned (MonoTrampolineType tramp_type)
{
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
char *tramp_name;
- guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
- int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, ctx_offset, saved_regs_offset;
+ guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
+ int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
gboolean has_caller;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- const guint kMaxCodeSize = NACL_SIZE (600, 600*2);
-
-#if defined(__native_client_codegen__)
- const guint kNaClTrampOffset = 17;
-#endif
+ const guint kMaxCodeSize = 630;
if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
has_caller = FALSE;
else
has_caller = TRUE;
- code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
+ code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
/* Compute stack frame size and offsets */
offset = 0;
offset += sizeof(mgreg_t);
rax_offset = -offset;
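+	/* Reserve a slot for a pending exception object (ex_offset). */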
+ offset += sizeof(mgreg_t);
+ ex_offset = -offset;
+
offset += sizeof(mgreg_t);
r11_save_offset = -offset;
orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
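+	/* Also record the frame-pointer allocation; this is used when emitting Windows x64 unwind data. */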
+ mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
/* Compute the trampoline address from the return address */
if (aot) {
-#if defined(__default_codegen__)
/* 7 = length of call *<offset>(rip) */
amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
-#elif defined(__native_client_codegen__)
- amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, kNaClTrampOffset);
-#endif
} else {
/* 5 = length of amd64_call_membase () */
amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
}
+ /* cfa = rbp + cfa_offset */
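+	/* Record each register's save slot as an offset from the CFA so the unwinder can restore it. */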
+ mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
}
for (i = 0; i < 8; ++i)
amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
/* Check that the stack is aligned */
-#if defined(__default_codegen__)
amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
}
mono_amd64_patch (br [0], code);
//amd64_breakpoint (code);
-#endif
if (tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
/* Obtain the trampoline argument which is encoded in the instruction stream */
if (aot) {
/* Load the GOT offset */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
-#if defined(__default_codegen__)
/*
* r11 points to a call *<offset>(%rip) instruction, load the
* pc-relative offset from the instruction itself.
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
/* 7 is the length of the call, 8 is the offset to the next got slot */
amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof(gpointer));
-#elif defined(__native_client_codegen__)
- /* The arg is hidden in a "push imm32" instruction, */
- /* add one to skip the opcode. */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, kNaClTrampOffset+1, 4);
-#endif
/* Compute the address of the GOT slot */
amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
/* Load the value */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
} else {
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
-#if defined(__default_codegen__)
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
mono_amd64_patch (br [0], code);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
mono_amd64_patch (br [1], code);
-#elif defined(__native_client_codegen__)
- /* All args are 32-bit pointers in NaCl */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
-#endif
}
amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
} else {
amd64_mov_reg_imm (code, AMD64_R11, tramp);
}
amd64_call_reg (code, AMD64_R11);
-
- /* Check for thread interruption */
- /* This is not perf critical code so no need to check the interrupt flag */
- /*
- * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
- */
amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
- if (aot) {
- code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
- } else {
- amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint);
- }
- amd64_call_reg (code, AMD64_R11);
-
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
/* Restore LMF */
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
 * Save rax to the stack; after the leave instruction, this will become part of
* the red zone.
*/
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));
+ /* Check for thread interruption */
+ /* This is not perf critical code so no need to check the interrupt flag */
+ /*
+ * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
+ */
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
+ }
+ amd64_call_reg (code, AMD64_R11);
+
+ amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
+ br_ex_check = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
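+	/* RAX holds the MonoException* returned by the checkpoint; NULL means no pending exception. */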
+
+ /*
+ * Exception case:
+ * We have an exception we want to throw in the caller's frame, so pop
+ * the trampoline frame and throw from the caller.
+ */
+#if TARGET_WIN32
+ amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
+ amd64_pop_reg (code, AMD64_RBP);
+ mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
+#else
+ amd64_leave (code);
+#endif
+ /* We are in the parent frame, the exception is in rax */
+ /*
+ * EH is initialized after trampolines, so get the address of the variable
+ * which contains throw_exception, and load it from there.
+ */
+ if (aot) {
+ /* Not really a jit icall */
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_throw_exception_addr ());
+ }
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
+ amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
+ /*
+ * We still have the original return value on the top of the stack, so the
+ * throw trampoline will use that as the throw site.
+ */
+ amd64_jump_reg (code, AMD64_R11);
+
+ /* Normal case */
+ mono_amd64_patch (br_ex_check, code);
+
	/* Restore argument registers, r10 (imt method/rgctx)
and rax (needed for direct calls to C vararg functions). */
for (i = 0; i < AMD64_NREG; ++i)
amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));
/* Restore stack */
+#if TARGET_WIN32
+ amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
+ amd64_pop_reg (code, AMD64_RBP);
+ mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
+#else
amd64_leave (code);
+#endif
cfa_offset -= sizeof (mgreg_t);
mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
-
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
/* Load result */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
}
g_assert ((code - buf) <= kMaxCodeSize);
-
- nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);
+ g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
mono_arch_flush_icache (buf, code - buf);
- mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
+ MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
tramp_name = mono_get_generic_trampoline_name (tramp_type);
*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
tramp = mono_get_trampoline_code (tramp_type);
-#if defined(__default_codegen__)
if ((((guint64)arg1) >> 32) == 0)
size = 5 + 1 + 4;
else
size += 16;
code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
}
-#elif defined(__native_client_codegen__)
- size = 5 + 1 + 4;
- /* Aligning the call site below could */
- /* add up to kNaClAlignment-1 bytes */
- size += (kNaClAlignment-1);
- size = NACL_BUNDLE_ALIGN_UP (size);
- buf = mono_domain_code_reserve_align (domain, size, kNaClAlignment);
- code = buf;
-#endif
if (far_addr) {
amd64_mov_reg_imm (code, AMD64_R11, tramp);
amd64_call_code (code, tramp);
}
/* The trampoline code will obtain the argument from the instruction stream */
-#if defined(__default_codegen__)
if ((((guint64)arg1) >> 32) == 0) {
*code = 0x4;
*(guint32*)(code + 1) = (gint64)arg1;
*(guint64*)(code + 1) = (gint64)arg1;
code += 9;
}
-#elif defined(__native_client_codegen__)
- /* For NaCl, all tramp args are 32-bit because they're pointers */
- *code = 0x68; /* push imm32 */
- *(guint32*)(code + 1) = (gint32)arg1;
- code += 5;
-#endif
g_assert ((code - buf) <= size);
if (code_len)
*code_len = size;
- nacl_domain_code_validate(domain, &buf, size, &code);
-
mono_arch_flush_icache (buf, size);
- mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));
+ MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));
return buf;
}
index -= size - 1;
}
- tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);
+ tramp_size = 64 + 8 * depth;
- code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
+ code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
unwind_ops = mono_arch_get_cie_program ();
amd64_jump_code (code, tramp);
}
- nacl_global_codeman_validate (&buf, tramp_size, &code);
mono_arch_flush_icache (buf, code - buf);
- mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
+ MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
g_assert (code - buf <= tramp_size);
+ g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
char *name = mono_get_rgctx_fetch_trampoline_name (slot);
*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
return buf;
}
+gpointer
+mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
+{
+ guint8 *code, *buf;
+ int tramp_size;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops;
+
+ g_assert (aot);
+ tramp_size = 64;
+
+ code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
+
+ unwind_ops = mono_arch_get_cie_program ();
+
+ // FIXME: Currently, we always go to the slow path.
+ /* This receives a <slot, trampoline> in the rgctx arg reg. */
+ /* Load trampoline addr */
+ amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
+ /* move the rgctx pointer to the VTABLE register */
+ amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));
+ /* Jump to the trampoline */
+ amd64_jump_reg (code, AMD64_R11);
+
+ mono_arch_flush_icache (buf, code - buf);
+ MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
+
+ g_assert (code - buf <= tramp_size);
+ g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
+
+ if (info)
+ *info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);
+
+ return buf;
+}
+
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
x86_push_imm (code, (guint64)func_arg);
amd64_call_reg (code, AMD64_R11);
}
+#endif /* !DISABLE_JIT */
-
-static void
-handler_block_trampoline_helper (gpointer *ptr)
+gpointer
+mono_amd64_handler_block_trampoline_helper (void)
{
- MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);
- *ptr = jit_tls->handler_block_return_address;
+ MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
+ return jit_tls->handler_block_return_address;
}
+#ifndef DISABLE_JIT
gpointer
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
{
- guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
guint8 *code, *buf;
int tramp_size = 64;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops;
- g_assert (!aot);
-
- code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
+ code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
unwind_ops = mono_arch_get_cie_program ();
/*
- This trampoline restore the call chain of the handler block then jumps into the code that deals with it.
- */
- if (mono_get_jit_tls_offset () != -1) {
- code = mono_amd64_emit_tls_get (code, MONO_AMD64_ARG_REG1, mono_get_jit_tls_offset ());
- amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, MONO_AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
- /* Simulate a call */
- amd64_push_reg (code, AMD64_RAX);
- mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
- amd64_jump_code (code, tramp);
+	 * This trampoline restores the call chain of the handler block and then jumps into the code that deals with it.
+ * We get here from the ret emitted by CEE_ENDFINALLY.
+ * The stack is misaligned.
+ */
+ /* Align the stack before the call to mono_amd64_handler_block_trampoline_helper() */
+#ifdef TARGET_WIN32
+ /* Also make room for the "register parameter stack area" as specified by the Windows x64 ABI (4 64-bit registers) */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8 + 4 * 8);
+#else
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+#endif
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_handler_block_trampoline_helper");
+ amd64_call_reg (code, AMD64_R11);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_RAX, mono_amd64_handler_block_trampoline_helper);
+ amd64_call_reg (code, AMD64_RAX);
+ }
+ /* Undo stack alignment */
+#ifdef TARGET_WIN32
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8 + 4 * 8);
+#else
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
+#endif
+ /* Save the result to the stack */
+ amd64_push_reg (code, AMD64_RAX);
+#ifdef TARGET_WIN32
+ /* Make room for the "register parameter stack area" as specified by the Windows x64 ABI (4 64-bit registers) */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 4 * 8);
+#endif
+ if (aot) {
+ char *name = g_strdup_printf ("trampoline_func_%d", MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, name);
+ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_R11, 8);
} else {
- /*Slow path uses a c helper*/
- amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RSP, 8);
- amd64_mov_reg_imm (code, AMD64_RAX, tramp);
- amd64_push_reg (code, AMD64_RAX);
- mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
- amd64_push_reg (code, AMD64_RAX);
- mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 24);
- amd64_jump_code (code, handler_block_trampoline_helper);
+ amd64_mov_reg_imm (code, AMD64_RAX, mono_get_trampoline_func (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD));
}
+ /* The stack is aligned */
+ amd64_call_reg (code, AMD64_RAX);
+#ifdef TARGET_WIN32
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 4 * 8);
+#endif
+ /* Load return address */
+ amd64_pop_reg (code, AMD64_RAX);
+	/* The stack is misaligned; that's what the code we branch to expects */
+ amd64_jump_reg (code, AMD64_RAX);
mono_arch_flush_icache (buf, code - buf);
- mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
+ MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
g_assert (code - buf <= tramp_size);
+ g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
*info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);
return buf;
}
+#endif /* !DISABLE_JIT */
/*
* mono_arch_get_call_target:
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
-#if defined(__native_client__) || defined(__native_client_codegen__)
- /* 18 = 3 (mov opcode) + 4 (disp) + 10 (nacljmp) + 1 (push opcode) */
- /* See aot-compiler.c arch_emit_plt_entry for details. */
- return *(guint32*)(plt_entry + 18);
-#else
return *(guint32*)(plt_entry + 6);
-#endif
}
+#ifndef DISABLE_JIT
/*
* mono_arch_create_sdb_trampoline:
*
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
- int tramp_size = 256;
+ int tramp_size = 512;
int i, framesize, ctx_offset, cfa_offset, gregs_offset;
guint8 *code, *buf;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
+ code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
framesize = 0;
#ifdef TARGET_WIN32
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
+ mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), sizeof (mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));
+#if TARGET_WIN32
+ amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
+ amd64_pop_reg (code, AMD64_RBP);
+ mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
+#else
amd64_leave (code);
+#endif
cfa_offset -= sizeof (mgreg_t);
mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
amd64_ret (code);
	mono_arch_flush_icache (buf, code - buf);
+ MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
g_assert (code - buf <= tramp_size);
+ g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
return buf;
}
-#if defined(ENABLE_GSHAREDVT) && defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
-#include "../../../mono-extensions/mono/mini/tramp-amd64-gsharedvt.c"
-#endif /* !ENABLE_GSHAREDVT */
+/*
+ * mono_arch_get_enter_icall_trampoline:
+ *
+ * A trampoline that handles the transition from interpreter into native
+ * world. It requires setting up a descriptor (InterpMethodArguments) so the
+ * trampoline can translate the arguments into the native calling convention.
+ *
+ * See also `build_args_from_sig ()` in interp.c.
+ */
+gpointer
+mono_arch_get_enter_icall_trampoline (MonoTrampInfo **info)
+{
+#ifdef ENABLE_INTERPRETER
+ const int gregs_num = INTERP_ICALL_TRAMP_IARGS;
+ const int fregs_num = INTERP_ICALL_TRAMP_FARGS;
+ guint8 *start = NULL, *code, *label_gexits [gregs_num], *label_fexits [fregs_num], *label_leave_tramp [3], *label_is_float_ret;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
+ static int farg_regs[] = {AMD64_XMM0, AMD64_XMM1, AMD64_XMM2};
+ int buf_len, i, framesize = 0, off_rbp, off_methodargs, off_targetaddr;
+
+ buf_len = 512 + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0);
+ start = code = (guint8 *) mono_global_codeman_reserve (buf_len);
+
+ off_rbp = -framesize;
+
+ framesize += sizeof (mgreg_t);
+ off_methodargs = -framesize;
+
+ framesize += sizeof (mgreg_t);
+ off_targetaddr = -framesize;
+
+ framesize += (gregs_num - PARAM_REGS) * sizeof (mgreg_t);
+
+ amd64_push_reg (code, AMD64_RBP);
+ amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (mgreg_t));
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT));
+
+ /* save InterpMethodArguments* onto stack */
+ amd64_mov_membase_reg (code, AMD64_RBP, off_methodargs, AMD64_ARG_REG2, sizeof (mgreg_t));
+
+ /* save target address on stack */
+ amd64_mov_membase_reg (code, AMD64_RBP, off_targetaddr, AMD64_ARG_REG1, sizeof (mgreg_t));
+
+ /* load pointer to InterpMethodArguments* into R11 */
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, 8);
+
+ /* move flen into RAX */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, flen), sizeof (mgreg_t));
+ /* load pointer to fargs into R11 */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, fargs), sizeof (mgreg_t));
+
+ for (i = 0; i < fregs_num; ++i) {
+ amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
+ label_fexits [i] = code;
+ x86_branch8 (code, X86_CC_Z, 0, FALSE);
+
+ amd64_sse_movsd_reg_membase (code, farg_regs [i], AMD64_R11, i * sizeof (double));
+ amd64_dec_reg_size (code, AMD64_RAX, 1);
+ }
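+	/* The loop above loads up to 'fregs_num' float args from fargs [] into XMM registers, stopping once 'flen' (in RAX) reaches zero; the early-exit branches are patched below. */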
+
+ for (i = 0; i < fregs_num; i++)
+ x86_patch (label_fexits [i], code);
+
+ /* load pointer to InterpMethodArguments* into R11 */
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, sizeof (mgreg_t));
+ /* move ilen into RAX */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, ilen), sizeof (mgreg_t));
+
+ int stack_offset = 0;
+ for (i = 0; i < gregs_num; i++) {
+ amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
+ label_gexits [i] = code;
+ x86_branch32 (code, X86_CC_Z, 0, FALSE);
+
+ /* load pointer to InterpMethodArguments* into R11 */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
+ /* load pointer to iargs into R11 */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, iargs), sizeof (mgreg_t));
+ if (i < PARAM_REGS) {
+ amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof (mgreg_t), sizeof (mgreg_t));
+ } else {
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, i * sizeof (mgreg_t), sizeof (mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, stack_offset, AMD64_R11, sizeof (mgreg_t));
+ stack_offset += sizeof (mgreg_t);
+ }
+ amd64_dec_reg_size (code, AMD64_RAX, 1);
+ }
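+	/* The loop above places the first PARAM_REGS integer args in parameter registers and copies the rest to the outgoing stack area reserved in the prologue. */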
+
+ for (i = 0; i < gregs_num; i++)
+ x86_patch (label_gexits [i], code);
+
+ /* load target addr */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_targetaddr, sizeof (mgreg_t));
+
+ /* call into native function */
+ amd64_call_reg (code, AMD64_R11);
+
+ /* load InterpMethodArguments */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
+
+ /* load is_float_ret */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, is_float_ret), sizeof (mgreg_t));
+
+ /* check if a float return value is expected */
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+
+ label_is_float_ret = code;
+ x86_branch8 (code, X86_CC_NZ, 0, FALSE);
+
+ /* greg return */
+ /* load InterpMethodArguments */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
+ /* load retval */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, retval), sizeof (mgreg_t));
+
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label_leave_tramp [0] = code;
+ x86_branch8 (code, X86_CC_Z, 0, FALSE);
+
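+	/* Store the integer return value (RAX) through the retval pointer. */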
+ amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RAX, sizeof (mgreg_t));
+
+ label_leave_tramp [1] = code;
+ x86_jump8 (code, 0);
+ /* freg return */
+ x86_patch (label_is_float_ret, code);
+ /* load InterpMethodArguments */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
+ /* load retval */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, retval), sizeof (mgreg_t));
+
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label_leave_tramp [2] = code;
+ x86_branch8 (code, X86_CC_Z, 0, FALSE);
+
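+	/* Store the float return value (XMM0) through the retval pointer. */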
+ amd64_sse_movsd_membase_reg (code, AMD64_R11, 0, AMD64_XMM0);
+
+ for (i = 0; i < 3; i++)
+ x86_patch (label_leave_tramp [i], code);
+
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT));
+ amd64_pop_reg (code, AMD64_RBP);
+ amd64_ret (code);
+
+ g_assert (code - start < buf_len);
+
+ mono_arch_flush_icache (start, code - start);
+ MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
+
+ if (info)
+ *info = mono_tramp_info_create ("enter_icall_trampoline", start, code - start, ji, unwind_ops);
+
+ return start;
+#else
+ g_assert_not_reached ();
+ return NULL;
+#endif /* ENABLE_INTERPRETER */
+}
+#endif /* !DISABLE_JIT */
+
+#ifdef DISABLE_JIT
+gpointer
+mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+gpointer
+mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+gpointer
+mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+guchar*
+mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+gpointer
+mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+gpointer
+mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+gpointer
+mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+void
+mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
+{
+ g_assert_not_reached ();
+ return;
+}
+
+guint8*
+mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+
+gpointer
+mono_arch_get_enter_icall_trampoline (MonoTrampInfo **info)
+{
+ g_assert_not_reached ();
+ return NULL;
+}
+#endif /* DISABLE_JIT */