* Zoltan Varga (vargaz@gmail.com)
*
* (C) 2001 Ximian, Inc.
+ * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
*/
#include <config.h>
amd64_jump_reg (code, AMD64_RAX);
g_assert ((code - start) < 20);
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_domain_code_validate (domain, &start, 20, &code);
-#endif
mono_arch_flush_icache (start, code - start);
amd64_jump_code (code, addr);
g_assert ((code - start) < buf_len);
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_domain_code_validate (domain, &start, buf_len, &code);
-#endif
mono_arch_flush_icache (start, code - start);
return start;
g_assert ((code - start) < buf_len);
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_domain_code_validate (domain, &start, buf_len, &code);
-#endif
mono_arch_flush_icache (start, code - start);
VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
}
} else {
- if ((((guint64)(addr)) >> 32) != 0) {
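+ /* A 32-bit relative call/jump can reach addr when the displacement from the patch site fits; +-2^30 is a conservative bound */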
+ gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));
+
+ if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
#ifdef MONO_ARCH_NOMAP32BIT
/* Print some diagnostics */
MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
#endif
}
- g_assert ((((guint64)(orig_code)) >> 32) == 0);
if (can_write) {
InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
}
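+/*
+ * stack_unaligned:
+ *
+ *   Diagnostic target for the stack alignment check emitted into the generic
+ * trampoline below: print the offending trampoline type, then abort.
+ */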
+static void
+stack_unaligned (MonoTrampolineType tramp_type)
+{
+ printf ("%d\n", tramp_type);
+ g_assert_not_reached ();
+}
+
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
gboolean has_caller;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- const guint kMaxCodeSize = NACL_SIZE (548, 548*2);
+ const guint kMaxCodeSize = NACL_SIZE (600, 600*2);
#if defined(__native_client_codegen__)
const guint kNaClTrampOffset = 17;
#endif
- if (tramp_type == MONO_TRAMPOLINE_JUMP)
+ if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
has_caller = FALSE;
else
has_caller = TRUE;
/* Pop the return address off the stack */
amd64_pop_reg (code, AMD64_R11);
- orig_rsp_to_rbp_offset += SIZEOF_REGISTER;
+ orig_rsp_to_rbp_offset += sizeof(mgreg_t);
- cfa_offset -= SIZEOF_REGISTER;
+ cfa_offset -= sizeof(mgreg_t);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
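/* The pop moved RSP up one slot, so the CFA offset shrinks by one register */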
/*
* Allocate a new stack frame
*/
amd64_push_reg (code, AMD64_RBP);
- cfa_offset += SIZEOF_REGISTER;
+ cfa_offset += sizeof(mgreg_t);
mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
- orig_rsp_to_rbp_offset -= SIZEOF_REGISTER;
- amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, SIZEOF_REGISTER);
+ orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
+ amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
offset = 0;
rbp_offset = - offset;
- offset += SIZEOF_REGISTER;
+ offset += sizeof(mgreg_t);
rax_offset = - offset;
- offset += sizeof(gpointer);
+ offset += sizeof(mgreg_t);
tramp_offset = - offset;
offset += sizeof(gpointer);
}
amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));
- offset += SIZEOF_REGISTER;
+ offset += sizeof(mgreg_t);
res_offset = - offset;
/* Save all registers */
- offset += AMD64_NREG * SIZEOF_REGISTER;
+ offset += AMD64_NREG * sizeof(mgreg_t);
saved_regs_offset = - offset;
for (i = 0; i < AMD64_NREG; ++i) {
if (i == AMD64_RBP) {
/* RAX is already saved */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
} else if (i != AMD64_R11) {
- amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
} else {
/* We have to save R11 right at the start of
the trampoline code because it's used as a
scratch register */
- amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * SIZEOF_REGISTER), i, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
g_assert (r11_save_code == after_r11_save_code);
}
}
- offset += 8 * SIZEOF_REGISTER;
+ offset += 8 * sizeof(mgreg_t);
saved_fpregs_offset = - offset;
for (i = 0; i < 8; ++i)
- amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * SIZEOF_REGISTER), i);
+ amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
+
+ /* Check that the stack is aligned */
+#if defined(__default_codegen__)
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
+ amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
+ amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
+ br [0] = code;
+ amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
+ if (aot) {
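+ /* No easy way to call into C from AOT code here, so fault deliberately by loading from address 0 */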
+ amd64_mov_reg_imm (code, AMD64_R11, 0);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_RDI, tramp_type);
+ amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
+ amd64_call_reg (code, AMD64_R11);
+ }
+ mono_amd64_patch (br [0], code);
+ //amd64_breakpoint (code);
+#endif
if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
- tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
- tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT) {
+ tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
+ tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT &&
+ tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
/* Obtain the trampoline argument which is encoded in the instruction stream */
if (aot) {
/* Load the GOT offset */
}
amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
} else {
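/* For these trampoline types the argument arrived in the first argument register, not the instruction stream */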
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * SIZEOF_REGISTER), SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
}
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
else
amd64_mov_reg_imm (code, AMD64_R11, 0);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
/* Save fp */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, sizeof(mgreg_t));
/* Save sp */
- amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, SIZEOF_REGISTER);
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
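/* framesize + 16 skips the frame, the saved rbp and the return address, yielding the caller's SP */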
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
/* Save method */
if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, sizeof(gpointer));
}
/* Save callee saved regs */
#ifdef TARGET_WIN32
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, sizeof(mgreg_t));
#endif
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, sizeof(mgreg_t));
if (aot) {
code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
/*
* Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
*/
- amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
if (aot) {
code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
} else {
}
amd64_call_reg (code, AMD64_R11);
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
/* Restore LMF */
* Save rax to the stack; after the leave instruction, this slot becomes part of
* the red zone.
*/
- amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));
/* Restore argument registers, r10 (imt method/rgxtx)
and rax (needed for direct calls to C vararg functions). */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
- amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * SIZEOF_REGISTER), SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));
for (i = 0; i < 8; ++i)
- amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * SIZEOF_REGISTER));
+ amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));
/* Restore stack */
amd64_leave (code);
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
/* Load result */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - SIZEOF_REGISTER, SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
amd64_ret (code);
} else {
/* call the compiled method using the saved rax */
- amd64_jump_membase (code, AMD64_RSP, rax_offset - SIZEOF_REGISTER);
+ amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
}
g_assert ((code - buf) <= kMaxCodeSize);
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);
-#endif
mono_arch_flush_icache (buf, code - buf);
code = buf = mono_global_codeman_reserve (16);
amd64_ret (code);
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_global_codeman_validate(&buf, 16, &code);
-#endif
mono_arch_flush_icache (buf, code - buf);
{
guint8 *code, *buf, *tramp;
int size;
+ gboolean far_addr = FALSE;
tramp = mono_get_trampoline_code (tramp_type);
size = 5 + 1 + 8;
code = buf = mono_domain_code_reserve_align (domain, size, 1);
+
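+ /* If tramp is out of 32-bit displacement range the rel32 call below cannot reach it, so re-reserve with room for a mov imm64 + indirect call */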
+ if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
+#ifndef MONO_ARCH_NOMAP32BIT
+ g_assert_not_reached ();
+#endif
+ far_addr = TRUE;
+ size += 16;
+ code = buf = mono_domain_code_reserve_align (domain, size, 1);
+ }
#elif defined(__native_client_codegen__)
size = 5 + 1 + 4;
/* Aligning the call site below could add up to kNaClAlignment-1 bytes */
code = buf;
#endif
- amd64_call_code (code, tramp);
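+ /* Far targets are reached through R11; near ones use a direct rel32 call */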
+ if (far_addr) {
+ amd64_mov_reg_imm (code, AMD64_R11, tramp);
+ amd64_call_reg (code, AMD64_R11);
+ } else {
+ amd64_call_code (code, tramp);
+ }
/* The trampoline code will obtain the argument from the instruction stream */
#if defined(__default_codegen__)
if ((((guint64)arg1) >> 32) == 0) {
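/* The upper 32 bits of arg1 are zero, so a 4-byte encoding after the call suffices */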
if (code_len)
*code_len = size;
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_domain_code_validate(domain, &buf, size, &code);
-#endif
mono_arch_flush_icache (buf, size);
amd64_jump_code (code, tramp);
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_global_codeman_validate (&buf, tramp_size, &code);
-#endif
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
amd64_jump_code (code, tramp);
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_global_codeman_validate (&buf, tramp_size, &code);
-#endif
mono_arch_flush_icache (buf, code - buf);
amd64_jump_code (code, tramp);
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_global_codeman_validate (&buf, tramp_size, &code);
-#endif
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
amd64_jump_code (code, tramp);
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_global_codeman_validate (&buf, tramp_size, &code);
-#endif
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
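/*
 * handler_block_trampoline_helper:
 *
 *   C slow path for the handler block guard: fetch the handler block return
 * address from the current thread's MonoJitTlsData.
 */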
static void
handler_block_trampoline_helper (gpointer *ptr)
{
- MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
+ MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
*ptr = jit_tls->handler_block_return_address;
}
if (mono_get_jit_tls_offset () != -1) {
code = mono_amd64_emit_tls_get (code, AMD64_RDI, mono_get_jit_tls_offset ());
- /*simulate a call*/
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RDI, G_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
+ /* Simulate a call */
+ amd64_push_reg (code, AMD64_RAX);
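+ /* The push supplies the return-address slot the target trampoline expects */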
amd64_jump_code (code, tramp);
} else {
/* Slow path uses a C helper */