#include <mono/metadata/monitor.h>
#include <mono/arch/amd64/amd64-codegen.h>
-#ifdef HAVE_VALGRIND_MEMCHECK_H
-#include <valgrind/memcheck.h>
-#endif
+#include <mono/utils/memcheck.h>
#include "mini.h"
#include "mini-amd64.h"
this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), gsctx, NULL);
- mono_domain_lock (domain);
- start = code = mono_code_manager_reserve (domain->code_mp, 20);
- mono_domain_unlock (domain);
+ start = code = mono_domain_code_reserve (domain, 20);
amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
/* FIXME: Optimize this */
return start;
}
+/*
+ * mono_arch_get_static_rgctx_trampoline:
+ *
+ * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
+ */
+gpointer
+mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
+{
+ guint8 *code, *start;
+ int buf_len;
+
+ MonoDomain *domain = mono_domain_get ();
+
+#ifdef MONO_ARCH_NOMAP32BIT
+ buf_len = 32;
+#else
+ /* AOTed code could still have a non-32 bit address */
+ if ((((guint64)addr) >> 32) == 0)
+ buf_len = 16;
+ else
+ buf_len = 30;
+#endif
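+	/*
+	 * Rough size estimate: amd64_mov_reg_imm can emit up to 10 bytes for a
+	 * 64 bit immediate, and amd64_jump_code needs 5 bytes for a rel32 jump,
+	 * or a longer mov/jmp-through-register sequence for a 64 bit target.
+	 */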
+
+ start = code = mono_domain_code_reserve (domain, buf_len);
+
+ amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
+ amd64_jump_code (code, addr);
+ g_assert ((code - start) < buf_len);
+
+ mono_arch_flush_icache (start, code - start);
+
+ return start;
+}
+
+gpointer
+mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
+{
+ guint8 *code, *start;
+ int buf_len;
+ int this_reg;
+
+ buf_len = 32;
+
+ start = code = mono_domain_code_reserve (domain, buf_len);
+
+ this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), NULL, NULL);
+
+ /* Set imt arg */
+ amd64_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
+ /* Load vtable address */
+ amd64_mov_reg_membase (code, AMD64_RAX, this_reg, 0, 8);
+ amd64_jump_membase (code, AMD64_RAX, vt_offset);
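+	/* The ret below is never reached: the jump above is unconditional. */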
+ amd64_ret (code);
+
+ g_assert ((code - start) < buf_len);
+
+ mono_arch_flush_icache (start, code - start);
+
+ return start;
+}
+
/*
* mono_arch_patch_callsite:
*
if (code [-5] != 0xe8) {
if (can_write) {
InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
-#ifdef HAVE_VALGRIND_MEMCHECK_H
VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
-#endif
}
} else {
if ((((guint64)(addr)) >> 32) != 0) {
+#ifdef MONO_ARCH_NOMAP32BIT
/* Print some diagnostics */
MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
			if (ji)
fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
g_assert_not_reached ();
+#else
+ /*
+ * This might happen when calling AOTed code. Create a thunk.
+ */
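+			/* Thunk layout: jmp *0(%rip) (ff 25 00 00 00 00), followed
+			 * immediately by the 8 byte target address it dereferences. */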
+ guint8 *thunk_start, *thunk_code;
+
+ thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
+ amd64_jump_membase (thunk_code, AMD64_RIP, 0);
+ *(guint64*)thunk_code = (guint64)addr;
+ addr = thunk_start;
+ g_assert ((((guint64)(addr)) >> 32) == 0);
+ mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
+#endif
}
g_assert ((((guint64)(orig_code)) >> 32) == 0);
if (can_write) {
InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
-#ifdef HAVE_VALGRIND_MEMCHECK_H
VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
-#endif
}
}
}
gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
if (can_write) {
InterlockedExchangePointer (got_entry, addr);
-#ifdef HAVE_VALGRIND_MEMCHECK_H
VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
-#endif
}
}
}
void
-mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
+mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
gint32 disp;
gpointer *plt_jump_table_entry;
}
void
-mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
+mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
guint8 buf [16];
- gboolean can_write = mono_breakpoint_clean_code (NULL, code, 7, buf, sizeof (buf));
+ MonoJitInfo *ji = NULL;
+ gboolean can_write;
+
+ if (mono_use_llvm) {
+ /* code - 7 might be before the start of the method */
+ /* FIXME: Avoid this expensive call somehow */
+ ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
+ }
+
+ can_write = mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 7, buf, sizeof (buf));
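+	/* buf now holds a breakpoint-free copy of the bytes starting 7 bytes
+	 * before 'code'; the cases below match on buf instead of reading the
+	 * live (possibly breakpointed) code stream. */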
if (!can_write)
return;
* really careful about the ordering of the cases. Longer sequences
* come first.
*/
- if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
+ if ((buf [0] == 0x41) && (buf [1] == 0xff) && (buf [2] == 0x15)) {
gpointer *vtable_slot;
/* call *<OFFSET>(%rip) */
- vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
+ vtable_slot = mono_get_vcall_slot_addr (code + 3, regs);
g_assert (vtable_slot);
*vtable_slot = nullified_class_init_trampoline;
- } else if (code [-2] == 0xe8) {
+ } else if (buf [2] == 0xe8) {
/* call <TARGET> */
- guint8 *buf = code - 2;
-
+ //guint8 *buf = code - 2;
+
+ /*
+ * It would be better to replace the call with nops, but that doesn't seem
+ * to work on SMP machines even when the whole call is inside a cache line.
+ * Patching the call address seems to work.
+ */
+ /*
buf [0] = 0x66;
buf [1] = 0x66;
buf [2] = 0x90;
buf [3] = 0x66;
buf [4] = 0x90;
- } else if ((code [0] == 0x41) && (code [1] == 0xff)) {
- /* call <REG> */
- /* happens on machines without MAP_32BIT like freebsd */
- /* amd64_set_reg_template is 10 bytes long */
- guint8* buf = code - 10;
-
- /* FIXME: Make this thread safe */
- /* Padding code suggested by the AMD64 Opt Manual */
- buf [0] = 0x66;
- buf [1] = 0x66;
- buf [2] = 0x66;
- buf [3] = 0x90;
- buf [4] = 0x66;
- buf [5] = 0x66;
- buf [6] = 0x66;
- buf [7] = 0x90;
- buf [8] = 0x66;
- buf [9] = 0x66;
- buf [10] = 0x90;
- buf [11] = 0x66;
- buf [12] = 0x90;
- } else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
+ */
+
+ mono_arch_patch_callsite (code - 2, code - 2 + 5, nullified_class_init_trampoline);
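+		/* nullified_class_init_trampoline returns immediately, so the
+		 * patched call site becomes a harmless no-op. */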
+ } else if ((buf [5] == 0xff) && x86_modrm_mod (buf [6]) == 3 && x86_modrm_reg (buf [6]) == 2) {
+ /* call *<reg> */
+ /* Generated by the LLVM JIT or on platforms without MAP_32BIT set */
+ guint8* buf = code;
+
+ /* FIXME: Not thread safe */
+ buf [1] = 0x90;
+ buf [2] = 0x90;
+ } else if (buf [4] == 0x90 || buf [5] == 0xeb || buf [6] == 0x66) {
/* Already changed by another thread */
;
} else {
- printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
- code [4], code [5], code [6]);
+ printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", buf [0], buf [1], buf [2], buf [3],
+ buf [4], buf [5], buf [6]);
g_assert_not_reached ();
}
}
void
-mono_arch_nullify_plt_entry (guint8 *code)
+mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
{
if (mono_aot_only && !nullified_class_init_trampoline)
nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");
- mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
+ mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
}
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
MonoJumpInfo *ji;
guint32 code_size;
-	return mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, FALSE);
+	guchar *code;
+	GSList *unwind_ops, *l;
+
+	code = mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, &unwind_ops, FALSE);
+
+	mono_save_trampoline_xdebug_info ("<generic_trampoline>", code, code_size, unwind_ops);
+
+	for (l = unwind_ops; l; l = l->next)
+ g_free (l->data);
+ g_slist_free (unwind_ops);
+
+ return code;
}
guchar*
-mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
+mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, GSList **out_unwind_ops, gboolean aot)
{
guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
- int i, lmf_offset, offset, res_offset, arg_offset, tramp_offset, saved_regs_offset;
- int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset;
+ int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
+ int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
gboolean has_caller;
+ GSList *unwind_ops = NULL;
if (tramp_type == MONO_TRAMPOLINE_JUMP)
has_caller = FALSE;
else
has_caller = TRUE;
- code = buf = mono_global_codeman_reserve (524);
+ code = buf = mono_global_codeman_reserve (538);
*ji = NULL;
- framesize = 524 + sizeof (MonoLMF);
+ framesize = 538 + sizeof (MonoLMF);
framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
orig_rsp_to_rbp_offset = 0;
code += 5;
after_r11_save_code = code;
+ // CFA = sp + 16 (the trampoline address is on the stack)
+ cfa_offset = 16;
+ mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
+ // IP saved at CFA - 8
+ mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);
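+	// (The CFA is the caller's stack pointer value at the call site; the
+	// unwind ops recorded here let the unwinder recover it anywhere in the
+	// trampoline.)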
+
/* Pop the return address off the stack */
amd64_pop_reg (code, AMD64_R11);
orig_rsp_to_rbp_offset += 8;
+ cfa_offset -= 8;
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
+
/*
* Allocate a new stack frame
*/
amd64_push_reg (code, AMD64_RBP);
+ cfa_offset += 8;
+ mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
+ mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
+
orig_rsp_to_rbp_offset -= 8;
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
+ mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
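+	// The CFA is now computed from rbp, so the rsp adjustment below needs
+	// no further unwind annotation.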
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
offset = 0;
rbp_offset = - offset;
+ offset += 8;
+ rax_offset = - offset;
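+	/* Slot for the trampoline's result (the address to call); it is read
+	 * back after the leave below, while it still sits in the red zone. */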
+
offset += 8;
tramp_offset = - offset;
amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
}
/* Save callee saved regs */
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
+ /*
+ * Save rax to the stack, after the leave instruction, this will become part of
+ * the red zone.
+ */
+ amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, 8);
+
/* Restore argument registers, r10 (needed to pass rgctx to
- static shared generic methods) and r11 (imt register for
- interface calls). */
+ static shared generic methods), r11 (imt register for
+ interface calls), and rax (needed for direct calls to C vararg functions). */
for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11)
+ if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11 || i == AMD64_RAX)
amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);
- /*
- * FIXME: When using aot-only, the called code might be a C vararg function
- * which uses %rax as well.
- * We could restore it, but we would have to use another register to store the
- * target address, and we don't have any left.
- * Also, the default AOT plt trampolines overwrite 'rax'.
- */
-
for (i = 0; i < 8; ++i)
amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));
- if (tramp_type == MONO_TRAMPOLINE_RESTORE_STACK_PROT)
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, saved_regs_offset + (AMD64_RAX * 8), 8);
-
/* Restore stack */
amd64_leave (code);
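+	/* After the leave, rsp == saved rbp + 8, so the rax slot saved at
+	 * rbp + rax_offset is now at rsp + rax_offset - 8, in the red zone. */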
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
+ /* Load result */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - 0x8, 8);
amd64_ret (code);
} else {
- /* call the compiled method */
- amd64_jump_reg (code, AMD64_RAX);
+ /* call the compiled method using the saved rax */
+ amd64_jump_membase (code, AMD64_RSP, rax_offset - 0x8);
}
- g_assert ((code - buf) <= 524);
+ g_assert ((code - buf) <= 538);
mono_arch_flush_icache (buf, code - buf);
nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
}
+ *out_unwind_ops = unwind_ops;
+
return buf;
}
else
size = 5 + 1 + 8;
- mono_domain_lock (domain);
- code = buf = mono_code_manager_reserve_align (domain->code_mp, size, 1);
- mono_domain_unlock (domain);
+ code = buf = mono_domain_code_reserve_align (domain, size, 1);
amd64_call_code (code, tramp);
/* The trampoline code will obtain the argument from the instruction stream */
mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
index = MONO_RGCTX_SLOT_INDEX (slot);
if (mrgctx)
- index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
+ index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
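+	/* An MRGCTX embeds the MonoMethodRuntimeGenericContext header in its
+	 * first pointer-sized slots, so indexing must skip past it. */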
for (depth = 0; ; ++depth) {
int size = mono_class_rgctx_get_array_size (depth, mrgctx);
for (i = 0; i < depth; ++i) {
/* load ptr to next array */
if (mrgctx && i == 0)
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (MonoMethodRuntimeGenericContext), 8);
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 8);
else
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, 8);
/* is the ptr null? */
g_assert (code - buf <= tramp_size);
+ *code_size = code - buf;
+
return buf;
}
gpointer
mono_arch_create_generic_class_init_trampoline (void)
+{
+ guint32 code_size;
+ MonoJumpInfo *ji;
+
+ return mono_arch_create_generic_class_init_trampoline_full (&code_size, &ji, FALSE);
+}
+
+gpointer
+mono_arch_create_generic_class_init_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
guint8 *tramp;
guint8 *code, *buf;
guint8 *jump;
int tramp_size;
+ *ji = NULL;
+
tramp_size = 64;
code = buf = mono_global_codeman_reserve (tramp_size);
x86_patch (jump, code);
-	tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);
-	/* jump to the actual trampoline */
-	amd64_jump_code (code, tramp);
+	if (aot) {
+		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
+		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+		amd64_jump_reg (code, AMD64_R11);
+	} else {
+		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);
+
+		/* jump to the actual trampoline */
+		amd64_jump_code (code, tramp);
+	}
mono_arch_flush_icache (buf, code - buf);
g_assert (code - buf <= tramp_size);
+ *code_size = code - buf;
+
return buf;
}
#ifdef MONO_ARCH_MONITOR_OBJECT_REG
+
gpointer
mono_arch_create_monitor_enter_trampoline (void)
{
- guint8 *tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);
+ guint32 code_size;
+ MonoJumpInfo *ji;
+
+ return mono_arch_create_monitor_enter_trampoline_full (&code_size, &ji, FALSE);
+}
+
+gpointer
+mono_arch_create_monitor_enter_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
+{
+	guint8 *tramp;
guint8 *code, *buf;
guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid;
int tramp_size;
int owner_offset, nest_offset, dummy;
+ *ji = NULL;
+
g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);
mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
- tramp_size = 64;
+ tramp_size = 96;
code = buf = mono_global_codeman_reserve (tramp_size);
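+	/*
+	 * Inlined fast path: try to cmpxchg the current TID into an unowned
+	 * synchronization record, or bump the nest count on recursive entry.
+	 * Every other case falls through to the generic trampoline.
+	 */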
if (mono_thread_get_tls_offset () != -1) {
/* MonoObject* obj is in RDI */
/* is obj null? */
- amd64_test_reg_reg (buf, AMD64_RDI, AMD64_RDI);
+ amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
/* if yes, jump to actual trampoline */
- jump_obj_null = buf;
- amd64_branch8 (buf, X86_CC_Z, -1, 1);
+ jump_obj_null = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
/* load obj->synchronization to RCX */
- amd64_mov_reg_membase (buf, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
/* is synchronization null? */
- amd64_test_reg_reg (buf, AMD64_RCX, AMD64_RCX);
+ amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
/* if yes, jump to actual trampoline */
- jump_sync_null = buf;
- amd64_branch8 (buf, X86_CC_Z, -1, 1);
+ jump_sync_null = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
- /* load MonoThread* into RDX */
- buf = mono_amd64_emit_tls_get (buf, AMD64_RDX, mono_thread_get_tls_offset ());
+ /* load MonoInternalThread* into RDX */
+ code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
/* load TID into RDX */
- amd64_mov_reg_membase (buf, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);
+ amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);
/* is synchronization->owner null? */
- amd64_alu_membase_imm_size (buf, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
/* if not, jump to next case */
- jump_tid = buf;
- amd64_branch8 (buf, X86_CC_NZ, -1, 1);
+ jump_tid = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
/* if yes, try a compare-exchange with the TID */
/* zero RAX */
- amd64_alu_reg_reg (buf, X86_XOR, AMD64_RAX, AMD64_RAX);
+ amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
/* compare and exchange */
- amd64_prefix (buf, X86_LOCK_PREFIX);
- amd64_cmpxchg_membase_reg_size (buf, AMD64_RCX, owner_offset, AMD64_RDX, 8);
+ amd64_prefix (code, X86_LOCK_PREFIX);
+ amd64_cmpxchg_membase_reg_size (code, AMD64_RCX, owner_offset, AMD64_RDX, 8);
/* if not successful, jump to actual trampoline */
- jump_cmpxchg_failed = buf;
- amd64_branch8 (buf, X86_CC_NZ, -1, 1);
+ jump_cmpxchg_failed = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
/* if successful, return */
- amd64_ret (buf);
+ amd64_ret (code);
/* next case: synchronization->owner is not null */
- x86_patch (jump_tid, buf);
+ x86_patch (jump_tid, code);
/* is synchronization->owner == TID? */
- amd64_alu_membase_reg_size (buf, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
+ amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
/* if not, jump to actual trampoline */
- jump_other_owner = buf;
- amd64_branch8 (buf, X86_CC_NZ, -1, 1);
+ jump_other_owner = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
/* if yes, increment nest */
- amd64_inc_membase_size (buf, AMD64_RCX, nest_offset, 4);
+ amd64_inc_membase_size (code, AMD64_RCX, nest_offset, 4);
/* return */
- amd64_ret (buf);
+ amd64_ret (code);
- x86_patch (jump_obj_null, buf);
- x86_patch (jump_sync_null, buf);
- x86_patch (jump_cmpxchg_failed, buf);
- x86_patch (jump_other_owner, buf);
+ x86_patch (jump_obj_null, code);
+ x86_patch (jump_sync_null, code);
+ x86_patch (jump_cmpxchg_failed, code);
+ x86_patch (jump_other_owner, code);
}
/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
- amd64_mov_reg_reg (buf, MONO_AMD64_ARG_REG1, AMD64_RDI);
+ amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI);
#endif
-	amd64_jump_code (buf, tramp);
-	mono_arch_flush_icache (buf, buf - code);
-	g_assert (buf - code <= tramp_size);
-	return code;
+	if (aot) {
+		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
+		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+		amd64_jump_reg (code, AMD64_R11);
+	} else {
+		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);
+
+		/* jump to the actual trampoline */
+		amd64_jump_code (code, tramp);
+	}
+
+	mono_arch_flush_icache (buf, code - buf);
+	g_assert (code - buf <= tramp_size);
+
+	*code_size = code - buf;
+
+	return buf;
}
gpointer
mono_arch_create_monitor_exit_trampoline (void)
{
- guint8 *tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
+ guint32 code_size;
+ MonoJumpInfo *ji;
+
+ return mono_arch_create_monitor_exit_trampoline_full (&code_size, &ji, FALSE);
+}
+
+gpointer
+mono_arch_create_monitor_exit_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
+{
+ guint8 *tramp;
guint8 *code, *buf;
guint8 *jump_obj_null, *jump_have_waiters;
guint8 *jump_next;
int tramp_size;
int owner_offset, nest_offset, entry_count_offset;
+ *ji = NULL;
+
g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);
mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);
- tramp_size = 64;
+ tramp_size = 94;
code = buf = mono_global_codeman_reserve (tramp_size);
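+	/*
+	 * Inlined fast path: only the owner may exit. A recursive exit just
+	 * decrements nest; otherwise the owner is cleared, unless entry_count
+	 * indicates waiters, which the generic trampoline must wake.
+	 */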
if (mono_thread_get_tls_offset () != -1) {
/* MonoObject* obj is in RDI */
/* is obj null? */
- amd64_test_reg_reg (buf, AMD64_RDI, AMD64_RDI);
+ amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
/* if yes, jump to actual trampoline */
- jump_obj_null = buf;
- amd64_branch8 (buf, X86_CC_Z, -1, 1);
+ jump_obj_null = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
/* load obj->synchronization to RCX */
- amd64_mov_reg_membase (buf, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
/* is synchronization null? */
- amd64_test_reg_reg (buf, AMD64_RCX, AMD64_RCX);
+ amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
/* if not, jump to next case */
- jump_next = buf;
- amd64_branch8 (buf, X86_CC_NZ, -1, 1);
+ jump_next = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
/* if yes, just return */
- amd64_ret (buf);
+ amd64_ret (code);
/* next case: synchronization is not null */
- x86_patch (jump_next, buf);
- /* load MonoThread* into RDX */
- buf = mono_amd64_emit_tls_get (buf, AMD64_RDX, mono_thread_get_tls_offset ());
+ x86_patch (jump_next, code);
+ /* load MonoInternalThread* into RDX */
+ code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
/* load TID into RDX */
- amd64_mov_reg_membase (buf, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);
+ amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);
/* is synchronization->owner == TID */
- amd64_alu_membase_reg_size (buf, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
+ amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
/* if yes, jump to next case */
- jump_next = buf;
- amd64_branch8 (buf, X86_CC_Z, -1, 1);
+ jump_next = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
/* if not, just return */
- amd64_ret (buf);
+ amd64_ret (code);
/* next case: synchronization->owner == TID */
- x86_patch (jump_next, buf);
+ x86_patch (jump_next, code);
/* is synchronization->nest == 1 */
- amd64_alu_membase_imm_size (buf, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
/* if not, jump to next case */
- jump_next = buf;
- amd64_branch8 (buf, X86_CC_NZ, -1, 1);
+ jump_next = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
/* if yes, is synchronization->entry_count zero? */
- amd64_alu_membase_imm_size (buf, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
/* if not, jump to actual trampoline */
- jump_have_waiters = buf;
- amd64_branch8 (buf, X86_CC_NZ, -1 , 1);
+ jump_have_waiters = code;
+ amd64_branch8 (code, X86_CC_NZ, -1 , 1);
/* if yes, set synchronization->owner to null and return */
- amd64_mov_membase_imm (buf, AMD64_RCX, owner_offset, 0, 8);
- amd64_ret (buf);
+ amd64_mov_membase_imm (code, AMD64_RCX, owner_offset, 0, 8);
+ amd64_ret (code);
/* next case: synchronization->nest is not 1 */
- x86_patch (jump_next, buf);
+ x86_patch (jump_next, code);
/* decrease synchronization->nest and return */
- amd64_dec_membase_size (buf, AMD64_RCX, nest_offset, 4);
- amd64_ret (buf);
+ amd64_dec_membase_size (code, AMD64_RCX, nest_offset, 4);
+ amd64_ret (code);
- x86_patch (jump_obj_null, buf);
- x86_patch (jump_have_waiters, buf);
+ x86_patch (jump_obj_null, code);
+ x86_patch (jump_have_waiters, code);
}
/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
- amd64_mov_reg_reg (buf, MONO_AMD64_ARG_REG1, AMD64_RDI);
+ amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI);
#endif
-	amd64_jump_code (buf, tramp);
-	mono_arch_flush_icache (buf, buf - code);
-	g_assert (buf - code <= tramp_size);
-	return code;
+	if (aot) {
+		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
+		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+		amd64_jump_reg (code, AMD64_R11);
+	} else {
+		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
+
+		/* jump to the actual trampoline */
+		amd64_jump_code (code, tramp);
+	}
+
+	mono_arch_flush_icache (buf, code - buf);
+	g_assert (code - buf <= tramp_size);
+
+	*code_size = code - buf;
+
+	return buf;
}
#endif