* (C) 2001 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/profiler-private.h>
-#include <mono/metadata/gc-internal.h>
+#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/utils/memcheck.h>
this_reg = mono_arch_get_this_arg_reg (NULL);
- start = code = mono_domain_code_reserve (domain, size);
+ start = code = (guint8 *)mono_domain_code_reserve (domain, size);
unwind_ops = mono_arch_get_cie_program ();
buf_len = NACL_SIZE (30, 32);
#endif
- start = code = mono_domain_code_reserve (domain, buf_len);
+ start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len);
unwind_ops = mono_arch_get_cie_program ();
return start;
}
-gpointer
-mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
-{
- guint8 *code, *start;
- int buf_len;
- int this_reg;
-
- buf_len = 32;
-
- start = code = mono_domain_code_reserve (domain, buf_len);
-
- this_reg = mono_arch_get_this_arg_reg (NULL);
-
- /* Set imt arg */
- amd64_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
- /* Load vtable address */
- amd64_mov_reg_membase (code, AMD64_RAX, this_reg, 0, 8);
- amd64_jump_membase (code, AMD64_RAX, vt_offset);
- amd64_ret (code);
-
- g_assert ((code - start) < buf_len);
-
- nacl_domain_code_validate (domain, &start, buf_len, &code);
-
- mono_arch_flush_icache (start, code - start);
- mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
-
- return start;
-}
-
#ifdef _WIN64
// Work around the lack of Valgrind support for 64-bit Windows
#define VALGRIND_DISCARD_TRANSLATIONS(...)
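+ // The no-op variadic definition lets VALGRIND_DISCARD_TRANSLATIONS call sites compile away unchanged on this platform.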
*/
guint8 *thunk_start, *thunk_code;
- thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
+ thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
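+ /*
+ * Far-jump thunk: "jmp [rip+0]" reads its 64-bit target from the quadword
+ * placed immediately after the instruction, which the store below fills in.
+ */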
amd64_jump_membase (thunk_code, AMD64_RIP, 0);
*(guint64*)thunk_code = (guint64)addr;
addr = thunk_start;
*/
guint8 *thunk_start, *thunk_code;
- thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
+ thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
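+ /* Same far-jump thunk as above: "jmp [rip+0]" followed by the 64-bit target. */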
amd64_jump_membase (thunk_code, AMD64_RIP, 0);
*(guint64*)thunk_code = (guint64)addr;
addr = thunk_start;
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
char *tramp_name;
- guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
- int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, ctx_offset, saved_regs_offset;
+ guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
+ int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
gboolean has_caller;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- const guint kMaxCodeSize = NACL_SIZE (600, 600*2);
+ const guint kMaxCodeSize = NACL_SIZE (630, 630*2);
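+ /* Leaves headroom for the interruption check and exception path emitted below */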
#if defined(__native_client_codegen__)
const guint kNaClTrampOffset = 17;
else
has_caller = TRUE;
- code = buf = mono_global_codeman_reserve (kMaxCodeSize);
+ code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
/* Compute stack frame size and offsets */
offset = 0;
offset += sizeof(mgreg_t);
rax_offset = -offset;
+ offset += sizeof(mgreg_t);
+ ex_offset = -offset;
+
offset += sizeof(mgreg_t);
r11_save_offset = -offset;
amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
}
+ /* The slot is at rbp + saved_regs_offset + i * sizeof(mgreg_t); cfa = rbp + cfa_offset */
+ mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
}
for (i = 0; i < 8; ++i)
amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
amd64_mov_reg_imm (code, AMD64_R11, tramp);
}
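+ /* Call the trampoline's C implementation, with its address in r11 */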
amd64_call_reg (code, AMD64_R11);
-
- /* Check for thread interruption */
- /* This is not perf critical code so no need to check the interrupt flag */
- /*
- * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
- */
amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
- if (aot) {
- code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
- } else {
- amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint);
- }
- amd64_call_reg (code, AMD64_R11);
-
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
/* Restore LMF */
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
* Save rax to the stack, after the leave instruction, this will become part of
* the red zone.
*/
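+ /* The result of the trampoline call was saved at res_offset above; reload it into rax */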
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));
+ /* Check for thread interruption */
+ /* This is not perf critical code so no need to check the interrupt flag */
+ /*
+ * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
+ */
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
+ }
+ amd64_call_reg (code, AMD64_R11);
+
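+ /*
+ * The _noraise checkpoint returns the pending exception object in rax instead
+ * of raising it. If rax is NULL there is nothing to throw, so branch over the
+ * exception path below; the branch displacement is a dummy (-1) patched at
+ * br_ex_check once the normal-path address is known.
+ */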
+ amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
+ br_ex_check = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
+
+ /*
+ * Exception case:
+ * We have an exception we want to throw in the caller's frame, so pop
+ * the trampoline frame and throw from the caller.
+ */
+ amd64_leave (code);
+ /* We are in the parent frame, the exception is in rax */
+ /*
+ * EH is initialized after trampolines, so get the address of the variable
+ * which contains throw_exception, and load it from there.
+ */
+ if (aot) {
+ /* Not really a jit icall */
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_throw_exception_addr ());
+ }
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
+ amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
+ /*
+ * We still have the original return address on the top of the stack, so the
+ * throw trampoline will use that as the throw site.
+ */
+ amd64_jump_reg (code, AMD64_R11);
+
+ /* Normal case */
+ mono_amd64_patch (br_ex_check, code);
+
/* Restore argument registers, r10 (imt method/rgctx)
and rax (needed for direct calls to C vararg functions). */
for (i = 0; i < AMD64_NREG; ++i)
cfa_offset -= sizeof (mgreg_t);
mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
-
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
/* Load result */
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
else
size = 5 + 1 + 8;
- code = buf = mono_domain_code_reserve_align (domain, size, 1);
+ code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
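+ /*
+ * The generic trampoline is reached through a 32-bit relative displacement,
+ * so it must be within +/-2GB of this code: the 64-bit difference shifted
+ * right by 31 is 0 or -1 exactly when it fits in sign-extended 32 bits.
+ * Otherwise take the far-address path below.
+ */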
if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
#endif
far_addr = TRUE;
size += 16;
- code = buf = mono_domain_code_reserve_align (domain, size, 1);
+ code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
}
#elif defined(__native_client_codegen__)
size = 5 + 1 + 4;
tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);
- code = buf = mono_global_codeman_reserve (tramp_size);
+ code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
unwind_ops = mono_arch_get_cie_program ();
- rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
+ rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));
if (mrgctx) {
/* get mrgctx ptr */
code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
amd64_jump_reg (code, AMD64_R11);
} else {
- tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
+ tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
/* jump to the actual trampoline */
amd64_jump_code (code, tramp);
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
/* FIXME: This is not thread safe */
- guint8 *code = ji->code_start;
+ guint8 *code = (guint8 *)ji->code_start;
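+ /* Patch the method entry so it calls func with func_arg instead of running the original code */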
amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
amd64_mov_reg_imm (code, AMD64_R11, func);
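+ /*
+ * Called from the handler block trampoline: hands the handler-block return
+ * address saved in the thread's jit_tls back through ptr.
+ */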
static void
handler_block_trampoline_helper (gpointer *ptr)
{
- MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
+ MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);
*ptr = jit_tls->handler_block_return_address;
}
g_assert (!aot);
- code = buf = mono_global_codeman_reserve (tramp_size);
+ code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
unwind_ops = mono_arch_get_cie_program ();
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- code = buf = mono_global_codeman_reserve (tramp_size);
+ code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
framesize = 0;
#ifdef TARGET_WIN32
amd64_ret (code);
mono_arch_flush_icache (code, code - buf);
+ mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
g_assert (code - buf <= tramp_size);
const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
return buf;
}
-
-#if defined(ENABLE_GSHAREDVT)
-
-#include "../../../mono-extensions/mono/mini/tramp-amd64-gsharedvt.c"
-
-#endif /* !ENABLE_GSHAREDVT */