* (C) 2001 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include <config.h>
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
char *tramp_name;
- guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
- int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, ctx_offset, saved_regs_offset;
+ guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
+ int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
gboolean has_caller;
GSList *unwind_ops = NULL;
MonoJumpInfo *ji = NULL;
- const guint kMaxCodeSize = NACL_SIZE (600, 600*2);
+ const guint kMaxCodeSize = NACL_SIZE (630, 630*2);
#if defined(__native_client_codegen__)
const guint kNaClTrampOffset = 17;
offset += sizeof(mgreg_t);
rax_offset = -offset;
+ offset += sizeof(mgreg_t);
+ ex_offset = -offset;
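+ /* Slot reserved alongside rax_offset; presumably for the exception path added below (it is not referenced in this excerpt) */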
+
offset += sizeof(mgreg_t);
r11_save_offset = -offset;
amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
}
+ /* cfa = rbp + cfa_offset */
+ mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
}
for (i = 0; i < 8; ++i)
amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
amd64_mov_reg_imm (code, AMD64_R11, tramp);
}
amd64_call_reg (code, AMD64_R11);
-
- /* Check for thread interruption */
- /* This is not perf critical code so no need to check the interrupt flag */
- /*
- * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
- */
amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
- if (aot) {
- code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
- } else {
- amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint);
- }
- amd64_call_reg (code, AMD64_R11);
-
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
/* Restore LMF */
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
* Save rax to the stack; after the leave instruction, this will become part of
* the red zone.
*/
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));
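+ /* The checkpoint call below returns a value in RAX, so the result is stashed first */
+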
+ /* Check for thread interruption */
+ /* This is not perf critical code so no need to check the interrupt flag */
+ /*
+ * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
+ */
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
+ }
+ amd64_call_reg (code, AMD64_R11);
+
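+ /* The noraise checkpoint returns the pending exception (or NULL) in RAX; */
+ /* skip the throw path below when there is nothing to throw */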
+ amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
+ br_ex_check = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
+
+ /*
+ * Exception case:
+ * We have an exception we want to throw in the caller's frame, so pop
+ * the trampoline frame and throw from the caller.
+ */
+ amd64_leave (code);
+ /* We are in the parent frame, the exception is in rax */
+ /*
+ * EH is initialized after trampolines, so get the address of the variable
+ * which contains throw_exception, and load it from there.
+ */
+ if (aot) {
+ /* Not really a jit icall */
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_throw_exception_addr ());
+ }
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
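+ /* R11 now holds the throw trampoline address; pass the exception object in RAX as the first argument */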
+ amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
+ /*
+ * We still have the original return value on the top of the stack, so the
+ * throw trampoline will use that as the throw site.
+ */
+ amd64_jump_reg (code, AMD64_R11);
+
+ /* Normal case */
+ mono_amd64_patch (br_ex_check, code);
+
/* Restore argument registers, r10 (imt method/rgctx)
and rax (needed for direct calls to C vararg functions). */
for (i = 0; i < AMD64_NREG; ++i)
cfa_offset -= sizeof (mgreg_t);
mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
-
if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
/* Load result */
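+ /* Reload the stashed result from the red-zone slot written before the checkpoint call */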
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
return buf;
}
-
-#if defined(ENABLE_GSHAREDVT) && defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
-
-#include "../../../mono-extensions/mono/mini/tramp-amd64-gsharedvt.c"
-
-#else
-
-gpointer
-mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
-{
- g_assert_not_reached ();
- return NULL;
-}
-
-gpointer
-mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
-{
- g_assert_not_reached ();
- return NULL;
-}
-
-gpointer
-mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
-{
- *info = NULL;
- return NULL;
-}
-
-#endif /* !ENABLE_GSHAREDVT */