#include "cpu-amd64.h"
#include "debugger-agent.h"
-/*
- * Can't define this in mini-amd64.h cause that would turn on the generic code in
- * method-to-ir.c.
- */
-#define MONO_ARCH_IMT_REG AMD64_R11
-
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;
guint32 freg_usage;
gboolean need_stack_align;
gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
static CallInfo*
get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
- guint32 i, gr, fr;
+ guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
if (cinfo->ret.storage == ArgOnStack) {
cinfo->vtype_retaddr = TRUE;
/* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
}
break;
}
case MONO_TYPE_TYPEDBYREF:
/* Same as a valuetype with size 24 */
- add_general (&gr, &stack_size, &cinfo->ret);
- ;
+ cinfo->vtype_retaddr = TRUE;
break;
case MONO_TYPE_VOID:
break;
}
}
- /* this */
- if (sig->hasthis)
- add_general (&gr, &stack_size, cinfo->args + 0);
+ pstart = 0;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, allowing 'this' to always be passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers. The resulting register layouts are sketched below.
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
+ pstart = 1;
+ }
+ add_general (&gr, &stack_size, &cinfo->ret);
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis)
+ add_general (&gr, &stack_size, cinfo->args + 0);
+
+ if (cinfo->vtype_retaddr)
+ add_general (&gr, &stack_size, &cinfo->ret);
+ }
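+ /*
+ * Illustrative layouts (assuming the System V AMD64 convention, where
+ * ARG_REG1 is RDI and ARG_REG2 is RSI):
+ *   instance method returning a vtype on the stack:
+ *     RDI = this, RSI = vret addr, remaining args follow
+ *   calli without hasthis whose first arg is a reference type:
+ *     RDI = args [0], RSI = vret addr, args [1..] follow
+ * In both cases cinfo->vret_arg_index == 1.
+ */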
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+
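+ /* Register these throw helpers by name so full-aot code can resolve
+    calls to them. */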
+ mono_aot_register_jit_icall ("mono_amd64_throw_exception", mono_amd64_throw_exception);
+ mono_aot_register_jit_icall ("mono_amd64_throw_corlib_exception", mono_amd64_throw_corlib_exception);
+ mono_aot_register_jit_icall ("mono_amd64_get_original_ip", mono_amd64_get_original_ip);
}
/*
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage == ArgInIReg) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
+ linfo->vret_arg_index = cinfo->vret_arg_index;
}
for (i = 0; i < n; ++i) {
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
- if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
- /*
- * Can't use R11 because it is clobbered by the trampoline
- * code, and the reg value is needed by get_vcall_slot_addr.
- */
- amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
- ins->sreg1 = AMD64_RAX;
- }
-
- /*
- * Emit a few nops to simplify get_vcall_slot ().
- */
- amd64_nop (code);
- amd64_nop (code);
- amd64_nop (code);
-
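+ /* The nop padding and the RAX shuffle above are no longer needed: the
+    vcall slot is not decoded from the call site anymore, since
+    mono_arch_get_vcall_slot () is removed below. */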
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
exc_classes [nthrows] = exc_class;
exc_throw_start [nthrows] = code;
}
- amd64_mov_reg_imm (code, AMD64_ARG_REG1, exc_class->type_token);
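+ /* Pass only the type token index; the throw helper is expected to OR
+    MONO_TOKEN_TYPE_DEF back in to recover the full token. */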
+ amd64_mov_reg_imm (code, AMD64_ARG_REG1, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
patch_info->type = MONO_PATCH_INFO_NONE;
return can_write;
}
-gpointer
-mono_arch_get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
-{
- guint8 buf [10];
- guint32 reg;
- gint32 disp;
- guint8 rex = 0;
- MonoJitInfo *ji = NULL;
-
-#ifdef ENABLE_LLVM
- /* code - 9 might be before the start of the method */
- /* FIXME: Avoid this expensive call somehow */
- ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
-#endif
-
- mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
- code = buf + 9;
-
- *displacement = 0;
-
- code -= 7;
-
- /*
- * A given byte sequence can match more than case here, so we have to be
- * really careful about the ordering of the cases. Longer sequences
- * come first.
- * There are two types of calls:
- * - direct calls: 0xff address_byte 8/32 bits displacement
- * - indirect calls: nop nop nop <call>
- * The nops make sure we don't confuse the instruction preceeding an indirect
- * call with a direct call.
- */
- if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
- /* call OFFSET(%rip) */
- disp = *(guint32*)(code + 3);
- return (gpointer*)(code + disp + 7);
- } else if ((code [0] == 0xff) && (amd64_modrm_reg (code [1]) == 0x2) && (amd64_modrm_mod (code [1]) == 0x2) && (amd64_sib_index (code [2]) == 4) && (amd64_sib_scale (code [2]) == 0)) {
- /* call *[reg+disp32] using indexed addressing */
- /* The LLVM JIT emits this, and we emit it too for %r12 */
- if (IS_REX (code [-1])) {
- rex = code [-1];
- g_assert (amd64_rex_x (rex) == 0);
- }
- reg = amd64_sib_base (code [2]);
- disp = *(gint32*)(code + 3);
- } else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
- /* call *[reg+disp32] */
- if (IS_REX (code [0]))
- rex = code [0];
- reg = amd64_modrm_rm (code [2]);
- disp = *(gint32*)(code + 3);
- /* R10 is clobbered by the IMT thunk code */
- g_assert (reg != AMD64_R10);
- } else if (code [2] == 0xe8) {
- /* call <ADDR> */
- return NULL;
- } else if ((code [3] == 0xff) && (amd64_modrm_reg (code [4]) == 0x2) && (amd64_modrm_mod (code [4]) == 0x1) && (amd64_sib_index (code [5]) == 4) && (amd64_sib_scale (code [5]) == 0)) {
- /* call *[r12+disp8] using indexed addressing */
- if (IS_REX (code [2]))
- rex = code [2];
- reg = amd64_sib_base (code [5]);
- disp = *(gint8*)(code + 6);
- } else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
- /* call *%reg */
- return NULL;
- } else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
- /* call *[reg+disp8] */
- if (IS_REX (code [3]))
- rex = code [3];
- reg = amd64_modrm_rm (code [5]);
- disp = *(gint8*)(code + 6);
- //printf ("B: [%%r%d+0x%x]\n", reg, disp);
- }
- else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
- /* call *%reg */
- if (IS_REX (code [4]))
- rex = code [4];
- reg = amd64_modrm_rm (code [6]);
- disp = 0;
- }
- else
- g_assert_not_reached ();
-
- reg += amd64_rex_b (rex);
-
- /* R11 is clobbered by the trampoline code */
- g_assert (reg != AMD64_R11);
-
- *displacement = disp;
- return (gpointer)regs [reg];
-}
-
int
mono_arch_get_this_arg_reg (MonoMethodSignature *sig, MonoGenericSharingContext *gsctx, guint8 *code)
{
- int this_reg = AMD64_ARG_REG1;
-
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- CallInfo *cinfo;
-
- if (!gsctx && code)
- gsctx = mono_get_generic_context_from_code (code);
-
- cinfo = get_call_info (gsctx, NULL, sig, FALSE);
-
- if (cinfo->ret.storage != ArgValuetypeInReg)
- this_reg = AMD64_ARG_REG2;
- g_free (cinfo);
- }
-
- return this_reg;
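+ /* get_call_info () now emits the vret arg after 'this', so 'this' is
+    always passed in the first arg reg; see the layout comment there. */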
+ return AMD64_ARG_REG1;
}
gpointer
return cached;
if (mono_aot_only)
- start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
+ start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
else
start = get_delegate_invoke_impl (TRUE, 0, NULL);
if (mono_aot_only) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
- start = mono_aot_get_named_code (name);
+ start = mono_aot_get_trampoline (name);
g_free (name);
} else {
start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R10, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
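+ /* With MONO_ARCH_IMT_REG no longer defined as R11 (see the removed
+    define above), R11 is usable as the scratch reg here, leaving R10
+    preserved for calls which require a runtime generic context. */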
+ amd64_mov_reg_imm (code, AMD64_R11, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
}
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
- /* See the comment below about R10 */
if (item->has_target_code) {
- amd64_mov_reg_imm (code, AMD64_R10, item->value.target_code);
- amd64_jump_reg (code, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, item->value.target_code);
+ amd64_jump_reg (code, AMD64_R11);
} else {
- amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, AMD64_R11, 0);
}
if (fail_case) {
amd64_patch (item->jmp_code, code);
- amd64_mov_reg_imm (code, AMD64_R10, fail_tramp);
- amd64_jump_reg (code, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, fail_tramp);
+ amd64_jump_reg (code, AMD64_R11);
item->jmp_code = NULL;
}
} else {
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R10, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
- /* See the comment below about R10 */
- amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, AMD64_R11, 0);
amd64_patch (item->jmp_code, code);
amd64_breakpoint (code);
item->jmp_code = NULL;
#else
- /* We're using R10 here because R11
- needs to be preserved. R10 needs
- to be preserved for calls which
- require a runtime generic context,
- but interface calls don't. */
- amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, AMD64_R11, 0);
#endif
}
} else {
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R10, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
}
item->jmp_code = code;
if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
}
}
+/*
+ * mono_arch_emit_load_aotconst:
+ *
+ * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
+ * TARGET from the mscorlib GOT in full-aot code.
+ * On AMD64, the result is placed into R11.
+ */
+guint8*
+mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
+{
+ *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
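+ /* 8-byte RIP-relative load; the 0 displacement is a placeholder fixed up
+    through the patch info prepended above once the GOT slot offset is
+    known. */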
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+
+ return code;
+}
+
+/*
+ * mono_arch_get_trampolines:
+ *
+ * Return a list of MonoTrampInfo structures describing arch-specific trampolines
+ * for AOT.
+ */
+GSList *
+mono_arch_get_trampolines (gboolean aot)
+{
+ MonoTrampInfo *info;
+ GSList *tramps = NULL;
+
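+ /* Currently only the 'throw pending exception' trampoline is saved
+    this way. */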
+ mono_arch_get_throw_pending_exception (&info, aot);
+
+ tramps = g_slist_append (tramps, info);
+
+ return tramps;
+}
+
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED