}
[Category ("DYNCALL")]
+ [Category ("!FULLAOT-AMD64")]
static int test_0_arm64_dyncall_hfa_double () {
double arg1 = 1.0f;
// HFA with double members
}
[Category ("DYNCALL")]
+ [Category ("!FULLAOT-AMD64")]
static int test_0_arm64_dyncall_hfa_float () {
double arg1 = 1.0f;
var s = new Struct2 ();
[Category ("DYNCALL")]
[Category ("GSHAREDVT")]
+ [Category ("!FULLAOT-AMD64")]
static int test_0_arm64_dyncall_gsharedvt_out_hfa_double () {
/* gsharedvt out trampoline with double hfa argument */
double arg1 = 1.0f;
[Category ("DYNCALL")]
[Category ("GSHAREDVT")]
+ [Category ("!FULLAOT-AMD64")]
static int test_0_arm64_dyncall_gsharedvt_out_hfa_float () {
/* gsharedvt out trampoline with double hfa argument */
double arg1 = 1.0f;
}
[Category ("DYNCALL")]
+ [Category ("GSHAREDVT")]
+ [Category ("!FULLAOT-AMD64")]
static int test_0_arm64_dyncall_vtypebyref_ret () {
var s = new VTypeByRefStruct () { o1 = 1, o2 = 2, o3 = 3 };
Type t = typeof (Foo5<>).MakeGenericType (new Type [] { typeof (VTypeByRefStruct) });
}
}
- [Category("DYNCALL")]
+ [Category ("DYNCALL")]
+ [Category ("GSHAREDVT")]
static int test_0_arm_dyncall_reg_stack_split () {
var m = typeof (Foo6).GetMethod ("reg_stack_split_inner").MakeGenericMethod (new Type[] { typeof (long) });
var o = new Foo6 ();
}
[Category ("DYNCALL")]
+ [Category ("!FULLAOT-AMD64")]
public static int test_0_dyncall_nullable () {
int? v;
return 0;
}
+ [Category ("DYNCALL")]
public static int test_0_array_accessor_runtime_invoke_ref () {
var t = typeof (string[]);
var arr = Array.CreateInstance (typeof (string), 1);
}
[Category ("DYNCALL")]
+ [Category ("!FULLAOT-AMD64")]
public static int test_0_large_nullable_invoke () {
var s = new LargeStruct () { a = 1, b = 2, c = 3, d = 4 };
* Zoltan Varga <vargaz@gmail.com>
* Rodrigo Kumpera <kumpera@gmail.com>
* Andi McClure <andi.mcclure@xamarin.com>
+ * Johan Lorensson <johan.lorensson@xamarin.com>
*
* Copyright 2015 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
/*
* Slot mapping:
+ *
+ * System V:
* 0..5 - rdi, rsi, rdx, rcx, r8, r9
* 6..13 - xmm0..xmm7
* 14.. - stack slots
+ *
+ * Windows:
+ * 0..3 - rcx, rdx, r8, r9
+ * 4..7 - xmm0..xmm3
+ * 8.. - stack slots
+ *
*/
static inline int
map_reg (int reg)
DEBUG_AMD64_GSHAREDVT_PRINT ("-- return in (%s) out (%s) var_ret %d\n", arg_info_desc (&caller_cinfo->ret), arg_info_desc (&callee_cinfo->ret), var_ret);
if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
- /* Both the caller and the callee pass the vtype ret address in r8 */
+ /* Both the caller and the callee pass the vtype ret address in r8 (System V) or in RCX/RDX (Windows) */
g_assert (gcinfo->ret.storage == ArgValuetypeAddrInIReg || gcinfo->ret.storage == ArgGsharedvtVariableInReg);
add_to_map (map, map_reg (cinfo->ret.reg), map_reg (cinfo->ret.reg));
}
* Zoltan Varga <vargaz@gmail.com>
* Rodrigo Kumpera <kumpera@gmail.com>
* Andi McClure <andi.mcclure@xamarin.com>
+ * Johan Lorensson <johan.lorensson@xamarin.com>
*
* Copyright 2015 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
/*
* get_call_info:
*
- * Obtain information about a call according to the calling convention.
- * For AMD64, see the "System V ABI, x86-64 Architecture Processor Supplement
+ * Obtain information about a call according to the calling convention.
+ * For AMD64 System V, see the "System V ABI, x86-64 Architecture Processor Supplement
* Draft Version 0.23" document for more information.
+ * For AMD64 Windows, see "Overview of x64 Calling Conventions",
+ * https://msdn.microsoft.com/en-us/library/ms235286.aspx
*/
static CallInfo*
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
int i;
-#ifdef HOST_WIN32
- return FALSE;
-#endif
-
switch (cinfo->ret.storage) {
case ArgNone:
case ArgInIReg:
#define MONO_ARCH_HAVE_TLS_GET_REG 1
#endif
-#if !defined (TARGET_WIN32)
#define MONO_ARCH_GSHAREDVT_SUPPORTED 1
-#endif
#if defined(TARGET_APPLETVOS)
* Zoltan Varga <vargaz@gmail.com>
* Rodrigo Kumpera <kumpera@gmail.com>
* Andi McClure <andi.mcclure@xamarin.com>
+ * Johan Lorensson <johan.lorensson@xamarin.com>
*
* Copyright 2015 Xamarin, Inc (http://www.xamarin.com)
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
return start;
}
-
gpointer
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
<caller registers area>
<rgctx>
<gsharedvt info>
- <calee stack area>
- <calee reg area>
+ <callee stack area>
+ <callee reg area>
*/
/* Call start_gsharedvt_call () */
if (aot) {
code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_start_gsharedvt_call");
- amd64_call_reg (code, AMD64_R11);
+ #ifdef TARGET_WIN32
+ /* Since we are doing a call as part of setting up the stack frame, the reserved shadow stack space required by the Windows platform
+ would be allocated in the callee stack area, but currently the callee reg area is in between. The Windows calling convention dictates
+ that the caller makes room on the stack where the callee can save any parameters passed in registers. Since the Windows x64 calling
+ convention uses 4 registers for the first 4 parameters, the stack needs to be adjusted before making the call.
+ NOTE: the Windows calling convention assumes that space for all 4 registers has been reserved, regardless
+ of the number of function parameters actually used.
+ */
+ int shadow_reg_size = 0;
+
+ shadow_reg_size = ALIGN_TO (PARAM_REGS * sizeof(gpointer), MONO_ARCH_FRAME_ALIGNMENT);
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, shadow_reg_size);
+ amd64_call_reg (code, AMD64_R11);
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, shadow_reg_size);
+ #else
+ amd64_call_reg (code, AMD64_R11);
+ #endif
} else {
g_error ("no aot");
}
*/
/* Load vret_slot */
- amd64_mov_reg_membase (code, AMD64_RDI, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RDI, n_arg_regs + n_arg_fregs);
- amd64_shift_reg_imm (code, X86_SHL, AMD64_RDI, 3);
+ /* Use first input parameter register as scratch since it is volatile on all platforms */
+ amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, AMD64_R10, MONO_STRUCT_OFFSET (GSharedVtCallInfo, vret_slot), 4);
+ amd64_alu_reg_imm (code, X86_SUB, MONO_AMD64_ARG_REG1, n_arg_regs + n_arg_fregs);
+ amd64_shift_reg_imm (code, X86_SHL, MONO_AMD64_ARG_REG1, 3);
/* vret address is RBP - (framesize - caller_reg_area_offset) */
amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
- amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, AMD64_RDI);
+ amd64_alu_reg_reg (code, X86_ADD, AMD64_R11, MONO_AMD64_ARG_REG1);
/* Load ret marshal type */
/* Load vret address in R11 */
/*
Address to write return to is in the original value of the register specified by vret_arg_reg.
- This will be either RSI or RDI depending on whether this is a static call.
+ This will be either RSI, RDI (System V) or RCX, RDX (Windows) depending on whether this is a static call.
Its location:
We alloc 'framesize' bytes below RBP to save regs, info and rgctx. RSP = RBP - framesize
- We store rdi at RSP + caller_reg_area_offset + slot_index_of (register) * 8.
+ We store RDI (System V), RCX (Windows) at RSP + caller_reg_area_offset + slot_index_of (register) * 8.
address: RBP - framesize + caller_reg_area_offset + 8*slot
*/