* (C) 2003 Ximian, Inc.
* Copyright 2003-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include <string.h>
amd64_patch (code, target);
}
/*
 * Where a single call argument (or return value) lives under the
 * AMD64 calling convention.  Used as the 'storage' field of ArgInfo.
 */
typedef enum {
	ArgInIReg,              /* in an integer (general purpose) register */
	ArgInFloatSSEReg,       /* in an SSE register, as a float */
	ArgInDoubleSSEReg,      /* in an SSE register, as a double */
	ArgOnStack,             /* passed on the stack (see ArgInfo.arg_size) */
	ArgValuetypeInReg,      /* valuetype in registers (see ArgInfo.pair_storage) */
	ArgValuetypeAddrInIReg, /* address of the valuetype in an integer register */
	/* gsharedvt argument passed by addr */
	ArgGSharedVtInReg,
	ArgGSharedVtOnStack,
	ArgNone /* only in pair_storage */
} ArgStorage;
-
/*
 * Location of one argument (or the return value) of a call:
 * its storage kind plus the register/stack details for that kind.
 */
typedef struct {
	gint16 offset;            /* presumably the stack offset for ArgOnStack — confirm at use sites */
	gint8 reg;                /* register number, for the in-register storage kinds */
	ArgStorage storage : 8;
	gboolean is_gsharedvt_return_value : 1;

	/* Only if storage == ArgValuetypeInReg */
	ArgStorage pair_storage [2]; /* storage of each half of the valuetype */
	gint8 pair_regs [2];         /* register of each half */
	/* The size of each pair */
	int pair_size [2];
	int nregs;                   /* number of register halves actually used */
	/* Only if storage == ArgOnStack */
	int arg_size;
} ArgInfo;
-
/*
 * Calling-convention summary for an entire signature: per-argument
 * ArgInfo records plus aggregate register/stack usage.
 * NOTE(review): args [1] looks like the pre-C99 trailing-array idiom —
 * allocations presumably reserve room for 'nargs' entries; confirm in
 * get_call_info ().
 */
typedef struct {
	int nargs;                 /* number of entries in args [] */
	guint32 stack_usage;       /* bytes of stack consumed by arguments */
	guint32 reg_usage;         /* integer registers consumed */
	guint32 freg_usage;        /* SSE registers consumed */
	gboolean need_stack_align;
	/* The index of the vret arg in the argument list */
	int vret_arg_index;
	ArgInfo ret;               /* location of the return value */
	ArgInfo sig_cookie;        /* location of the vararg signature cookie — TODO confirm */
	ArgInfo args [1];
} CallInfo;
-
/* Execute 'a' only when the compile's verbose level requests debug output. */
#define DEBUG(a) if (cfg->verbose_level > 1) a
/*
 * Integer registers used to pass the leading arguments, in argument
 * order.  Win64 passes four (RCX, RDX, R8, R9); otherwise the SysV-style
 * convention passes six starting with RDI.  return_regs holds the
 * registers used for integer return values.
 */
#ifdef TARGET_WIN32
static AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
#else
static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };

static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
#endif
-
static void inline
add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo)
{
}
}
/*
 * Number of SSE registers used for passing floating point arguments:
 * 4 on Win64, 8 on the other (SysV-style) targets.
 */
#ifdef TARGET_WIN32
#define FLOAT_PARAM_REGS 4
#else
#define FLOAT_PARAM_REGS 8
#endif
-
static void inline
add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean is_double)
{
while (quadsize [0] != 1 && quadsize [0] != 2 && quadsize [0] != 4 && quadsize [0] != 8)
quadsize [0] ++;
- while (quadsize [1] != 1 && quadsize [1] != 2 && quadsize [1] != 4 && quadsize [1] != 8)
+ while (quadsize [1] != 0 && quadsize [1] != 1 && quadsize [1] != 2 && quadsize [1] != 4 && quadsize [1] != 8)
quadsize [1] ++;
ainfo->storage = ArgValuetypeInReg;
mono_aot_register_jit_icall ("mono_amd64_throw_corlib_exception", mono_amd64_throw_corlib_exception);
mono_aot_register_jit_icall ("mono_amd64_resume_unwind", mono_amd64_resume_unwind);
mono_aot_register_jit_icall ("mono_amd64_get_original_ip", mono_amd64_get_original_ip);
-#if defined(ENABLE_GSHAREDVT)
+#if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
mono_aot_register_jit_icall ("mono_amd64_start_gsharedvt_call", mono_amd64_start_gsharedvt_call);
#endif
offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
char *mname = mono_method_full_name (cfg->method, TRUE);
- cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
- cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
+ mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s stack is too big.", mname));
g_free (mname);
return;
}
if (size >= 10000) {
/* Avoid asserts in emit_memcpy () */
- cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
- cfg->exception_message = g_strdup_printf ("Passing an argument of size '%d'.", size);
+ mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Passing an argument of size '%d'.", size));
/* Continue normally */
}
switch (cinfo->ret.storage) {
case ArgNone:
case ArgInIReg:
+ case ArgInFloatSSEReg:
+ case ArgInDoubleSSEReg:
break;
case ArgValuetypeInReg: {
ArgInfo *ainfo = &cinfo->ret;
ArgInfo *ainfo = &cinfo->args [i];
switch (ainfo->storage) {
case ArgInIReg:
+ case ArgInFloatSSEReg:
+ case ArgInDoubleSSEReg:
break;
case ArgValuetypeInReg:
if (ainfo->pair_storage [0] != ArgNone && ainfo->pair_storage [0] != ArgInIReg)
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
DynCallArgs *p = (DynCallArgs*)buf;
- int arg_index, greg, i, pindex;
+ int arg_index, greg, freg, i, pindex;
MonoMethodSignature *sig = dinfo->sig;
int buffer_offset = 0;
arg_index = 0;
greg = 0;
+ freg = 0;
pindex = 0;
if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
case MONO_TYPE_U4:
p->regs [greg ++] = *(guint32*)(arg);
break;
+ case MONO_TYPE_R4: {
+ double d;
+
+ *(float*)&d = *(float*)(arg);
+ p->has_fp = 1;
+ p->fregs [freg ++] = d;
+ break;
+ }
+ case MONO_TYPE_R8:
+ p->has_fp = 1;
+ p->fregs [freg ++] = *(double*)(arg);
+ break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (t)) {
p->regs [greg ++] = PTR_TO_GREG(*(arg));
{
ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
MonoMethodSignature *sig = dinfo->sig;
- guint8 *ret = ((DynCallArgs*)buf)->ret;
- mgreg_t res = ((DynCallArgs*)buf)->res;
+ DynCallArgs *dargs = (DynCallArgs*)buf;
+ guint8 *ret = dargs->ret;
+ mgreg_t res = dargs->res;
MonoType *sig_ret = mini_get_underlying_type (sig->ret);
switch (sig_ret->type) {
case MONO_TYPE_U8:
*(guint64*)ret = res;
break;
+ case MONO_TYPE_R4:
+ *(float*)ret = *(float*)&(dargs->fregs [0]);
+ break;
+ case MONO_TYPE_R8:
+ *(double*)ret = dargs->fregs [0];
+ break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (sig_ret)) {
*(gpointer*)ret = GREG_TO_PTR(res);
return code;
}
/*
 * REAL_PRINT_REG: debugging helper — emits native code that printf:s the
 * value of 'reg' labelled with 'text', saving and restoring RAX/RDX/RCX
 * around the call.
 *
 * NOTE(review): this looks ported from 32 bit x86.  On amd64 each push is
 * 8 bytes, so the 'ADD RSP, 3*4' cleanup presumably should be 3*8, and the
 * SysV AMD64 ABI passes printf's arguments in registers rather than on the
 * stack — confirm before re-enabling this macro.
 */
#define REAL_PRINT_REG(text,reg) \
mono_assert (reg >= 0); \
amd64_push_reg (code, AMD64_RAX); \
amd64_push_reg (code, AMD64_RDX); \
amd64_push_reg (code, AMD64_RCX); \
amd64_push_reg (code, reg); \
amd64_push_imm (code, reg); \
amd64_push_imm (code, text " %d %p\n"); \
amd64_mov_reg_imm (code, AMD64_RAX, printf); \
amd64_call_reg (code, AMD64_RAX); \
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 3*4); \
amd64_pop_reg (code, AMD64_RCX); \
amd64_pop_reg (code, AMD64_RDX); \
amd64_pop_reg (code, AMD64_RAX);
/* Byte alignment applied to loop header basic blocks. */
/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
/* A bblock is a loop start when it begins a loop body and is nested inside one. */
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
case OP_DYN_CALL: {
int i;
MonoInst *var = cfg->dyn_call_var;
+ guint8 *label;
g_assert (var->opcode == OP_REGOFFSET);
/* Save args buffer */
amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8);
+ /* Set fp arg regs */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, has_fp), sizeof (mgreg_t));
+ amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
+ label = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
+ for (i = 0; i < FLOAT_PARAM_REGS; ++i)
+ amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs) + (i * sizeof (double)));
+ amd64_patch (label, code);
+
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof(mgreg_t), sizeof(mgreg_t));
/* Save result */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
+ amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, fregs), AMD64_XMM0);
break;
}
case OP_AMD64_SAVE_SP_TO_LMF: {
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, X86_FP_C0);
amd64_pop_reg (code, AMD64_RAX);
amd64_fstp (code, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "OverflowException");
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
break;
case OP_TLS_GET: {
/* Stack alignment check */
#if 0
{
+ guint8 *buf;
+
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_RSP, 8);
amd64_alu_reg_imm (code, X86_AND, AMD64_RAX, 0xf);
amd64_alu_reg_imm (code, X86_CMP, AMD64_RAX, 0);
- x86_branch8 (code, X86_CC_EQ, 2, FALSE);
+ buf = code;
+ x86_branch8 (code, X86_CC_EQ, 1, FALSE);
amd64_breakpoint (code);
+ amd64_patch (buf, code);
}
#endif
amd64_patch (patch_info->ip.i + cfg->native_code, code);
- exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
- g_assert (exc_class);
+ exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
throw_ip = patch_info->ip.i;
//x86_breakpoint (code);
}
}
-#if defined(ENABLE_GSHAREDVT) && defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
-
-#include "../../../mono-extensions/mono/mini/mini-amd64-gsharedvt.c"
-
-#endif /* !ENABLE_GSHAREDVT */
+CallInfo*
+mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
+{
+ return get_call_info (mp, sig);
+}