#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
+#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-math.h>
#include "trace.h"
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;
-/* Use SSE2 instructions for fp arithmetic */
-static gboolean use_sse2 = FALSE;
+/* Use SSE2 instructions for fp arithmetic */
+static gboolean use_sse2 = !MONO_ARCH_USE_FPSTACK;
-/* xmm15 is reserved for use by some opcodes */
-#define AMD64_CALLEE_FREGS 0xef
-
-#define FPSTACK_SIZE 6
+extern const char * const amd64_desc [OP_LAST];
+static const char*const * ins_spec = amd64_desc;
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
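/* Worked example of the rounding trick above (illustrative only):
 * ALIGN_TO (13, 8) -> (13 + 7) & ~7 = 20 & ~7 = 16, while an already
 * aligned value is unchanged: ALIGN_TO (16, 8) -> 23 & ~7 = 16. */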
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif
-#define SIGNAL_STACK_SIZE (64 * 1024)
-
#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11
*/
/*
- * FIXME:
- * - Use xmm registers instead of the x87 stack
- * - Allocate arguments to global registers
- * - implement emulated opcodes
- * - (all archs) do not store trampoline addresses in method->info since they
- * are domain specific.
+ * Floating point comparison results:
+ *                  ZF PF CF
+ * A > B            0  0  0
+ * A < B            0  0  1
+ * A = B            1  0  0
+ * UNORDERED        1  1  1
*/
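/* An illustrative note on the table above: after a comisd/ucomisd-style
 * compare these flags map onto the unsigned integer condition codes:
 *
 *   A > B   ->  "above"       (CF = 0 and ZF = 0)
 *   A >= B  ->  "above/equal" (CF = 0)
 *   A = B   ->  "equal", with PF = 0 needed to rule out NaN
 *   A < B   ->  "below", but UNORDERED also sets CF, so NaN-aware code
 *               must test PF as well
 */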
#define NOT_IMPLEMENTED g_assert_not_reached ()
"xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
};
-static const char*
+const char*
mono_arch_fregname (int reg)
{
if (reg < AMD64_XMM_NREG)
	return xmm_names [reg];
else
	return "unknown";
}
-static const char*
-mono_amd64_regname (int reg, gboolean fp)
+G_GNUC_UNUSED static void
+break_count (void)
{
- if (fp)
- return mono_arch_fregname (reg);
- else
- return mono_arch_regname (reg);
+}
+
+G_GNUC_UNUSED static gboolean
+debug_count (void)
+{
+ static int count = 0;
+ count ++;
+
+ if (!getenv ("COUNT"))
+ return TRUE;
+
+ if (count == atoi (getenv ("COUNT"))) {
+ break_count ();
+ }
+
+ if (count > atoi (getenv ("COUNT"))) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static gboolean
+debug_omit_fp (void)
+{
+#if 0
+ return debug_count ();
+#else
+ return TRUE;
+#endif
}
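/* Usage sketch for the COUNT machinery above (illustrative): enable the
 * debug_count () branch in debug_omit_fp (), then bisect a miscompilation
 * by limiting how many methods get the new behaviour, e.g.
 *
 *   COUNT=1024 mono test.exe   -> works: the bug is in a later method
 *   COUNT=512  mono test.exe   -> fails: the bug is in the first 512
 *
 * break_count () exists so a debugger breakpoint can be placed on the
 * exact method where count reaches COUNT.
 */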
static inline void
/* call *<OFFSET>(%rip) */
*(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
}
+	else if (code [0] == 0xe8) {
+ /* call <DISP> */
+ gint64 disp = (guint8*)target - (guint8*)code;
+ g_assert (amd64_is_imm32 (disp));
+ x86_patch (code, (unsigned char*)target);
+ }
else
x86_patch (code, (unsigned char*)target);
}
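/* Illustrative note on the 0xe8 case above: the emitted 32 bit displacement
 * is relative to the end of the 5 byte call instruction, i.e. effectively
 *
 *   disp32 = (gint64)target - ((gint64)code + 5);
 *
 * x86_patch () performs that adjustment; the g_assert only checks that the
 * raw distance fits in 32 bits before patching.
 */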
static ArgumentClass
merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
- ArgumentClass class2;
+ ArgumentClass class2 = ARG_CLASS_NO_CLASS;
MonoType *ptype;
ptype = mono_type_get_underlying_type (type);
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
fpcw |= X86_FPCW_PREC_DOUBLE;
__asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
__asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
-
- mono_amd64_exceptions_init ();
- mono_amd64_tramp_init ();
}
/*
return opts;
}
+gboolean
+mono_amd64_is_sse2 (void)
+{
+ return use_sse2;
+}
+
static gboolean
is_regsize_var (MonoType *t) {
if (t->byref)
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
return TRUE;
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
return vars;
}
+/**
+ * mono_arch_compute_omit_fp:
+ *
+ * Determine whether the frame pointer can be eliminated.
+ */
+static void
+mono_arch_compute_omit_fp (MonoCompile *cfg)
+{
+ MonoMethodSignature *sig;
+ MonoMethodHeader *header;
+ int i;
+ CallInfo *cinfo;
+
+ if (cfg->arch.omit_fp_computed)
+ return;
+
+ header = mono_method_get_header (cfg->method);
+
+ sig = mono_method_signature (cfg->method);
+
+ cinfo = get_call_info (sig, FALSE);
+
+ /*
+ * FIXME: Remove some of the restrictions.
+ */
+ cfg->arch.omit_fp = TRUE;
+ cfg->arch.omit_fp_computed = TRUE;
+
+	/* Temporarily disable frame pointer omission when running inside the
+	 * debugger, until the debugger supports it. */
+ if (mono_debug_using_mono_debugger ())
+ cfg->arch.omit_fp = FALSE;
+
+ if (!debug_omit_fp ())
+ cfg->arch.omit_fp = FALSE;
+ /*
+ if (cfg->method->save_lmf)
+ cfg->arch.omit_fp = FALSE;
+ */
+ if (cfg->flags & MONO_CFG_HAS_ALLOCA)
+ cfg->arch.omit_fp = FALSE;
+ if (header->num_clauses)
+ cfg->arch.omit_fp = FALSE;
+ if (cfg->param_area)
+ cfg->arch.omit_fp = FALSE;
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
+ cfg->arch.omit_fp = FALSE;
+ if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
+ (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
+ cfg->arch.omit_fp = FALSE;
+ for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+ ArgInfo *ainfo = &cinfo->args [i];
+
+ if (ainfo->storage == ArgOnStack) {
+ /*
+ * The stack offset can only be determined when the frame
+ * size is known.
+ */
+ cfg->arch.omit_fp = FALSE;
+ }
+ }
+}
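/* Frame layout sketch for the two modes computed above (illustrative):
 *
 *   omit_fp = FALSE                    omit_fp = TRUE
 *   ---------------                    --------------
 *   %rbp is the frame register;        %rsp is the frame register and
 *   locals live at negative            MONO_CFG_HAS_SPILLUP is set, so
 *   offsets below it.                  locals live at positive offsets
 *                                      that are only final once the total
 *                                      frame size is known - which is why
 *                                      ArgOnStack arguments force omit_fp
 *                                      back to FALSE above.
 */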
+
GList *
mono_arch_get_global_int_regs (MonoCompile *cfg)
{
GList *regs = NULL;
+ mono_arch_compute_omit_fp (cfg);
+
+ if (cfg->arch.omit_fp)
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
+
/* We use the callee saved registers for global allocation */
regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
regs = g_list_prepend (regs, (gpointer)AMD64_R12);
}
void
-mono_arch_allocate_vars (MonoCompile *m)
+mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
gint32 *offsets;
CallInfo *cinfo;
- header = mono_method_get_header (m->method);
+ header = mono_method_get_header (cfg->method);
- sig = mono_method_signature (m->method);
+ sig = mono_method_signature (cfg->method);
cinfo = get_call_info (sig, FALSE);
+ mono_arch_compute_omit_fp (cfg);
+
/*
* We use the ABI calling conventions for managed code as well.
* Exception: valuetypes are never passed or returned in registers.
*/
- /* Locals are allocated backwards from %fp */
- m->frame_reg = AMD64_RBP;
- offset = 0;
+ if (cfg->arch.omit_fp) {
+ cfg->flags |= MONO_CFG_HAS_SPILLUP;
+ cfg->frame_reg = AMD64_RSP;
+ offset = 0;
+ } else {
+ /* Locals are allocated backwards from %fp */
+ cfg->frame_reg = AMD64_RBP;
+ offset = 0;
+ }
+
+ cfg->arch.reg_save_area_offset = offset;
/* Reserve space for caller saved registers */
for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (m->used_int_regs & (1 << i))) {
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
offset += sizeof (gpointer);
}
- if (m->method->save_lmf) {
+ if (cfg->method->save_lmf) {
/* Reserve stack space for saving LMF + argument regs */
- offset += sizeof (MonoLMF);
+ guint32 size = sizeof (MonoLMF);
+
if (lmf_tls_offset == -1)
/* Need to save argument regs too */
- offset += (AMD64_NREG * 8) + (8 * 8);
- m->arch.lmf_offset = offset;
+ size += (AMD64_NREG * 8) + (8 * 8);
+
+ if (cfg->arch.omit_fp) {
+ cfg->arch.lmf_offset = offset;
+ offset += size;
+ }
+ else {
+ offset += size;
+ cfg->arch.lmf_offset = -offset;
+ }
}
if (sig->ret->type != MONO_TYPE_VOID) {
case ArgInDoubleSSEReg:
if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
/* The register is volatile */
- m->ret->opcode = OP_REGOFFSET;
- m->ret->inst_basereg = AMD64_RBP;
- offset += 8;
- m->ret->inst_offset = - offset;
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp) {
+ cfg->ret->inst_offset = offset;
+ offset += 8;
+ } else {
+ offset += 8;
+ cfg->ret->inst_offset = -offset;
+ }
}
else {
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = cinfo->ret.reg;
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
}
break;
case ArgValuetypeInReg:
/* Allocate a local to hold the result, the epilog will copy it to the correct place */
+ g_assert (!cfg->arch.omit_fp);
offset += 16;
- m->ret->opcode = OP_REGOFFSET;
- m->ret->inst_basereg = AMD64_RBP;
- m->ret->inst_offset = - offset;
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = cfg->frame_reg;
+ cfg->ret->inst_offset = - offset;
break;
default:
g_assert_not_reached ();
}
- m->ret->dreg = m->ret->inst_c0;
+ cfg->ret->dreg = cfg->ret->inst_c0;
}
/* Allocate locals */
- offsets = mono_allocate_stack_slots (m, &locals_stack_size, &locals_stack_align);
+	offsets = mono_allocate_stack_slots_full (cfg, !cfg->arch.omit_fp, &locals_stack_size, &locals_stack_align);
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
- for (i = m->locals_start; i < m->num_varinfo; i++) {
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
if (offsets [i] != -1) {
- MonoInst *inst = m->varinfo [i];
+ MonoInst *inst = cfg->varinfo [i];
inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = AMD64_RBP;
- inst->inst_offset = - (offset + offsets [i]);
+ inst->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp)
+ inst->inst_offset = (offset + offsets [i]);
+ else
+ inst->inst_offset = - (offset + offsets [i]);
//printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
}
}
offset += locals_stack_size;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
+ g_assert (!cfg->arch.omit_fp);
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
- m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
+ cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = m->varinfo [i];
+ inst = cfg->varinfo [i];
if (inst->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
inst->dreg = ainfo->reg;
break;
case ArgOnStack:
+ g_assert (!cfg->arch.omit_fp);
inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = AMD64_RBP;
+ inst->inst_basereg = cfg->frame_reg;
inst->inst_offset = ainfo->offset + ARGS_OFFSET;
break;
case ArgValuetypeInReg:
if (!inreg && (ainfo->storage != ArgOnStack)) {
inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = AMD64_RBP;
+ inst->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
- if (ainfo->storage == ArgValuetypeInReg)
- offset += 2 * sizeof (gpointer);
- else
- offset += sizeof (gpointer);
- inst->inst_offset = - offset;
+ if (cfg->arch.omit_fp) {
+ inst->inst_offset = offset;
+ offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+ } else {
+ offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+ inst->inst_offset = - offset;
+ }
}
}
}
- m->stack_offset = offset;
+ cfg->stack_offset = offset;
g_free (cinfo);
}
else
if (sig->pinvoke)
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
- else
- size = mono_type_stack_size (&in->klass->byval_arg, &align);
+ else {
+ /*
+ * Other backends use mono_type_stack_size (), but that
+ * aligns the size to 8, which is larger than the size of
+ * the source, leading to reads of invalid memory if the
+ * source is at the end of address space.
+ */
+ size = mono_class_value_size (in->klass, &align);
+ }
if (ainfo->storage == ArgValuetypeInReg) {
if (ainfo->pair_storage [1] == ArgNone) {
MonoInst *load;
/* emit an exception if condition is fail */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
do { \
- mono_add_patch_info (cfg, code - cfg->native_code, \
- MONO_PATCH_INFO_EXC, exc_name); \
- x86_branch32 (code, cond, 0, signed); \
+ MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
+ if (tins == NULL) { \
+ mono_add_patch_info (cfg, code - cfg->native_code, \
+ MONO_PATCH_INFO_EXC, exc_name); \
+ x86_branch32 (code, cond, 0, signed); \
+ } else { \
+ EMIT_COND_BRANCH (tins, cond, signed); \
+ } \
} while (0);
#define EMIT_FPCOMPARE(code) do { \
amd64_fnstsw (code); \
} while (0);
+#define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \
+ amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \
+ amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \
+ amd64_ ##op (code); \
+ amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \
+ amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \
+} while (0);
+
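/* Usage sketch for EMIT_SSE2_FPFUNC above (hypothetical operands): it lets
 * x87-only instructions operate on values that live in xmm registers by
 * bouncing them through the red zone below %rsp, e.g. for a square root:
 *
 *   EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
 *
 * The value is spilled to [%rsp - 8], loaded onto the x87 stack, operated
 * on, then stored back into the destination xmm register.
 */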
static guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
{
- /*
- * FIXME: Emitting a call template and patching it later is expensive on
- * amd64, so try to determine the patch target immediately, and emit more
- * efficient code if possible.
- */
-
mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
- if (mono_compile_aot) {
+ if (cfg->compile_aot) {
amd64_call_membase (code, AMD64_RIP, 0);
}
else {
- amd64_set_reg_template (code, GP_SCRATCH_REG);
- amd64_call_reg (code, GP_SCRATCH_REG);
+ gboolean near_call = FALSE;
+
+ /*
+ * Indirect calls are expensive so try to make a near call if possible.
+	 * The caller's memory is allocated by the code manager, so it is
+	 * guaranteed to be within a 32 bit offset of anything else the code
+	 * manager allocates.
+ */
+
+ if (patch_type != MONO_PATCH_INFO_ABS) {
+ /* The target is in memory allocated using the code manager */
+ near_call = TRUE;
+
+ if ((patch_type == MONO_PATCH_INFO_METHOD) || (patch_type == MONO_PATCH_INFO_METHOD_JUMP)) {
+ if (((MonoMethod*)data)->klass->image->assembly->aot_module)
+ /* The callee might be an AOT method */
+ near_call = FALSE;
+ }
+
+ if (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD) {
+ /*
+ * The call might go directly to a native function without
+ * the wrapper.
+ */
+ MonoJitICallInfo *mi = mono_find_jit_icall_by_name (data);
+ if (mi) {
+ gconstpointer target = mono_icall_get_wrapper (mi);
+ if ((((guint64)target) >> 32) != 0)
+ near_call = FALSE;
+ }
+ }
+ }
+ else {
+ if (mono_find_class_init_trampoline_by_addr (data))
+ near_call = TRUE;
+ else {
+ MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
+ if (info) {
+ if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
+ strstr (cfg->method->name, info->name)) {
+ /* A call to the wrapped function */
+ if ((((guint64)data) >> 32) == 0)
+ near_call = TRUE;
+ }
+ else if (info->func == info->wrapper) {
+ /* No wrapper */
+ if ((((guint64)info->func) >> 32) == 0)
+ near_call = TRUE;
+ }
+ else
+ near_call = TRUE;
+ }
+ else if ((((guint64)data) >> 32) == 0)
+ near_call = TRUE;
+ }
+ }
+
+ if (cfg->method->dynamic)
+ /* These methods are allocated using malloc */
+ near_call = FALSE;
+
+ if (near_call) {
+ amd64_call_code (code, 0);
+ }
+ else {
+ amd64_set_reg_template (code, GP_SCRATCH_REG);
+ amd64_call_reg (code, GP_SCRATCH_REG);
+ }
}
return code;
}
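/* A minimal sketch of the reachability test emit_call applies to absolute
 * addresses (the helper name is hypothetical; the logic mirrors the checks
 * above):
 */
static inline gboolean
sketch_in_low_4gb (gconstpointer target) /* hypothetical helper */
{
	/* A direct call (0xe8) encodes a signed 32 bit displacement; emit_call
	 * treats a target whose upper 32 bits are zero as near-callable, since
	 * code manager memory is allocated at a 32 bit reachable address. */
	return (((guint64)target) >> 32) == 0;
}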
/* FIXME: Add more instructions */
-#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG))
+#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG) || ((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_ICONST) || ((ins)->opcode == OP_I8CONST) || ((ins)->opcode == OP_LOAD_MEMBASE))
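/* Why the flags check matters (illustrative): the peephole pass below
 * rewrites "reg = 0" into "xor reg, reg", which is shorter and breaks
 * dependency chains, but xor also clobbers the condition flags - so the
 * rewrite is only legal when the next instruction ignores them. */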
static void
peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
case OP_I8CONST:
/* reg = 0 -> XOR (reg, reg) */
/* XOR sets cflags on x86, so we can't do it always */
- if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
+ if (ins->inst_c0 == 0 && (ins->next && INST_IGNORES_CFLAGS (ins->next))) {
ins->opcode = CEE_XOR;
ins->sreg1 = ins->dreg;
ins->sreg2 = ins->dreg;
bb->last_ins = last_ins;
}
+static void
+insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
+{
+ if (ins == NULL) {
+ ins = bb->code;
+ bb->code = to_insert;
+ to_insert->next = ins;
+ }
+ else {
+ to_insert->next = ins->next;
+ ins->next = to_insert;
+ }
+}
+
+#define NEW_INS(cfg,dest,op) do { \
+ (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
+ (dest)->opcode = (op); \
+ insert_after_ins (bb, last_ins, (dest)); \
+ } while (0)
+
+/*
+ * mono_arch_lowering_pass:
+ *
+ * Converts complex opcodes into simpler ones so that each IR instruction
+ * corresponds to one machine instruction.
+ */
+static void
+mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoInst *ins, *temp, *last_ins = NULL;
+ ins = bb->code;
+
+ if (bb->max_ireg > cfg->rs->next_vireg)
+ cfg->rs->next_vireg = bb->max_ireg;
+ if (bb->max_freg > cfg->rs->next_vfreg)
+ cfg->rs->next_vfreg = bb->max_freg;
+
+ /*
+ * FIXME: Need to add more instructions, but the current machine
+ * description can't model some parts of the composite instructions like
+ * cdq.
+ */
+ while (ins) {
+ switch (ins->opcode) {
+ case OP_DIV_IMM:
+ case OP_REM_IMM:
+ case OP_IDIV_IMM:
+ case OP_IREM_IMM:
+ NEW_INS (cfg, temp, OP_ICONST);
+ temp->inst_c0 = ins->inst_imm;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ switch (ins->opcode) {
+ case OP_DIV_IMM:
+ ins->opcode = OP_LDIV;
+ break;
+ case OP_REM_IMM:
+ ins->opcode = OP_LREM;
+ break;
+ case OP_IDIV_IMM:
+ ins->opcode = OP_IDIV;
+ break;
+ case OP_IREM_IMM:
+ ins->opcode = OP_IREM;
+ break;
+ }
+ ins->sreg2 = temp->dreg;
+ break;
+ case OP_COMPARE_IMM:
+ if (!amd64_is_imm32 (ins->inst_imm)) {
+ NEW_INS (cfg, temp, OP_I8CONST);
+ temp->inst_c0 = ins->inst_imm;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ ins->opcode = OP_COMPARE;
+ ins->sreg2 = temp->dreg;
+ }
+ break;
+ case OP_LOAD_MEMBASE:
+ case OP_LOADI8_MEMBASE:
+ if (!amd64_is_imm32 (ins->inst_offset)) {
+ NEW_INS (cfg, temp, OP_I8CONST);
+ temp->inst_c0 = ins->inst_offset;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
+ ins->inst_indexreg = temp->dreg;
+ }
+ break;
+ case OP_STORE_MEMBASE_IMM:
+ case OP_STOREI8_MEMBASE_IMM:
+ if (!amd64_is_imm32 (ins->inst_imm)) {
+ NEW_INS (cfg, temp, OP_I8CONST);
+ temp->inst_c0 = ins->inst_imm;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ ins->opcode = OP_STOREI8_MEMBASE_REG;
+ ins->sreg1 = temp->dreg;
+ }
+ break;
+ default:
+ break;
+ }
+ last_ins = ins;
+ ins = ins->next;
+ }
+ bb->last_ins = last_ins;
+
+ bb->max_ireg = cfg->rs->next_vireg;
+ bb->max_freg = cfg->rs->next_vfreg;
+}
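/* Lowering example (illustrative register numbers): a compare against an
 * immediate that does not fit in 32 bits becomes a constant load plus a
 * register-register compare:
 *
 *   before:  compare_imm  R10, 0x123456789
 *   after:   i8const      R11 <- 0x123456789
 *            compare      R10, R11
 */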
+
static const int
branch_cc_table [] = {
X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
return -1;
}
+/*#include "cprop.c"*/
+
/*
- * returns the offset used by spillvar. It allocates a new
- * spill variable if necessary.
+ * Local register allocation.
+ * We first scan the list of instructions and we save the liveness info of
+ * each register (when the register is first used, when its value is set etc.).
+ * We also reverse the list of instructions (in the InstList list) because assigning
+ * registers backwards allows for more tricks to be used.
*/
-static int
-mono_spillvar_offset (MonoCompile *cfg, int spillvar)
+void
+mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoSpillInfo **si, *info;
- int i = 0;
-
- si = &cfg->spill_info;
-
- while (i <= spillvar) {
-
- if (!*si) {
- *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
- info->next = NULL;
- cfg->stack_offset += sizeof (gpointer);
- info->offset = - cfg->stack_offset;
- }
-
- if (i == spillvar)
- return (*si)->offset;
+ if (!bb->code)
+ return;
- i++;
- si = &(*si)->next;
- }
+ mono_arch_lowering_pass (cfg, bb);
- g_assert_not_reached ();
- return 0;
+ mono_local_regalloc (cfg, bb);
}
-/*
- * returns the offset used by spillvar. It allocates a new
- * spill float variable if necessary.
- * (same as mono_spillvar_offset but for float)
- */
-static int
-mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
+static unsigned char*
+emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
- MonoSpillInfo **si, *info;
- int i = 0;
-
- si = &cfg->spill_info_float;
-
- while (i <= spillvar) {
-
- if (!*si) {
- *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
- info->next = NULL;
- cfg->stack_offset += sizeof (double);
- info->offset = - cfg->stack_offset;
- }
-
- if (i == spillvar)
- return (*si)->offset;
-
- i++;
- si = &(*si)->next;
+ if (use_sse2) {
+ amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);
+ }
+ else {
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
+		x86_fnstcw_membase (code, AMD64_RSP, 0);
+ amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
+ amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
+ amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
+ amd64_fldcw_membase (code, AMD64_RSP, 2);
+ amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
+ amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
+ amd64_pop_reg (code, dreg);
+ amd64_fldcw_membase (code, AMD64_RSP, 0);
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
}
- g_assert_not_reached ();
- return 0;
-}
-
-/*
- * Creates a store for spilled floating point items
- */
-static MonoInst*
-create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
-{
- MonoInst *store;
- MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
- store->sreg1 = reg;
- store->inst_destbasereg = AMD64_RBP;
- store->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
- DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)store->inst_offset, reg));
- return store;
+ if (size == 1)
+ amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
+ else if (size == 2)
+ amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
+ return code;
}
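/* What the x87 fallback above does (illustrative): fist rounds according to
 * the current rounding mode, while C float->int conversion must truncate,
 * so the code saves the FPU control word, ORs in 0xc00 (both RC bits set =
 * round toward zero), converts, then restores the saved control word. The
 * SSE2 path needs none of this because cvttsd2si truncates directly. */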
-/*
- * Creates a load for spilled floating point items
- */
-static MonoInst*
-create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
+static unsigned char*
+mono_emit_stack_alloc (guchar *code, MonoInst* tree)
{
- MonoInst *load;
- MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
- load->dreg = reg;
- load->inst_basereg = AMD64_RBP;
- load->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
- DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)load->inst_offset, reg));
- return load;
-}
-
-#define is_global_ireg(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_SAVED_REG ((r)))
-#define ireg_is_freeable(r) ((r) >= 0 && (r) <= 15 && AMD64_IS_CALLEE_REG ((r)))
-#define freg_is_freeable(r) ((r) >= 0 && (r) <= AMD64_XMM_NREG)
-
-#define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
-#define reg_is_hard(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
-#define reg_is_soft(r,fp) (!reg_is_hard((r),(fp)))
-#define rassign(cfg,reg,fp) ((fp) ? (cfg)->rs->fassign [(reg)] : (cfg)->rs->iassign [(reg)])
-#define sreg1_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC1] == 'f')
-#define sreg2_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC2] == 'f')
-#define dreg_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_DEST] == 'f')
-
-typedef struct {
- int born_in;
- int killed_in;
- int last_use;
- int prev_use;
- int flags; /* used to track fp spill/load */
-} RegTrack;
+ int sreg = tree->sreg1;
+ int need_touch = FALSE;
-static const char*const * ins_spec = amd64_desc;
+#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+	if (!(tree->flags & MONO_INST_INIT))
+ need_touch = TRUE;
+#endif
-static void
-print_ins (int i, MonoInst *ins)
-{
- const char *spec = ins_spec [ins->opcode];
- g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
- if (!spec)
- g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
- if (spec [MONO_INST_DEST]) {
- gboolean fp = (spec [MONO_INST_DEST] == 'f');
- if (reg_is_soft (ins->dreg, fp))
- g_print (" R%d <-", ins->dreg);
- else
- g_print (" %s <-", mono_amd64_regname (ins->dreg, fp));
- }
- if (spec [MONO_INST_SRC1]) {
- gboolean fp = (spec [MONO_INST_SRC1] == 'f');
- if (reg_is_soft (ins->sreg1, fp))
- g_print (" R%d", ins->sreg1);
- else
- g_print (" %s", mono_amd64_regname (ins->sreg1, fp));
- }
- if (spec [MONO_INST_SRC2]) {
- gboolean fp = (spec [MONO_INST_SRC2] == 'f');
- if (reg_is_soft (ins->sreg2, fp))
- g_print (" R%d", ins->sreg2);
- else
- g_print (" %s", mono_amd64_regname (ins->sreg2, fp));
- }
- if (spec [MONO_INST_CLOB])
- g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
- g_print ("\n");
-}
+ if (need_touch) {
+ guint8* br[5];
-static void
-print_regtrack (RegTrack *t, int num)
-{
- int i;
- char buf [32];
- const char *r;
-
- for (i = 0; i < num; ++i) {
- if (!t [i].born_in)
- continue;
- if (i >= MONO_MAX_IREGS) {
- g_snprintf (buf, sizeof(buf), "R%d", i);
- r = buf;
- } else
- r = mono_arch_regname (i);
- g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
+		/*
+		 * Generate stack probe code.
+		 * Under Windows, if the requested stack size is larger than one
+		 * page, it is necessary to allocate one page at a time, "touching"
+		 * the stack after each successful sub-allocation. This is because
+		 * of the way stack growth is implemented - there is a guard page
+		 * before the lowest stack page that is currently committed. The
+		 * stack normally grows sequentially, so the OS traps access to the
+		 * guard page and commits more pages when needed.
+		 */
+ amd64_test_reg_imm (code, sreg, ~0xFFF);
+ br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
+
+ br[2] = code; /* loop */
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
+ amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
+ amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
+ amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
+ br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
+ amd64_patch (br[3], br[2]);
+ amd64_test_reg_reg (code, sreg, sreg);
+ br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
+ amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
+
+ br[1] = code; x86_jump8 (code, 0);
+
+ amd64_patch (br[0], code);
+ amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
+ amd64_patch (br[1], code);
+ amd64_patch (br[4], code);
}
-}
+ else
+ amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
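/* A C-level sketch of the probe loop emitted above (assumes 4K pages; the
 * function and parameter names are hypothetical, for illustration only):
 */
static void
sketch_stack_probe (volatile char *rsp, guint64 size)
{
	while (size >= 0x1000) {
		rsp -= 0x1000;
		(void)*rsp;        /* touch the new page so the guard page moves */
		size -= 0x1000;
	}
	if (size)
		rsp -= size;       /* sub-page remainder */
}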
-typedef struct InstList InstList;
-
-struct InstList {
- InstList *prev;
- InstList *next;
- MonoInst *data;
-};
-
-static inline InstList*
-inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
-{
- InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
- item->data = data;
- item->prev = NULL;
- item->next = list;
- if (list)
- list->prev = item;
- return item;
-}
-
-/*
- * Force the spilling of the variable in the symbolic register 'reg'.
- */
-static int
-get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg, gboolean fp)
-{
- MonoInst *load;
- int i, sel, spill;
- int *assign, *symbolic;
-
- if (fp) {
- assign = cfg->rs->fassign;
- symbolic = cfg->rs->fsymbolic;
- }
- else {
- assign = cfg->rs->iassign;
- symbolic = cfg->rs->isymbolic;
- }
-
- sel = assign [reg];
- /*i = cfg->rs->isymbolic [sel];
- g_assert (i == reg);*/
- i = reg;
- spill = ++cfg->spill_count;
- assign [i] = -spill - 1;
- if (fp)
- mono_regstate_free_float (cfg->rs, sel);
- else
- mono_regstate_free_int (cfg->rs, sel);
- /* we need to create a spill var and insert a load to sel after the current instruction */
- if (fp)
- MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
- else
- MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
- load->dreg = sel;
- load->inst_basereg = AMD64_RBP;
- load->inst_offset = mono_spillvar_offset (cfg, spill);
- if (item->prev) {
- while (ins->next != item->prev->data)
- ins = ins->next;
- }
- load->next = ins->next;
- ins->next = load;
- DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
- if (fp)
- i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
- else
- i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
- g_assert (i == sel);
-
- return sel;
-}
-
-static int
-get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg, gboolean fp)
-{
- MonoInst *load;
- int i, sel, spill;
- int *assign, *symbolic;
-
- if (fp) {
- assign = cfg->rs->fassign;
- symbolic = cfg->rs->fsymbolic;
- }
- else {
- assign = cfg->rs->iassign;
- symbolic = cfg->rs->isymbolic;
- }
-
- DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
- /* exclude the registers in the current instruction */
- if ((sreg1_is_fp (ins) == fp) && (reg != ins->sreg1) && (reg_is_freeable (ins->sreg1, fp) || (reg_is_soft (ins->sreg1, fp) && rassign (cfg, ins->sreg1, fp) >= 0))) {
- if (reg_is_soft (ins->sreg1, fp))
- regmask &= ~ (1 << rassign (cfg, ins->sreg1, fp));
- else
- regmask &= ~ (1 << ins->sreg1);
- DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_amd64_regname (ins->sreg1, fp)));
- }
- if ((sreg2_is_fp (ins) == fp) && (reg != ins->sreg2) && (reg_is_freeable (ins->sreg2, fp) || (reg_is_soft (ins->sreg2, fp) && rassign (cfg, ins->sreg2, fp) >= 0))) {
- if (reg_is_soft (ins->sreg2, fp))
- regmask &= ~ (1 << rassign (cfg, ins->sreg2, fp));
- else
- regmask &= ~ (1 << ins->sreg2);
- DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_amd64_regname (ins->sreg2, fp), ins->sreg2));
- }
- if ((dreg_is_fp (ins) == fp) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, fp)) {
- regmask &= ~ (1 << ins->dreg);
- DEBUG (g_print ("\t\texcluding dreg %s\n", mono_amd64_regname (ins->dreg, fp)));
- }
-
- DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
- g_assert (regmask); /* need at least a register we can free */
- sel = -1;
- /* we should track prev_use and spill the register that's farther */
- if (fp) {
- for (i = 0; i < MONO_MAX_FREGS; ++i) {
- if (regmask & (1 << i)) {
- sel = i;
- DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fassign [sel]));
- break;
- }
- }
-
- i = cfg->rs->fsymbolic [sel];
- spill = ++cfg->spill_count;
- cfg->rs->fassign [i] = -spill - 1;
- mono_regstate_free_float (cfg->rs, sel);
- }
- else {
- for (i = 0; i < MONO_MAX_IREGS; ++i) {
- if (regmask & (1 << i)) {
- sel = i;
- DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
- break;
- }
- }
-
- i = cfg->rs->isymbolic [sel];
- spill = ++cfg->spill_count;
- cfg->rs->iassign [i] = -spill - 1;
- mono_regstate_free_int (cfg->rs, sel);
- }
-
- /* we need to create a spill var and insert a load to sel after the current instruction */
- MONO_INST_NEW (cfg, load, fp ? OP_LOADR8_MEMBASE : OP_LOAD_MEMBASE);
- load->dreg = sel;
- load->inst_basereg = AMD64_RBP;
- load->inst_offset = mono_spillvar_offset (cfg, spill);
- if (item->prev) {
- while (ins->next != item->prev->data)
- ins = ins->next;
- }
- load->next = ins->next;
- ins->next = load;
- DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
- if (fp)
- i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
- else
- i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
- g_assert (i == sel);
-
- return sel;
-}
-
-static MonoInst*
-create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, gboolean fp)
-{
- MonoInst *copy;
-
- if (fp)
- MONO_INST_NEW (cfg, copy, OP_FMOVE);
- else
- MONO_INST_NEW (cfg, copy, OP_MOVE);
-
- copy->dreg = dest;
- copy->sreg1 = src;
- if (ins) {
- copy->next = ins->next;
- ins->next = copy;
- }
- DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
- return copy;
-}
-
-static MonoInst*
-create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
-{
- MonoInst *store;
- MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
- store->sreg1 = reg;
- store->inst_destbasereg = AMD64_RBP;
- store->inst_offset = mono_spillvar_offset (cfg, spill);
- if (ins) {
- store->next = ins->next;
- ins->next = store;
- }
- DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_amd64_regname (reg, fp)));
- return store;
-}
-
-static void
-insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
-{
- MonoInst *prev;
- if (item->next) {
- prev = item->next->data;
-
- while (prev->next != ins)
- prev = prev->next;
- to_insert->next = ins;
- prev->next = to_insert;
- } else {
- to_insert->next = ins;
- }
- /*
- * needed otherwise in the next instruction we can add an ins to the
- * end and that would get past this instruction.
- */
- item->data = to_insert;
-}
-
-/* flags used in reginfo->flags */
-enum {
- MONO_X86_FP_NEEDS_LOAD_SPILL = 1 << 0,
- MONO_X86_FP_NEEDS_SPILL = 1 << 1,
- MONO_X86_FP_NEEDS_LOAD = 1 << 2,
- MONO_X86_REG_NOT_ECX = 1 << 3,
- MONO_X86_REG_EAX = 1 << 4,
- MONO_X86_REG_EDX = 1 << 5,
- MONO_X86_REG_ECX = 1 << 6
-};
-
-static int
-mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
-{
- int val;
- int test_mask = dest_mask;
-
- if (flags & MONO_X86_REG_EAX)
- test_mask &= (1 << AMD64_RAX);
- else if (flags & MONO_X86_REG_EDX)
- test_mask &= (1 << AMD64_RDX);
- else if (flags & MONO_X86_REG_ECX)
- test_mask &= (1 << AMD64_RCX);
- else if (flags & MONO_X86_REG_NOT_ECX)
- test_mask &= ~ (1 << AMD64_RCX);
-
- val = mono_regstate_alloc_int (cfg->rs, test_mask);
- if (val >= 0 && test_mask != dest_mask)
- DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
-
- if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
- DEBUG(g_print ("\tFailed to allocate flag suggested mask (%u) but exluding ECX\n", test_mask));
- val = mono_regstate_alloc_int (cfg->rs, (dest_mask & (~1 << AMD64_RCX)));
- }
-
- if (val < 0) {
- val = mono_regstate_alloc_int (cfg->rs, dest_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, FALSE);
- }
-
- return val;
-}
-
-static int
-mono_amd64_alloc_float_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg)
-{
- int val;
-
- val = mono_regstate_alloc_float (cfg->rs, dest_mask);
-
- if (val < 0) {
- val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, TRUE);
- }
-
- return val;
-}
-
-static inline void
-assign_ireg (MonoRegState *rs, int reg, int hreg)
-{
- g_assert (reg >= MONO_MAX_IREGS);
- g_assert (hreg < MONO_MAX_IREGS);
- g_assert (! is_global_ireg (hreg));
-
- rs->iassign [reg] = hreg;
- rs->isymbolic [hreg] = reg;
- rs->ifree_mask &= ~ (1 << hreg);
-}
-
-/*#include "cprop.c"*/
-
-/*
- * Local register allocation.
- * We first scan the list of instructions and we save the liveness info of
- * each register (when the register is first used, when it's value is set etc.).
- * We also reverse the list of instructions (in the InstList list) because assigning
- * registers backwards allows for more tricks to be used.
- */
-void
-mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
-{
- MonoInst *ins;
- MonoRegState *rs = cfg->rs;
- int i, val, fpcount;
- RegTrack *reginfo, *reginfof;
- RegTrack *reginfo1, *reginfo2, *reginfod;
- InstList *tmp, *reversed = NULL;
- const char *spec;
- guint32 src1_mask, src2_mask, dest_mask;
- GList *fspill_list = NULL;
- int fspill = 0;
-
- if (!bb->code)
- return;
- rs->next_vireg = bb->max_ireg;
- rs->next_vfreg = bb->max_freg;
- mono_regstate_assign (rs);
- reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
- reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
- rs->ifree_mask = AMD64_CALLEE_REGS;
- rs->ffree_mask = AMD64_CALLEE_FREGS;
-
- if (!use_sse2)
- /* The fp stack is 6 entries deep */
- rs->ffree_mask = 0x3f;
-
- ins = bb->code;
-
- /*if (cfg->opt & MONO_OPT_COPYPROP)
- local_copy_prop (cfg, ins);*/
-
- i = 1;
- fpcount = 0;
- DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
- /* forward pass on the instructions to collect register liveness info */
- while (ins) {
- spec = ins_spec [ins->opcode];
-
- DEBUG (print_ins (i, ins));
-
- if (spec [MONO_INST_SRC1]) {
- if (spec [MONO_INST_SRC1] == 'f') {
- reginfo1 = reginfof;
-
- if (!use_sse2) {
- GList *spill;
-
- spill = g_list_first (fspill_list);
- if (spill && fpcount < FPSTACK_SIZE) {
- reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
- fspill_list = g_list_remove (fspill_list, spill->data);
- } else
- fpcount--;
- }
- }
- else
- reginfo1 = reginfo;
- reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
- reginfo1 [ins->sreg1].last_use = i;
- if (spec [MONO_INST_SRC1] == 'L') {
- /* The virtual register is allocated sequentially */
- reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
- reginfo1 [ins->sreg1 + 1].last_use = i;
- if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
- reginfo1 [ins->sreg1 + 1].born_in = i;
-
- reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
- reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
- }
- } else {
- ins->sreg1 = -1;
- }
- if (spec [MONO_INST_SRC2]) {
- if (spec [MONO_INST_SRC2] == 'f') {
- reginfo2 = reginfof;
-
- if (!use_sse2) {
- GList *spill;
-
- spill = g_list_first (fspill_list);
- if (spill) {
- reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
- fspill_list = g_list_remove (fspill_list, spill->data);
- if (fpcount >= FPSTACK_SIZE) {
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
- }
- } else
- fpcount--;
- }
- }
- else
- reginfo2 = reginfo;
- reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
- reginfo2 [ins->sreg2].last_use = i;
- if (spec [MONO_INST_SRC2] == 'L') {
- /* The virtual register is allocated sequentially */
- reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
- reginfo2 [ins->sreg2 + 1].last_use = i;
- if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
- reginfo2 [ins->sreg2 + 1].born_in = i;
- }
- if (spec [MONO_INST_CLOB] == 's') {
- reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
- reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
- }
- } else {
- ins->sreg2 = -1;
- }
- if (spec [MONO_INST_DEST]) {
- if (spec [MONO_INST_DEST] == 'f') {
- reginfod = reginfof;
- if (!use_sse2 && (spec [MONO_INST_CLOB] != 'm')) {
- if (fpcount >= FPSTACK_SIZE) {
- reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- fpcount--;
- }
- fpcount++;
- }
- }
- else
- reginfod = reginfo;
- if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
- reginfod [ins->dreg].killed_in = i;
- reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
- reginfod [ins->dreg].last_use = i;
- if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
- reginfod [ins->dreg].born_in = i;
- if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
- /* The virtual register is allocated sequentially */
- reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
- reginfod [ins->dreg + 1].last_use = i;
- if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
- reginfod [ins->dreg + 1].born_in = i;
-
- reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
- reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
- }
- } else {
- ins->dreg = -1;
- }
-
- if (spec [MONO_INST_CLOB] == 'c') {
- /* A call instruction implicitly uses all registers in call->out_ireg_args */
-
- MonoCallInst *call = (MonoCallInst*)ins;
- GSList *list;
-
- list = call->out_ireg_args;
- if (list) {
- while (list) {
- guint64 regpair;
- int reg, hreg;
-
- regpair = (guint64) (list->data);
- hreg = regpair >> 32;
- reg = regpair & 0xffffffff;
-
- reginfo [reg].prev_use = reginfo [reg].last_use;
- reginfo [reg].last_use = i;
-
- list = g_slist_next (list);
- }
- }
-
- list = call->out_freg_args;
- if (use_sse2 && list) {
- while (list) {
- guint64 regpair;
- int reg, hreg;
-
- regpair = (guint64) (list->data);
- hreg = regpair >> 32;
- reg = regpair & 0xffffffff;
-
- reginfof [reg].prev_use = reginfof [reg].last_use;
- reginfof [reg].last_use = i;
-
- list = g_slist_next (list);
- }
- }
- }
-
- reversed = inst_list_prepend (cfg->mempool, reversed, ins);
- ++i;
- ins = ins->next;
- }
-
- // todo: check if we have anything left on fp stack, in verify mode?
- fspill = 0;
-
- DEBUG (print_regtrack (reginfo, rs->next_vireg));
- DEBUG (print_regtrack (reginfof, rs->next_vfreg));
- tmp = reversed;
- while (tmp) {
- int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
- dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
- --i;
- ins = tmp->data;
- spec = ins_spec [ins->opcode];
- prev_dreg = -1;
- clob_dreg = -1;
- DEBUG (g_print ("processing:"));
- DEBUG (print_ins (i, ins));
- if (spec [MONO_INST_CLOB] == 's') {
- /*
- * Shift opcodes, SREG2 must be RCX
- */
- if (rs->ifree_mask & (1 << AMD64_RCX)) {
- if (ins->sreg2 < MONO_MAX_IREGS) {
- /* Argument already in hard reg, need to copy */
- MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
- insert_before_ins (ins, tmp, copy);
- }
- else {
- DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
- assign_ireg (rs, ins->sreg2, AMD64_RCX);
- }
- } else {
- int need_ecx_spill = TRUE;
- /*
- * we first check if src1/dreg is already assigned a register
- * and then we force a spill of the var assigned to ECX.
- */
- /* the destination register can't be ECX */
- dest_mask &= ~ (1 << AMD64_RCX);
- src1_mask &= ~ (1 << AMD64_RCX);
- val = rs->iassign [ins->dreg];
- /*
- * the destination register is already assigned to ECX:
- * we need to allocate another register for it and then
- * copy from this to ECX.
- */
- if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
- int new_dest;
- new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
- g_assert (new_dest >= 0);
- DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
-
- rs->isymbolic [new_dest] = ins->dreg;
- rs->iassign [ins->dreg] = new_dest;
- clob_dreg = ins->dreg;
- ins->dreg = new_dest;
- create_copy_ins (cfg, AMD64_RCX, new_dest, ins, FALSE);
- need_ecx_spill = FALSE;
- /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
- val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
- rs->iassign [ins->dreg] = val;
- rs->isymbolic [val] = prev_dreg;
- ins->dreg = val;*/
- }
- if (is_global_ireg (ins->sreg2)) {
- MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
- insert_before_ins (ins, tmp, copy);
- }
- else {
- val = rs->iassign [ins->sreg2];
- if (val >= 0 && val != AMD64_RCX) {
- MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL, FALSE);
- DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
- move->next = ins;
- g_assert_not_reached ();
- /* FIXME: where is move connected to the instruction list? */
- //tmp->prev->data->next = move;
- }
- else {
- if (val == AMD64_RCX)
- need_ecx_spill = FALSE;
- }
- }
- if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX], FALSE);
- mono_regstate_free_int (rs, AMD64_RCX);
- }
- if (!is_global_ireg (ins->sreg2))
- /* force-set sreg2 */
- assign_ireg (rs, ins->sreg2, AMD64_RCX);
- }
- ins->sreg2 = AMD64_RCX;
- } else if (spec [MONO_INST_CLOB] == 'd') {
- /*
- * DIVISION/REMAINER
- */
- int dest_reg = AMD64_RAX;
- int clob_reg = AMD64_RDX;
- if (spec [MONO_INST_DEST] == 'd') {
- dest_reg = AMD64_RDX; /* reminder */
- clob_reg = AMD64_RAX;
- }
- if (is_global_ireg (ins->dreg))
- val = ins->dreg;
- else
- val = rs->iassign [ins->dreg];
- if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
- mono_regstate_free_int (rs, dest_reg);
- }
- if (val < 0) {
- if (val < -1) {
- /* the register gets spilled after this inst */
- int spill = -val -1;
- dest_mask = 1 << clob_reg;
- prev_dreg = ins->dreg;
- val = mono_regstate_alloc_int (rs, dest_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg, FALSE);
- rs->iassign [ins->dreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
- DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
- rs->isymbolic [val] = prev_dreg;
- ins->dreg = val;
- } else {
- DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
- prev_dreg = ins->dreg;
- assign_ireg (rs, ins->dreg, dest_reg);
- ins->dreg = dest_reg;
- val = dest_reg;
- }
- }
-
- //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
- if (val != dest_reg) { /* force a copy */
- create_copy_ins (cfg, val, dest_reg, ins, FALSE);
- if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
- mono_regstate_free_int (rs, dest_reg);
- }
- }
- if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= MONO_MAX_IREGS)) {
- DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg], FALSE);
- mono_regstate_free_int (rs, clob_reg);
- }
- src1_mask = 1 << AMD64_RAX;
- src2_mask = 1 << AMD64_RCX;
- }
- if (spec [MONO_INST_DEST] == 'l') {
- int hreg;
- val = rs->iassign [ins->dreg];
- /* check special case when dreg have been moved from ecx (clob shift) */
- if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
- hreg = clob_dreg + 1;
- else
- hreg = ins->dreg + 1;
-
- /* base prev_dreg on fixed hreg, handle clob case */
- val = hreg - 1;
-
- if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
- DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
- mono_regstate_free_int (rs, AMD64_RAX);
- }
- if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
- DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX], FALSE);
- mono_regstate_free_int (rs, AMD64_RDX);
- }
- }
-
- /*
- * TRACK DREG
- */
- if (spec [MONO_INST_DEST] == 'f') {
- if (use_sse2) {
- /* Allocate an XMM reg the same way as an int reg */
- if (reg_is_soft (ins->dreg, TRUE)) {
- val = rs->fassign [ins->dreg];
- prev_dreg = ins->dreg;
-
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->dreg);
- rs->fassign [ins->dreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins, TRUE);
- }
- DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_amd64_regname (val, TRUE), ins->dreg));
- rs->fsymbolic [val] = prev_dreg;
- ins->dreg = val;
- }
- }
- else if (spec [MONO_INST_CLOB] != 'm') {
- if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
- GList *spill_node;
- MonoInst *store;
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
- insert_before_ins (ins, tmp, store);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- fspill--;
- }
- }
- } else if (spec [MONO_INST_DEST] == 'L') {
- int hreg;
- val = rs->iassign [ins->dreg];
- /* check special case when dreg have been moved from ecx (clob shift) */
- if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
- hreg = clob_dreg + 1;
- else
- hreg = ins->dreg + 1;
-
- /* base prev_dreg on fixed hreg, handle clob case */
- prev_dreg = hreg - 1;
-
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
- rs->iassign [ins->dreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
- }
-
- DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
-
- rs->isymbolic [val] = hreg - 1;
- ins->dreg = val;
-
- val = rs->iassign [hreg];
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
- rs->iassign [hreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
- }
-
- DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
- rs->isymbolic [val] = hreg;
- /* save reg allocating into unused */
- ins->unused = val;
-
- /* check if we can free our long reg */
- if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
- DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
- mono_regstate_free_int (rs, val);
- }
- }
- else if (ins->dreg >= MONO_MAX_IREGS) {
- int hreg;
- val = rs->iassign [ins->dreg];
- if (spec [MONO_INST_DEST] == 'l') {
- /* check special case when dreg have been moved from ecx (clob shift) */
- if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
- hreg = clob_dreg + 1;
- else
- hreg = ins->dreg + 1;
-
- /* base prev_dreg on fixed hreg, handle clob case */
- prev_dreg = hreg - 1;
- } else
- prev_dreg = ins->dreg;
-
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
- rs->iassign [ins->dreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
- }
- DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
- rs->isymbolic [val] = prev_dreg;
- ins->dreg = val;
- /* handle cases where lreg needs to be eax:edx */
- if (spec [MONO_INST_DEST] == 'l') {
- /* check special case when dreg have been moved from ecx (clob shift) */
- int hreg = prev_dreg + 1;
- val = rs->iassign [hreg];
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
- rs->iassign [hreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
- }
- DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
- rs->isymbolic [val] = hreg;
- if (ins->dreg == AMD64_RAX) {
- if (val != AMD64_RDX)
- create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
- } else if (ins->dreg == AMD64_RDX) {
- if (val == AMD64_RAX) {
- /* swap */
- g_assert_not_reached ();
- } else {
- /* two forced copies */
- create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
- create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
- }
- } else {
- if (val == AMD64_RDX) {
- create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
- } else {
- /* two forced copies */
- create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
- create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
- }
- }
- if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
- DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
- mono_regstate_free_int (rs, val);
- }
- } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
- /* this instruction only outputs to EAX, need to copy */
- create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
- } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
- create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins, FALSE);
- }
- }
-
- if (use_sse2 && spec [MONO_INST_DEST] == 'f' && reg_is_freeable (ins->dreg, TRUE) && prev_dreg >= 0 && reginfof [prev_dreg].born_in >= i) {
- DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_fregname (ins->dreg), prev_dreg, reginfof [prev_dreg].born_in));
- mono_regstate_free_float (rs, ins->dreg);
- }
- if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg, FALSE) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
- DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
- mono_regstate_free_int (rs, ins->dreg);
- }
-
- /* put src1 in EAX if it needs to be */
- if (spec [MONO_INST_SRC1] == 'a') {
- if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
- mono_regstate_free_int (rs, AMD64_RAX);
- }
- if (ins->sreg1 < MONO_MAX_IREGS) {
- /* The argument is already in a hard reg, need to copy */
- MonoInst *copy = create_copy_ins (cfg, AMD64_RAX, ins->sreg1, NULL, FALSE);
- insert_before_ins (ins, tmp, copy);
- }
- else
- /* force-set sreg1 */
- assign_ireg (rs, ins->sreg1, AMD64_RAX);
- ins->sreg1 = AMD64_RAX;
- }
-
- /*
- * TRACK SREG1
- */
- if (spec [MONO_INST_SRC1] == 'f') {
- if (use_sse2) {
- if (reg_is_soft (ins->sreg1, TRUE)) {
- val = rs->fassign [ins->sreg1];
- prev_sreg1 = ins->sreg1;
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg1);
- rs->fassign [ins->sreg1] = val;
- DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_fregname (val), ins->sreg1));
- if (spill) {
- MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, TRUE);
- insert_before_ins (ins, tmp, store);
- }
- }
- rs->fsymbolic [val] = prev_sreg1;
- ins->sreg1 = val;
- } else {
- prev_sreg1 = -1;
- }
- }
- else
- if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
- MonoInst *load;
- MonoInst *store = NULL;
-
- if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
- GList *spill_node;
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- }
-
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
- insert_before_ins (ins, tmp, load);
- if (store)
- insert_before_ins (load, tmp, store);
- }
- } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
- /* force source to be same as dest */
- rs->iassign [ins->sreg1] = ins->dreg;
- rs->iassign [ins->sreg1 + 1] = ins->unused;
-
- DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
- DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
-
- ins->sreg1 = ins->dreg;
- /*
- * No need for saving the reg, we know that src1=dest in this cases
- * ins->inst_c0 = ins->unused;
- */
-
- /* make sure that we remove them from free mask */
- rs->ifree_mask &= ~ (1 << ins->dreg);
- rs->ifree_mask &= ~ (1 << ins->unused);
- }
- else if (ins->sreg1 >= MONO_MAX_IREGS) {
- val = rs->iassign [ins->sreg1];
- prev_sreg1 = ins->sreg1;
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- if (0 && (ins->opcode == OP_MOVE)) {
- /*
- * small optimization: the dest register is already allocated
- * but the src one is not: we can simply assign the same register
- * here and peephole will get rid of the instruction later.
- * This optimization may interfere with the clobbering handling:
- * it removes a mov operation that will be added again to handle clobbering.
- * There are also some other issues that should with make testjit.
- */
- mono_regstate_alloc_int (rs, 1 << ins->dreg);
- val = rs->iassign [ins->sreg1] = ins->dreg;
- //g_assert (val >= 0);
- DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
- } else {
- //g_assert (val == -1); /* source cannot be spilled */
- val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
- rs->iassign [ins->sreg1] = val;
- DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
- }
- if (spill) {
- MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, FALSE);
- insert_before_ins (ins, tmp, store);
- }
- }
- rs->isymbolic [val] = prev_sreg1;
- ins->sreg1 = val;
- } else {
- prev_sreg1 = -1;
- }
-
- /* handle clobbering of sreg1 */
- if (((spec [MONO_INST_DEST] == 'f' && spec [MONO_INST_SRC1] == 'f' && use_sse2) || spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
- MonoInst *sreg2_copy = NULL;
- MonoInst *copy;
- gboolean fp = (spec [MONO_INST_SRC1] == 'f');
-
- if (ins->dreg == ins->sreg2) {
- /*
- * copying sreg1 to dreg could clobber sreg2, so allocate a new
- * register for it.
- */
- int reg2 = 0;
-
- if (fp)
- reg2 = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
- else
- reg2 = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
-
- DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_amd64_regname (ins->sreg2, fp), mono_amd64_regname (reg2, fp)));
- sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, fp);
- prev_sreg2 = ins->sreg2 = reg2;
-
- if (fp)
- mono_regstate_free_float (rs, reg2);
- else
- mono_regstate_free_int (rs, reg2);
- }
-
- copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, fp);
- DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_amd64_regname (ins->sreg1, fp), mono_amd64_regname (ins->dreg, fp)));
- insert_before_ins (ins, tmp, copy);
-
- if (sreg2_copy)
- insert_before_ins (copy, tmp, sreg2_copy);
-
- /*
- * Need to prevent sreg2 to be allocated to sreg1, since that
- * would screw up the previous copy.
- */
- src2_mask &= ~ (1 << ins->sreg1);
- /* we set sreg1 to dest as well */
- prev_sreg1 = ins->sreg1 = ins->dreg;
- src2_mask &= ~ (1 << ins->dreg);
- }
-
- /*
- * TRACK SREG2
- */
- if (spec [MONO_INST_SRC2] == 'f') {
- if (use_sse2) {
- if (reg_is_soft (ins->sreg2, TRUE)) {
- val = rs->fassign [ins->sreg2];
- prev_sreg2 = ins->sreg2;
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
- rs->fassign [ins->sreg2] = val;
- DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_fregname (val), ins->sreg2));
- if (spill)
- create_spilled_store (cfg, spill, val, prev_sreg2, ins, TRUE);
- }
- rs->fsymbolic [val] = prev_sreg2;
- ins->sreg2 = val;
- } else {
- prev_sreg2 = -1;
- }
- }
- else
- if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
- MonoInst *load;
- MonoInst *store = NULL;
-
- if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
- GList *spill_node;
-
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
- if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
- spill_node = g_list_next (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- }
-
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
- insert_before_ins (ins, tmp, load);
- if (store)
- insert_before_ins (load, tmp, store);
- }
- }
- else if (ins->sreg2 >= MONO_MAX_IREGS) {
- val = rs->iassign [ins->sreg2];
- prev_sreg2 = ins->sreg2;
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
- rs->iassign [ins->sreg2] = val;
- DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
- if (spill)
- create_spilled_store (cfg, spill, val, prev_sreg2, ins, FALSE);
- }
- rs->isymbolic [val] = prev_sreg2;
- ins->sreg2 = val;
- if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
- DEBUG (g_print ("\tassigned sreg2 %s to R%d, but ECX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
- }
- } else {
- prev_sreg2 = -1;
- }
-
- if (spec [MONO_INST_CLOB] == 'c') {
- int j, s;
- MonoCallInst *call = (MonoCallInst*)ins;
- GSList *list;
- guint32 clob_mask = AMD64_CALLEE_REGS;
-
- for (j = 0; j < MONO_MAX_IREGS; ++j) {
- s = 1 << j;
- if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [j], FALSE);
- mono_regstate_free_int (rs, j);
- //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
- }
- }
-
- if (use_sse2) {
- clob_mask = AMD64_CALLEE_FREGS;
-
- for (j = 0; j < MONO_MAX_FREGS; ++j) {
- s = 1 << j;
- if ((clob_mask & s) && !(rs->ffree_mask & s) && j != ins->sreg1) {
- get_register_force_spilling (cfg, tmp, ins, rs->fsymbolic [j], TRUE);
- mono_regstate_free_float (rs, j);
- //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
- }
- }
- }
-
- /*
- * Assign all registers in call->out_reg_args to the proper
- * argument registers.
- */
-
- list = call->out_ireg_args;
- if (list) {
- while (list) {
- guint64 regpair;
- int reg, hreg;
-
- regpair = (guint64) (list->data);
- hreg = regpair >> 32;
- reg = regpair & 0xffffffff;
-
- assign_ireg (rs, reg, hreg);
-
- DEBUG (g_print ("\tassigned arg reg %s to R%d\n", mono_arch_regname (hreg), reg));
-
- list = g_slist_next (list);
- }
- g_slist_free (call->out_ireg_args);
- }
-
- list = call->out_freg_args;
- if (list && use_sse2) {
- while (list) {
- guint64 regpair;
- int reg, hreg;
-
- regpair = (guint64) (list->data);
- hreg = regpair >> 32;
- reg = regpair & 0xffffffff;
-
- rs->fassign [reg] = hreg;
- rs->fsymbolic [hreg] = reg;
- rs->ffree_mask &= ~ (1 << hreg);
-
- list = g_slist_next (list);
- }
- }
- if (call->out_freg_args)
- g_slist_free (call->out_freg_args);
- }
-
- /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
- DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
- mono_regstate_free_int (rs, ins->sreg1);
- }
- if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
- DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
- mono_regstate_free_int (rs, ins->sreg2);
- }*/
-
- DEBUG (print_ins (i, ins));
- /* this may result from a insert_before call */
- if (!tmp->next)
- bb->code = tmp->data;
- tmp = tmp->next;
- }
-
- g_free (reginfo);
- g_free (reginfof);
- g_list_free (fspill_list);
-}
-
-static unsigned char*
-emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
-{
- if (use_sse2) {
- amd64_sse_cvtsd2si_reg_reg (code, dreg, sreg);
- }
- else {
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
- x86_fnstcw_membase(code, AMD64_RSP, 0);
- amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
- amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
- amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
- amd64_fldcw_membase (code, AMD64_RSP, 2);
- amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
- amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
- amd64_pop_reg (code, dreg);
- amd64_fldcw_membase (code, AMD64_RSP, 0);
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
- }
-
- if (size == 1)
- amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
- else if (size == 2)
- amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
- return code;
-}
-
-static unsigned char*
-mono_emit_stack_alloc (guchar *code, MonoInst* tree)
-{
- int sreg = tree->sreg1;
-#ifdef PLATFORM_WIN32
- guint8* br[5];
-
- NOT_IMPLEMENTED;
-
- /*
- * Under Windows:
- * If requested stack size is larger than one page,
- * perform stack-touch operation
- */
- /*
- * Generate stack probe code.
- * Under Windows, it is necessary to allocate one page at a time,
- * "touching" stack after each successful sub-allocation. This is
- * because of the way stack growth is implemented - there is a
- * guard page before the lowest stack page that is currently commited.
- * Stack normally grows sequentially so OS traps access to the
- * guard page and commits more pages when needed.
- */
- amd64_test_reg_imm (code, sreg, ~0xFFF);
- br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
-
- br[2] = code; /* loop */
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
- amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
- amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
- amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
- br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
- amd64_patch (br[3], br[2]);
- amd64_test_reg_reg (code, sreg, sreg);
- br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
- amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
-
- br[1] = code; x86_jump8 (code, 0);
-
- amd64_patch (br[0], code);
- amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
- amd64_patch (br[1], code);
- amd64_patch (br[4], code);
-#else /* PLATFORM_WIN32 */
- amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
-#endif
if (tree->flags & MONO_INST_INIT) {
int offset = 0;
if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
{
CallInfo *cinfo;
- guint32 offset, quad;
+ guint32 quad;
/* Move return value to the target register */
/* FIXME: do this in the local reg allocator */
case OP_LCALL:
case OP_LCALL_REG:
case OP_LCALL_MEMBASE:
- if (ins->dreg != AMD64_RAX)
- amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, 8);
+ g_assert (ins->dreg == AMD64_RAX);
break;
case OP_FCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE:
- /* FIXME: optimize this */
- offset = mono_spillvar_offset_float (cfg, 0);
if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
if (use_sse2)
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
else {
- amd64_movss_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
- amd64_fld_membase (code, AMD64_RBP, offset, FALSE);
+ /* FIXME: optimize this */
+ amd64_movss_membase_reg (code, AMD64_RSP, -8, AMD64_XMM0);
+ amd64_fld_membase (code, AMD64_RSP, -8, FALSE);
}
}
else {
amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
else {
- amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
- amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
+ /* FIXME: optimize this */
+ amd64_movsd_membase_reg (code, AMD64_RSP, -8, AMD64_XMM0);
+ amd64_fld_membase (code, AMD64_RSP, -8, TRUE);
}
}
break;
/* This is the opposite of the code in emit_prolog */
+ if (sig->ret->type != MONO_TYPE_VOID) {
+ if ((cinfo->ret.storage == ArgInIReg) && (cfg->ret->opcode != OP_REGVAR)) {
+ amd64_mov_reg_membase (code, cinfo->ret.reg, cfg->ret->inst_basereg, cfg->ret->inst_offset, 8);
+ }
+ }
+
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
MonoType *arg_type;
break;
}
}
+ else {
+ g_assert (ainfo->storage == ArgInIReg);
+
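+ /* The argument was allocated to a register variable; move it back to its original argument register */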
+ amd64_mov_reg_reg (code, ainfo->reg, inst->dreg, 8);
+ }
}
g_free (cinfo);
if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
MonoProfileCoverageInfo *cov = cfg->coverage_info;
- g_assert (!mono_compile_aot);
+ g_assert (!cfg->compile_aot);
cpos += 6;
cov->data [bb->dfn].cil_code = bb->cil_code;
break;
case OP_STORE_MEMBASE_IMM:
case OP_STOREI8_MEMBASE_IMM:
- if (amd64_is_imm32 (ins->inst_imm))
- amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
- else {
- amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
- amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
- }
+ g_assert (amd64_is_imm32 (ins->inst_imm));
+ amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
break;
case CEE_LDIND_I:
amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE:
- if (amd64_is_imm32 (ins->inst_offset)) {
- amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
- }
- else {
- amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
- amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
- }
+ g_assert (amd64_is_imm32 (ins->inst_offset));
+ amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
break;
case OP_LOADI4_MEMBASE:
amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
case OP_LOADI2_MEMBASE:
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
break;
+ case OP_AMD64_LOADI8_MEMINDEX:
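+ /* 8 byte load from inst_basereg + inst_indexreg, with no displacement or scaling */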
+ amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, ins->inst_indexreg, 0, 8);
+ break;
case CEE_CONV_I1:
amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
break;
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
- if (!amd64_is_imm32 (ins->inst_imm)) {
- amd64_mov_reg_imm (code, AMD64_R11, ins->inst_imm);
- amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, AMD64_R11);
- } else {
- amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
- }
+ g_assert (amd64_is_imm32 (ins->inst_imm));
+ amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
break;
case OP_X86_COMPARE_REG_MEMBASE:
amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
case CEE_BREAK:
amd64_breakpoint (code);
break;
-
case OP_ADDCC:
case CEE_ADD:
amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
break;
case CEE_MUL:
+ case OP_LMUL:
amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MUL_IMM:
- amd64_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
+ case OP_LMUL_IMM:
+ case OP_IMUL_IMM: {
+ guint32 size = (ins->opcode == OP_IMUL_IMM) ? 4 : 8;
+
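+ /*
+ * Strength-reduce multiplications by small constants into LEA/ADD/SHL
+ * sequences, which are cheaper than a general imul.
+ */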
+ switch (ins->inst_imm) {
+ case 2:
+ /* MOV r1, r2 */
+ /* ADD r1, r1 */
+ if (ins->dreg != ins->sreg1)
+ amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, size);
+ amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+ break;
+ case 3:
+ /* LEA r1, [r2 + r2*2] */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+ break;
+ case 5:
+ /* LEA r1, [r2 + r2*4] */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ break;
+ case 6:
+ /* LEA r1, [r2 + r2*2] */
+ /* ADD r1, r1 */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+ amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+ break;
+ case 9:
+ /* LEA r1, [r2 + r2*8] */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
+ break;
+ case 10:
+ /* LEA r1, [r2 + r2*4] */
+ /* ADD r1, r1 */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ amd64_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+ break;
+ case 12:
+ /* LEA r1, [r2 + r2*2] */
+ /* SHL r1, 2 */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+ amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
+ break;
+ case 25:
+ /* LEA r1, [r2 + r2*4] */
+ /* LEA r1, [r1 + r1*4] */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
+ break;
+ case 100:
+ /* LEA r1, [r2 + r2*4] */
+ /* SHL r1, 2 */
+ /* LEA r1, [r1 + r1*4] */
+ amd64_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ amd64_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
+ amd64_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
+ break;
+ default:
+ amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, size);
+ break;
+ }
break;
+ }
case CEE_DIV:
+ case OP_LDIV:
amd64_cdq (code);
amd64_div_reg (code, ins->sreg2, TRUE);
break;
case CEE_DIV_UN:
+ case OP_LDIV_UN:
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_reg (code, ins->sreg2, FALSE);
break;
- case OP_DIV_IMM:
- g_assert (amd64_is_imm32 (ins->inst_imm));
- amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
- amd64_cdq (code);
- amd64_div_reg (code, ins->sreg2, TRUE);
- break;
case CEE_REM:
+ case OP_LREM:
amd64_cdq (code);
amd64_div_reg (code, ins->sreg2, TRUE);
break;
case CEE_REM_UN:
+ case OP_LREM_UN:
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
amd64_div_reg (code, ins->sreg2, FALSE);
break;
- case OP_REM_IMM:
- g_assert (amd64_is_imm32 (ins->inst_imm));
- amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
- amd64_cdq (code);
- amd64_div_reg (code, ins->sreg2, TRUE);
+ case OP_LMUL_OVF:
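+ /* imul sets the overflow flag on signed overflow, which triggers the exception below */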
+ amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
case CEE_OR:
amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
case OP_IMUL:
amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
break;
- case OP_IMUL_IMM:
- amd64_imul_reg_reg_imm_size (code, ins->dreg, ins->sreg1, ins->inst_imm, 4);
- break;
case OP_IMUL_OVF:
amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
- case OP_IMUL_OVF_UN: {
+ case OP_IMUL_OVF_UN:
+ case OP_LMUL_OVF_UN: {
/* the mul operation and the exception check should most likely be split */
int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
+ int size = (ins->opcode == OP_IMUL_OVF_UN) ? 4 : 8;
/*g_assert (ins->sreg2 == X86_EAX);
g_assert (ins->dreg == X86_EAX);*/
if (ins->sreg2 == X86_EAX) {
saved_eax = TRUE;
amd64_push_reg (code, X86_EAX);
}
- amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
+ amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size);
non_eax_reg = ins->sreg2;
}
if (ins->dreg == X86_EDX) {
saved_eax = TRUE;
amd64_push_reg (code, X86_EAX);
}
- } else if (ins->dreg != X86_EAX) {
+ } else {
saved_edx = TRUE;
amd64_push_reg (code, X86_EDX);
}
- amd64_mul_reg_size (code, non_eax_reg, FALSE, 4);
+ amd64_mul_reg_size (code, non_eax_reg, FALSE, size);
/* save before the check since pop and mov don't change the flags */
if (ins->dreg != X86_EAX)
- amd64_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
+ amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size);
if (saved_edx)
amd64_pop_reg (code, X86_EDX);
if (saved_eax)
}
case OP_IDIV:
amd64_cdq_size (code, 4);
- amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
+ amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
break;
case OP_IDIV_UN:
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
- amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
- break;
- case OP_IDIV_IMM:
- amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
- amd64_cdq_size (code, 4);
- amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
+ amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
break;
case OP_IREM:
amd64_cdq_size (code, 4);
- amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
+ amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
break;
case OP_IREM_UN:
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
- amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
+ amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
break;
- case OP_IREM_IMM:
- amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
- amd64_cdq_size (code, 4);
- amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
- break;
-
case OP_ICOMPARE:
amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
break;
case OP_ICOMPARE_IMM:
amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
break;
-
case OP_IBEQ:
case OP_IBLT:
case OP_IBGT:
case OP_SEXT_I2:
amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
break;
+ case OP_SEXT_I4:
+ amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
case OP_ICONST:
case OP_I8CONST:
if ((((guint64)ins->inst_c0) >> 32) == 0)
case CEE_CONV_I4:
case CEE_CONV_U4:
case OP_MOVE:
- case OP_SETREG:
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
break;
case OP_AMD64_SET_XMMREG_R4: {
code = emit_load_volatile_arguments (cfg, code);
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= sizeof (gpointer);
+ if (cfg->arch.omit_fp) {
+ guint32 save_offset = 0;
+ /* Pop callee-saved registers */
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ amd64_mov_reg_membase (code, i, AMD64_RSP, save_offset, 8);
+ save_offset += 8;
+ }
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
+ }
+ else {
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
+ pos -= sizeof (gpointer);
- if (pos)
- amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
+ if (pos)
+ amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
- /* Pop registers in reverse order */
- for (i = AMD64_NREG - 1; i > 0; --i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_pop_reg (code, i);
- }
+ /* Pop registers in reverse order */
+ for (i = AMD64_NREG - 1; i > 0; --i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ amd64_pop_reg (code, i);
+ }
+
+ amd64_leave (code);
+ }
- amd64_leave (code);
offset = code - cfg->native_code;
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
- if (mono_compile_aot)
+ if (cfg->compile_aot)
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
else
amd64_set_reg_template (code, AMD64_R11);
amd64_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
break;
case OP_ARGLIST: {
- amd64_lea_membase (code, AMD64_R11, AMD64_RBP, cfg->sig_cookie);
+ amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, cfg->sig_cookie);
amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
break;
}
case CEE_CONV_R4: /* FIXME: change precision */
case CEE_CONV_R8:
if (use_sse2)
- amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
+ amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
else {
amd64_push_reg (code, ins->sreg1);
amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
break;
}
- case OP_LCONV_TO_OVF_I: {
- guint8 *br [3], *label [1];
-
- if (use_sse2)
- g_assert_not_reached ();
-
- /*
- * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
- */
- amd64_test_reg_reg (code, ins->sreg1, ins->sreg1);
-
- /* If the low word top bit is set, see if we are negative */
- br [0] = code; x86_branch8 (code, X86_CC_LT, 0, TRUE);
- /* We are not negative (no top bit set, check for our top word to be zero */
- amd64_test_reg_reg (code, ins->sreg2, ins->sreg2);
- br [1] = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
- label [0] = code;
-
- /* throw exception */
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
- x86_jump32 (code, 0);
-
- amd64_patch (br [0], code);
- /* our top bit is set, check that top word is 0xfffffff */
- amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0xffffffff);
-
- amd64_patch (br [1], code);
- /* nope, emit exception */
- br [2] = code; x86_branch8 (code, X86_CC_NE, 0, TRUE);
- amd64_patch (br [2], label [0]);
-
- if (ins->dreg != ins->sreg1)
- amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
- break;
- }
case CEE_CONV_OVF_U4:
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, 0);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_LT, TRUE, "OverflowException");
amd64_push_reg (code, AMD64_R11);
amd64_push_reg (code, AMD64_R11);
amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
}
else
amd64_fchs (code);
break;
case OP_SIN:
- if (use_sse2)
- g_assert_not_reached ();
- amd64_fsin (code);
- amd64_fldz (code);
- amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+ if (use_sse2) {
+ EMIT_SSE2_FPFUNC (code, fsin, ins->dreg, ins->sreg1);
+ }
+ else {
+ amd64_fsin (code);
+ amd64_fldz (code);
+ amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+ }
break;
case OP_COS:
- if (use_sse2)
- g_assert_not_reached ();
- amd64_fcos (code);
- amd64_fldz (code);
- amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+ if (use_sse2) {
+ EMIT_SSE2_FPFUNC (code, fcos, ins->dreg, ins->sreg1);
+ }
+ else {
+ amd64_fcos (code);
+ amd64_fldz (code);
+ amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+ }
break;
case OP_ABS:
- if (use_sse2)
- g_assert_not_reached ();
- amd64_fabs (code);
+ if (use_sse2) {
+ EMIT_SSE2_FPFUNC (code, fabs, ins->dreg, ins->sreg1);
+ }
+ else
+ amd64_fabs (code);
break;
case OP_TAN: {
/*
amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
break;
case OP_SQRT:
- if (use_sse2)
- g_assert_not_reached ();
- amd64_fsqrt (code);
+ if (use_sse2) {
+ EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
+ }
+ else
+ amd64_fsqrt (code);
break;
case OP_X86_FPOP:
if (!use_sse2)
}
case OP_FCOMPARE:
if (use_sse2) {
- amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
+ /*
+ * The two arguments are swapped because the fbranch instructions
+ * rely on this ordering to match the behaviour of the non-SSE case.
+ */
+ amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
break;
}
if (cfg->opt & MONO_OPT_FCMOV) {
*/
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
if (use_sse2)
- amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
+ amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
else {
amd64_fcomip (code, 1);
amd64_fstp (code, 0);
guchar *unordered_check;
amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
if (use_sse2)
- amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
+ amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
else {
amd64_fcomip (code, 1);
amd64_fstp (code, 0);
if (ins->dreg != AMD64_RAX)
amd64_pop_reg (code, AMD64_RAX);
break;
+ case OP_FCLT_MEMBASE:
+ case OP_FCGT_MEMBASE:
+ case OP_FCLT_UN_MEMBASE:
+ case OP_FCGT_UN_MEMBASE:
+ case OP_FCEQ_MEMBASE: {
+ guchar *unordered_check, *jump_to_end;
+ int x86_cond;
+ g_assert (use_sse2);
+
+ amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
+ amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
+
+ switch (ins->opcode) {
+ case OP_FCEQ_MEMBASE:
+ x86_cond = X86_CC_EQ;
+ break;
+ case OP_FCLT_MEMBASE:
+ case OP_FCLT_UN_MEMBASE:
+ x86_cond = X86_CC_LT;
+ break;
+ case OP_FCGT_MEMBASE:
+ case OP_FCGT_UN_MEMBASE:
+ x86_cond = X86_CC_GT;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
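+ /* comisd sets PF on an unordered (NaN) comparison, so check that first */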
+ unordered_check = code;
+ x86_branch8 (code, X86_CC_P, 0, FALSE);
+ amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
+
+ switch (ins->opcode) {
+ case OP_FCEQ_MEMBASE:
+ case OP_FCLT_MEMBASE:
+ case OP_FCGT_MEMBASE:
+ amd64_patch (unordered_check, code);
+ break;
+ case OP_FCLT_UN_MEMBASE:
+ case OP_FCGT_UN_MEMBASE:
+ jump_to_end = code;
+ x86_jump8 (code, 0);
+ amd64_patch (unordered_check, code);
+ amd64_inc_reg (code, ins->dreg);
+ amd64_patch (jump_to_end, code);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
case OP_FBEQ:
if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
guchar *jump = code;
case CEE_CKFINITE: {
if (use_sse2) {
/* Transfer value to the fp stack */
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
}
amd64_fstp (code, 0);
}
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
+ if (use_sse2)
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
break;
}
case OP_TLS_GET: {
amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
break;
}
+ case OP_MEMORY_BARRIER: {
+ /* Not needed on amd64 */
+ break;
+ }
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8: {
+ int dreg = ins->dreg;
+ guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
+
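+ /*
+ * xadd returns the old value in dreg; if dreg aliases the base register,
+ * loading sreg2 into it would clobber the address, so stage the result in R11.
+ */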
+ if (dreg == ins->inst_basereg)
+ dreg = AMD64_R11;
+
+ if (dreg != ins->sreg2)
+ amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
+
+ x86_prefix (code, X86_LOCK_PREFIX);
+ amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
+
+ if (dreg != ins->dreg)
+ amd64_mov_reg_reg (code, ins->dreg, dreg, size);
+
+ break;
+ }
+ case OP_ATOMIC_ADD_NEW_I4:
+ case OP_ATOMIC_ADD_NEW_I8: {
+ int dreg = ins->dreg;
+ guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
+
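+ /*
+ * dreg must not alias sreg2 (still needed for the final add) or the
+ * base register, so fall back to the R11 scratch register.
+ */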
+ if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
+ dreg = AMD64_R11;
+
+ amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
+ amd64_prefix (code, X86_LOCK_PREFIX);
+ amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
+ /* dreg contains the old value; add sreg2 to produce the new value */
+ amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
+
+ if (ins->dreg != dreg)
+ amd64_mov_reg_reg (code, ins->dreg, dreg, size);
+
+ break;
+ }
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8: {
+ guchar *br[2];
+ int sreg2 = ins->sreg2;
+ int breg = ins->inst_basereg;
+ guint32 size = (ins->opcode == OP_ATOMIC_EXCHANGE_I4) ? 4 : 8;
+
+ /*
+ * See http://msdn.microsoft.com/msdnmag/issues/0700/Win32/ for
+ * an explanation of how this works.
+ */
+
+ /* cmpxchg uses eax as the comparand, so we need to make sure we can use it;
+ * this is a hack to work around limits in the x86 reg allocator
+ * (req: dreg == eax and sreg2 != eax and breg != eax)
+ */
+ if (ins->dreg != AMD64_RAX)
+ amd64_push_reg (code, AMD64_RAX);
+
+ /* We need the EAX reg for the cmpxchg */
+ if (ins->sreg2 == AMD64_RAX) {
+ amd64_push_reg (code, AMD64_RDX);
+ amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RAX, size);
+ sreg2 = AMD64_RDX;
+ }
+
+ if (breg == AMD64_RAX) {
+ amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
+ breg = AMD64_R11;
+ }
+
+ amd64_mov_reg_membase (code, AMD64_RAX, breg, ins->inst_offset, size);
+
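+ /*
+ * Retry loop: the exchange succeeds only when the value read into RAX
+ * above still matches the memory operand at cmpxchg time.
+ */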
+ br [0] = code; amd64_prefix (code, X86_LOCK_PREFIX);
+ amd64_cmpxchg_membase_reg_size (code, breg, ins->inst_offset, sreg2, size);
+ br [1] = code; amd64_branch8 (code, X86_CC_NE, -1, FALSE);
+ amd64_patch (br [1], br [0]);
+
+ if (ins->dreg != AMD64_RAX) {
+ amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
+ amd64_pop_reg (code, AMD64_RAX);
+ }
+
+ if (ins->sreg2 != sreg2)
+ amd64_pop_reg (code, AMD64_RDX);
+
+ break;
+ }
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
+ gboolean compile_aot = !run_cctors;
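+ /* run_cctors is FALSE when compiling AOT images, so treat it as an AOT flag here */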
for (patch_info = ji; patch_info; patch_info = patch_info->next) {
unsigned char *ip = patch_info->ip.i + code;
target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
- if (mono_compile_aot) {
+ if (compile_aot) {
switch (patch_info->type) {
case MONO_PATCH_INFO_BB:
case MONO_PATCH_INFO_LABEL:
break;
- default: {
- /* Just to make code run at aot time work */
- const unsigned char **tmp;
-
- mono_domain_lock (domain);
- tmp = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer));
- mono_domain_unlock (domain);
-
- *tmp = target;
- target = (const unsigned char*)(guint64)((guint8*)tmp - (guint8*)ip);
- break;
- }
+ default:
+ /* No need to patch these */
+ continue;
}
}
case MONO_PATCH_INFO_CLASS_INIT: {
/* Might already been changed to a nop */
guint8* ip2 = ip;
- if (mono_compile_aot)
- amd64_call_membase (ip2, AMD64_RIP, 0);
- else {
- amd64_set_reg_template (ip2, GP_SCRATCH_REG);
- amd64_call_reg (ip2, GP_SCRATCH_REG);
- }
+ amd64_call_code (ip2, 0);
break;
}
case MONO_PATCH_INFO_METHOD_REL:
int alloc_size, pos, max_offset, i, quad;
guint8 *code;
CallInfo *cinfo;
+ gint32 lmf_offset = cfg->arch.lmf_offset;
cfg->code_size = MAX (((MonoMethodNormal *)method)->header->code_size * 4, 512);
code = cfg->native_code = g_malloc (cfg->code_size);
- amd64_push_reg (code, AMD64_RBP);
- amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
+ /* Amount of stack space allocated by register saving code */
+ pos = 0;
+
+ /*
+ * The prolog consists of the following parts:
+ * FP present:
+ * - push rbp, mov rbp, rsp
+ * - save callee saved regs using pushes
+ * - allocate frame
+ * - save lmf if needed
+ * FP not present:
+ * - allocate frame
+ * - save lmf if needed
+ * - save callee saved regs using moves
+ */
+
+ if (!cfg->arch.omit_fp) {
+ amd64_push_reg (code, AMD64_RBP);
+ amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
+ }
+
+ /* Save callee saved registers */
+ if (!cfg->arch.omit_fp && !method->save_lmf) {
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ amd64_push_reg (code, i);
+ pos += sizeof (gpointer);
+ }
+ }
+
+ alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
+
+ alloc_size -= pos;
+
+ if (cfg->arch.omit_fp)
+ /*
+ * On entry, the stack is misaligned by the pushing of the return
+ * address. It is made aligned either by the pushing of %rbp, or by
+ * this adjustment.
+ */
+ alloc_size += 8;
+
+ cfg->arch.stack_alloc_size = alloc_size;
+
+ /* Allocate stack frame */
+ if (alloc_size) {
+ /* See mono_emit_stack_alloc */
+#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+ guint32 remaining_size = alloc_size;
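+ /* Commit the frame one page at a time so each guard page is touched in order */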
+ while (remaining_size >= 0x1000) {
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
+ amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
+ remaining_size -= 0x1000;
+ }
+ if (remaining_size)
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
+#else
+ amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
+#endif
+ }
/* Stack alignment check */
#if 0
}
#endif
- alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
- pos = 0;
-
+ /* Save LMF */
if (method->save_lmf) {
- gint32 lmf_offset;
-
- pos = ALIGN_TO (pos + sizeof (MonoLMF), 16);
-
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, pos);
-
- lmf_offset = - cfg->arch.lmf_offset;
-
/* Save ip */
amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
/* Save fp */
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_RBP, 8);
+ /* Save sp */
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
/* Save method */
/* FIXME: add a relocation for this */
if (IS_IMM32 (cfg->method))
- amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
+ amd64_mov_membase_imm (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), (guint64)cfg->method, 8);
else {
amd64_mov_reg_imm (code, AMD64_R11, cfg->method);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
}
/* Save callee saved regs */
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
- } else {
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
+ }
+
+ /* Save callee saved registers */
+ if (cfg->arch.omit_fp && !method->save_lmf) {
+ gint32 save_area_offset = 0;
+ /* Save callee saved registers after sp is adjusted */
+ /* The registers are saved at the bottom of the frame */
+ /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_push_reg (code, i);
- pos += sizeof (gpointer);
+ amd64_mov_membase_reg (code, AMD64_RSP, save_area_offset, i, 8);
+ save_area_offset += 8;
}
}
- alloc_size -= pos;
-
- if (alloc_size) {
- /* See mono_emit_stack_alloc */
-#ifdef PLATFORM_WIN32
- guint32 remaining_size = alloc_size;
- while (remaining_size >= 0x1000) {
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
- amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
- remaining_size -= 0x1000;
- }
- if (remaining_size)
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, remaining_size);
-#else
- amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, alloc_size);
-#endif
- }
-
/* compute max_offset in order to use short forward jumps */
max_offset = 0;
if (cfg->opt & MONO_OPT_BRANCH) {
}
if (method->save_lmf) {
- gint32 lmf_offset;
-
if (lmf_tls_offset != -1) {
/* Load lmf quickly using the FS register */
x86_prefix (code, X86_FS_PREFIX);
(gpointer)"mono_get_lmf_addr");
}
- lmf_offset = - cfg->arch.lmf_offset;
-
/* Save lmf_addr */
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
/* Save previous_lmf */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
- amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
/* Set new lmf */
- amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
+ amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
}
guint8 *code;
int max_epilog_size = 16;
CallInfo *cinfo;
+ gint32 lmf_offset = cfg->arch.lmf_offset;
if (cfg->method->save_lmf)
max_epilog_size += 256;
pos = 0;
if (method->save_lmf) {
- gint32 lmf_offset = - cfg->arch.lmf_offset;
-
/* Restore previous lmf */
- amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
/* Restore caller saved regs */
+ if (cfg->used_int_regs & (1 << AMD64_RBP)) {
+ amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), 8);
+ }
if (cfg->used_int_regs & (1 << AMD64_RBX)) {
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
+ amd64_mov_reg_membase (code, AMD64_RBX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
}
if (cfg->used_int_regs & (1 << AMD64_R12)) {
- amd64_mov_reg_membase (code, AMD64_R12, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
+ amd64_mov_reg_membase (code, AMD64_R12, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
}
if (cfg->used_int_regs & (1 << AMD64_R13)) {
- amd64_mov_reg_membase (code, AMD64_R13, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
+ amd64_mov_reg_membase (code, AMD64_R13, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
}
if (cfg->used_int_regs & (1 << AMD64_R14)) {
- amd64_mov_reg_membase (code, AMD64_R14, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
+ amd64_mov_reg_membase (code, AMD64_R14, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
}
if (cfg->used_int_regs & (1 << AMD64_R15)) {
- amd64_mov_reg_membase (code, AMD64_R15, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
+ amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
}
} else {
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= sizeof (gpointer);
+ if (cfg->arch.omit_fp) {
+ gint32 save_area_offset = 0;
- if (pos) {
- if (pos == - sizeof (gpointer)) {
- /* Only one register, so avoid lea */
- for (i = AMD64_NREG - 1; i > 0; --i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
- }
- }
- else {
- amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ amd64_mov_reg_membase (code, i, AMD64_RSP, save_area_offset, 8);
+ save_area_offset += 8;
+ }
+ }
+ else {
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
+ pos -= sizeof (gpointer);
- /* Pop registers in reverse order */
- for (i = AMD64_NREG - 1; i > 0; --i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_pop_reg (code, i);
- }
+ if (pos) {
+ if (pos == - sizeof (gpointer)) {
+ /* Only one register, so avoid lea */
+ for (i = AMD64_NREG - 1; i > 0; --i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
+ }
+ }
+ else {
+ amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
+
+ /* Pop registers in reverse order */
+ for (i = AMD64_NREG - 1; i > 0; --i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ amd64_pop_reg (code, i);
+ }
+ }
}
}
}
}
g_free (cinfo);
- amd64_leave (code);
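+ /* Without a frame pointer, undo the explicit stack adjustment instead of using leave */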
+ if (cfg->arch.omit_fp) {
+ if (cfg->arch.stack_alloc_size)
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
+ } else {
+ amd64_leave (code);
+ }
amd64_ret (code);
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
+ if (cfg->arch.omit_fp) {
+ /*
+ * Encode the stack size into used_int_regs so the exception handler
+ * can access it.
+ */
+ g_assert (cfg->arch.stack_alloc_size < (1 << 16));
+ cfg->used_int_regs |= (1 << 31) | (cfg->arch.stack_alloc_size << 16);
+ }
}
void
patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
patch_info->ip.i = code - cfg->native_code;
- if (mono_compile_aot)
+ if (cfg->compile_aot) {
amd64_mov_reg_membase (code, GP_SCRATCH_REG, AMD64_RIP, 0, 8);
- else
- amd64_set_reg_template (code, GP_SCRATCH_REG);
- amd64_call_reg (code, GP_SCRATCH_REG);
+ amd64_call_reg (code, GP_SCRATCH_REG);
+ } else {
+ /* The callee is in memory allocated using the code manager */
+ amd64_call_code (code, 0);
+ }
amd64_mov_reg_imm (buf, AMD64_RSI, (code - cfg->native_code) - throw_ip);
while (buf < buf2)
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
guchar *code = p;
- CallInfo *cinfo;
+ CallInfo *cinfo = NULL;
MonoMethodSignature *sig;
MonoInst *inst;
int i, n, stack_area = 0;
rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];
if (IS_REX (rip [0])) {
- reg = amd64_rex_r (rip [0]);
+ reg = amd64_rex_b (rip [0]);
rip ++;
}
else
}
gpointer*
-mono_amd64_get_vcall_slot_addr (guint8* code, guint64 *regs)
+mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
{
guint32 reg;
guint32 disp;
*/
code -= 7;
- if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
- /* call *%reg */
- return NULL;
- }
- else if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
+ /*
+ * A given byte sequence can match more than one case here, so we have to be
+ * really careful about the ordering of the cases. Longer sequences
+ * come first.
+ */
+ if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
/* call OFFSET(%rip) */
- return NULL;
+ disp = *(guint32*)(code + 3);
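+ /* The displacement is RIP-relative, i.e. relative to the end of the 7 byte call instruction */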
+ return (gpointer*)(code + disp + 7);
}
else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
/* call *[reg+disp32] */
disp = *(guint32*)(code + 3);
//printf ("B: [%%r%d+0x%x]\n", reg, disp);
}
+ else if (code [2] == 0xe8) {
+ /* call <ADDR> */
+ return NULL;
+ }
+ else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
+ /* call *%reg */
+ return NULL;
+ }
else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
/* call *[reg+disp8] */
if (IS_REX (code [3]))
/* R11 is clobbered by the trampoline code */
g_assert (reg != AMD64_R11);
- return (gpointer)((regs [reg]) + disp);
+ return (gpointer)(((guint64)(regs [reg])) + disp);
}
-/*
- * Support for fast access to the thread-local lmf structure using the GS
- * segment register on NPTL + kernel 2.6.x.
- */
+gpointer*
+mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
+{
+ guint32 reg;
+ guint32 disp;
-static gboolean tls_offset_inited = FALSE;
+ code -= 10;
-/* code should be simply return <tls var>; */
-static int
-read_tls_offset_from_method (void* method)
-{
- guint8 *code = (guint8*)method;
+ if (IS_REX (code [0]) && (code [1] == 0x8b) && (code [3] == 0x48) && (code [4] == 0x8b) && (code [5] == 0x40) && (code [7] == 0x48) && (code [8] == 0xff) && (code [9] == 0xd0)) {
+ /* mov REG, %rax; mov <OFFSET>(%rax), %rax; call *%rax */
+ reg = amd64_rex_b (code [0]) + amd64_modrm_rm (code [2]);
+ disp = code [6];
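+ /* The 8 bit displacement from the mov <OFFSET>(%rax), %rax instruction */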
- /*
- * Determine the offset of mono_lfm_addr inside the TLS structures
- * by disassembling the function above.
- */
- /* This is generated by gcc 3.3.2 */
- if ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
- (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
- (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
- (code [9] == 0x00) && (code [10] == 0x00) && (code [11] == 0x00) &&
- (code [12] == 0x0) && (code [13] == 0x48) && (code [14] == 0x8b) &&
- (code [15] == 0x80)) {
- return *(gint32*)&(code [16]);
- } else if
- /* This is generated by gcc-3.3.2 with -O=2 */
- /* mov fs:0, %rax ; mov <offset>(%rax), %rax ; retq */
- ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
- (code [3] == 0x04) && (code [4] == 0x25) &&
- (code [9] == 0x48) && (code [10] == 0x8b) && (code [11] == 0x80) &&
- (code [16] == 0xc3)) {
- return *(gint32*)&(code [12]);
- } else if
- /* This is generated by gcc-3.4.1 */
- ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
- (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
- (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
- (code [13] == 0xc9) && (code [14] == 0xc3)) {
- return *(gint32*)&(code [9]);
- } else if
- /* This is generated by gcc-3.4.1 with -O=2 */
- ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
- (code [3] == 0x04) && (code [4] == 0x25)) {
- return *(gint32*)&(code [5]);
+ if (reg == AMD64_RAX)
+ return NULL;
+ else
+ return (gpointer*)(((guint64)(regs [reg])) + disp);
}
- return -1;
+ return NULL;
}
+/*
+ * Support for fast access to the thread-local lmf structure using the GS
+ * segment register on NPTL + kernel 2.6.x.
+ */
+
+static gboolean tls_offset_inited = FALSE;
+
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- pthread_t self = pthread_self();
- pthread_attr_t attr;
- void *staddr = NULL;
- size_t stsize = 0;
- struct sigaltstack sa;
-#endif
-
if (!tls_offset_inited) {
tls_offset_inited = TRUE;
- lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
- appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
- //thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
+ appdomain_tls_offset = mono_domain_get_tls_offset ();
+ lmf_tls_offset = mono_get_lmf_tls_offset ();
+ thread_tls_offset = mono_thread_get_tls_offset ();
}
-
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-
- /* Determine stack boundaries */
- if (!mono_running_on_valgrind ()) {
-#ifdef HAVE_PTHREAD_GETATTR_NP
- pthread_getattr_np( self, &attr );
-#else
-#ifdef HAVE_PTHREAD_ATTR_GET_NP
- pthread_attr_get_np( self, &attr );
-#elif defined(sun)
- pthread_attr_init( &attr );
- pthread_attr_getstacksize( &attr, &stsize );
-#else
-#error "Not implemented"
-#endif
-#endif
-#ifndef sun
- pthread_attr_getstack( &attr, &staddr, &stsize );
-#endif
- }
-
- /*
- * staddr seems to be wrong for the main thread, so we keep the value in
- * tls->end_of_stack
- */
- tls->stack_size = stsize;
-
- /* Setup an alternate signal stack */
- tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
- tls->signal_stack_size = SIGNAL_STACK_SIZE;
-
- sa.ss_sp = tls->signal_stack;
- sa.ss_size = SIGNAL_STACK_SIZE;
- sa.ss_flags = SS_ONSTACK;
- sigaltstack (&sa, NULL);
-#endif
}
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- struct sigaltstack sa;
-
- sa.ss_sp = tls->signal_stack;
- sa.ss_size = SIGNAL_STACK_SIZE;
- sa.ss_flags = SS_DISABLE;
- sigaltstack (&sa, NULL);
-
- if (tls->signal_stack)
- g_free (tls->signal_stack);
-#endif
}
void
mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
{
MonoCallInst *call = (MonoCallInst*)inst;
- int out_reg = param_regs [0];
- guint64 regpair;
+ CallInfo * cinfo = get_call_info (inst->signature, FALSE);
if (vt_reg != -1) {
- CallInfo * cinfo = get_call_info (inst->signature, FALSE);
MonoInst *vtarg;
if (cinfo->ret.storage == ArgValuetypeInReg) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
}
else {
- MONO_INST_NEW (cfg, vtarg, OP_SETREG);
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
vtarg->sreg1 = vt_reg;
vtarg->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, vtarg);
- regpair = (((guint64)out_reg) << 32) + vtarg->dreg;
- call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));
-
- out_reg = param_regs [1];
+ mono_call_inst_add_outarg_reg (call, vtarg->dreg, cinfo->ret.reg, FALSE);
}
-
- g_free (cinfo);
}
/* add the this argument */
if (this_reg != -1) {
MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_SETREG);
+ MONO_INST_NEW (cfg, this, OP_MOVE);
this->type = this_type;
this->sreg1 = this_reg;
this->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, this);
- regpair = (((guint64)out_reg) << 32) + this->dreg;
- call->out_ireg_args = g_slist_append (call->out_ireg_args, (gpointer)(regpair));
+ mono_call_inst_add_outarg_reg (call, this->dreg, cinfo->args [0].reg, FALSE);
}
+
+ g_free (cinfo);
}
MonoInst*
{
MonoInst *ins = NULL;
- if (use_sse2)
- return NULL;
-
if (cmethod->klass == mono_defaults.math_class) {
if (strcmp (cmethod->name, "Sin") == 0) {
MONO_INST_NEW (cfg, ins, OP_SIN);
MONO_INST_NEW (cfg, ins, OP_COS);
ins->inst_i0 = args [0];
} else if (strcmp (cmethod->name, "Tan") == 0) {
+ if (use_sse2)
+ return ins;
MONO_INST_NEW (cfg, ins, OP_TAN);
ins->inst_i0 = args [0];
} else if (strcmp (cmethod->name, "Atan") == 0) {
+ if (use_sse2)
+ return ins;
MONO_INST_NEW (cfg, ins, OP_ATAN);
ins->inst_i0 = args [0];
} else if (strcmp (cmethod->name, "Sqrt") == 0) {
ins->inst_i1 = args [1];
}
#endif
+ } else if (cmethod->klass == mono_defaults.thread_class &&
+ strcmp (cmethod->name, "MemoryBarrier") == 0) {
+ MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
+ } else if(cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
+ (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+
+ if (strcmp (cmethod->name, "Increment") == 0) {
+ MonoInst *ins_iconst;
+ guint32 opcode;
+
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_NEW_I8;
+ else
+ g_assert_not_reached ();
+ MONO_INST_NEW (cfg, ins, opcode);
+ MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
+ ins_iconst->inst_c0 = 1;
+
+ ins->inst_i0 = args [0];
+ ins->inst_i1 = ins_iconst;
+ } else if (strcmp (cmethod->name, "Decrement") == 0) {
+ MonoInst *ins_iconst;
+ guint32 opcode;
+
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_NEW_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_NEW_I8;
+ else
+ g_assert_not_reached ();
+ MONO_INST_NEW (cfg, ins, opcode);
+ MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
+ ins_iconst->inst_c0 = -1;
+
+ ins->inst_i0 = args [0];
+ ins->inst_i1 = ins_iconst;
+ } else if (strcmp (cmethod->name, "Add") == 0) {
+ guint32 opcode;
+
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_ADD_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_ATOMIC_ADD_I8;
+ else
+ g_assert_not_reached ();
+
+ MONO_INST_NEW (cfg, ins, opcode);
+
+ ins->inst_i0 = args [0];
+ ins->inst_i1 = args [1];
+ } else if (strcmp (cmethod->name, "Exchange") == 0) {
+ guint32 opcode;
+
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_EXCHANGE_I4;
+ else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
+ (fsig->params [0]->type == MONO_TYPE_I) ||
+ (fsig->params [0]->type == MONO_TYPE_OBJECT))
+ opcode = OP_ATOMIC_EXCHANGE_I8;
+ else
+ return NULL;
+
+ MONO_INST_NEW (cfg, ins, opcode);
+
+ ins->inst_i0 = args [0];
+ ins->inst_i1 = args [1];
+ } else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
+ /* 64 bit reads are already atomic */
+ MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
+ ins->inst_i0 = args [0];
+ }
+
+ /*
+ * Can't implement CompareExchange methods this way since they have
+ * three arguments.
+ */
}
return ins;