* Authors:
* Paolo Molaro (lupus@ximian.com)
* Dietmar Maurer (dietmar@ximian.com)
+ * Patrik Torstensson
*
* (C) 2003 Ximian, Inc.
*/
#include "mini.h"
#include <string.h>
+#include <math.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
+#include <mono/metadata/profiler-private.h>
+#include <mono/utils/mono-math.h>
+#include "trace.h"
#include "mini-x86.h"
#include "inssel.h"
-#include "regset.h"
#include "cpu-pentium.h"
-int mono_exc_esp_offset = 0;
+static gint lmf_tls_offset = -1;
+
+#ifdef PLATFORM_WIN32
+/* Under Windows, the default pinvoke calling convention is stdcall */
+#define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
+#else
+#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
+#endif
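+/* a stdcall callee pops its own arguments, so the caller-side stack
+ * adjustment after call instructions is skipped when this is true */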
+
+#define SIGNAL_STACK_SIZE (64 * 1024)
const char*
mono_arch_regname (int reg) {
case X86_EBX: return "%ebx";
case X86_ECX: return "%ecx";
case X86_EDX: return "%edx";
- case X86_ESP: return "%esp";
- case X86_EBP: return "%ebp";
+ case X86_ESP: return "%esp"; case X86_EBP: return "%ebp";
case X86_EDI: return "%edi";
case X86_ESI: return "%esi";
}
return "unknown";
}
-typedef struct {
- guint16 size;
- guint16 offset;
- guint8 pad;
-} MonoJitArgumentInfo;
-
/*
- * arch_get_argument_info:
+ * mono_arch_get_argument_info:
* @csig: a method signature
* @param_count: the number of parameters to consider
* @arg_info: an array to store the result infos
*
* Returns the size of the activation frame.
*/
-static int
-arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+int
+mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k, frame_size = 0;
int size, align, pad;
return frame_size;
}
-static int indent_level = 0;
-
-static void indent (int diff) {
- int v = indent_level;
- while (v-- > 0) {
- printf (". ");
- }
- indent_level += diff;
-}
-
-static void
-enter_method (MonoMethod *method, char *ebp)
-{
- int i, j;
- MonoClass *class;
- MonoObject *o;
- MonoJitArgumentInfo *arg_info;
- MonoMethodSignature *sig;
- char *fname;
-
- fname = mono_method_full_name (method, TRUE);
- indent (1);
- printf ("ENTER: %s(", fname);
- g_free (fname);
-
- if (((int)ebp & (MONO_ARCH_FRAME_ALIGNMENT - 1)) != 0) {
- g_error ("unaligned stack detected (%p)", ebp);
- }
-
- sig = method->signature;
-
- arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));
-
- arch_get_argument_info (sig, sig->param_count, arg_info);
-
- if (MONO_TYPE_ISSTRUCT (method->signature->ret)) {
- g_assert (!method->signature->ret->byref);
-
- printf ("VALUERET:%p, ", *((gpointer *)(ebp + 8)));
- }
-
- if (method->signature->hasthis) {
- gpointer *this = (gpointer *)(ebp + arg_info [0].offset);
- if (method->klass->valuetype) {
- printf ("value:%p, ", *this);
- } else {
- o = *((MonoObject **)this);
-
- if (o) {
- class = o->vtable->klass;
-
- if (class == mono_defaults.string_class) {
- printf ("this:[STRING:%p:%s], ", o, mono_string_to_utf8 ((MonoString *)o));
- } else {
- printf ("this:%p[%s.%s], ", o, class->name_space, class->name);
- }
- } else
- printf ("this:NULL, ");
- }
- }
-
- for (i = 0; i < method->signature->param_count; ++i) {
- gpointer *cpos = (gpointer *)(ebp + arg_info [i + 1].offset);
- int size = arg_info [i + 1].size;
-
- MonoType *type = method->signature->params [i];
-
- if (type->byref) {
- printf ("[BYREF:%p], ", *cpos);
- } else switch (type->type) {
-
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- printf ("%p, ", (gpointer)*((int *)(cpos)));
- break;
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- printf ("%d, ", *((int *)(cpos)));
- break;
- case MONO_TYPE_STRING: {
- MonoString *s = *((MonoString **)cpos);
- if (s) {
- g_assert (((MonoObject *)s)->vtable->klass == mono_defaults.string_class);
- printf ("[STRING:%p:%s], ", s, mono_string_to_utf8 (s));
- } else
- printf ("[STRING:null], ");
- break;
- }
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT: {
- o = *((MonoObject **)cpos);
- if (o) {
- class = o->vtable->klass;
-
- if (class == mono_defaults.string_class) {
- printf ("[STRING:%p:%s], ", o, mono_string_to_utf8 ((MonoString *)o));
- } else if (class == mono_defaults.int32_class) {
- printf ("[INT32:%p:%d], ", o, *(gint32 *)((char *)o + sizeof (MonoObject)));
- } else
- printf ("[%s.%s:%p], ", class->name_space, class->name, o);
- } else {
- printf ("%p, ", *((gpointer *)(cpos)));
- }
- break;
- }
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_SZARRAY:
- printf ("%p, ", *((gpointer *)(cpos)));
- break;
- case MONO_TYPE_I8:
- case MONO_TYPE_U8:
- printf ("0x%016llx, ", *((gint64 *)(cpos)));
- break;
- case MONO_TYPE_R4:
- printf ("%f, ", *((float *)(cpos)));
- break;
- case MONO_TYPE_R8:
- printf ("%f, ", *((double *)(cpos)));
- break;
- case MONO_TYPE_VALUETYPE:
- printf ("[");
- for (j = 0; j < size; j++)
- printf ("%02x,", *((guint8*)cpos +j));
- printf ("], ");
- break;
- default:
- printf ("XX, ");
- }
- }
-
- printf (")\n");
-}
-
-static void
-leave_method (MonoMethod *method, ...)
-{
- MonoType *type;
- char *fname;
- va_list ap;
-
- va_start(ap, method);
-
- fname = mono_method_full_name (method, TRUE);
- indent (-1);
- printf ("LEAVE: %s", fname);
- g_free (fname);
-
- type = method->signature->ret;
-
-handle_enum:
- switch (type->type) {
- case MONO_TYPE_VOID:
- break;
- case MONO_TYPE_BOOLEAN: {
- int eax = va_arg (ap, int);
- if (eax)
- printf ("TRUE:%d", eax);
- else
- printf ("FALSE");
-
- break;
- }
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U: {
- int eax = va_arg (ap, int);
- printf ("EAX=%d", eax);
- break;
- }
- case MONO_TYPE_STRING: {
- MonoString *s = va_arg (ap, MonoString *);
-;
- if (s) {
- g_assert (((MonoObject *)s)->vtable->klass == mono_defaults.string_class);
- printf ("[STRING:%p:%s]", s, mono_string_to_utf8 (s));
- } else
- printf ("[STRING:null], ");
- break;
- }
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT: {
- MonoObject *o = va_arg (ap, MonoObject *);
-
- if (o) {
- if (o->vtable->klass == mono_defaults.boolean_class) {
- printf ("[BOOLEAN:%p:%d]", o, *((guint8 *)o + sizeof (MonoObject)));
- } else if (o->vtable->klass == mono_defaults.int32_class) {
- printf ("[INT32:%p:%d]", o, *((gint32 *)((char *)o + sizeof (MonoObject))));
- } else if (o->vtable->klass == mono_defaults.int64_class) {
- printf ("[INT64:%p:%lld]", o, *((gint64 *)((char *)o + sizeof (MonoObject))));
- } else
- printf ("[%s.%s:%p]", o->vtable->klass->name_space, o->vtable->klass->name, o);
- } else
- printf ("[OBJECT:%p]", o);
-
- break;
- }
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_SZARRAY: {
- gpointer p = va_arg (ap, gpointer);
- printf ("EAX=%p", p);
- break;
- }
- case MONO_TYPE_I8: {
- gint64 l = va_arg (ap, gint64);
- printf ("EAX/EDX=0x%16llx", l);
- break;
- }
- case MONO_TYPE_U8: {
- gint64 l = va_arg (ap, gint64);
- printf ("EAX/EDX=0x%16llx", l);
- break;
- }
- case MONO_TYPE_R8: {
- double f = va_arg (ap, double);
- printf ("FP=%f\n", f);
- break;
- }
- case MONO_TYPE_VALUETYPE:
- if (type->data.klass->enumtype) {
- type = type->data.klass->enum_basetype;
- goto handle_enum;
- } else {
- guint8 *p = va_arg (ap, gpointer);
- int j, size, align;
- size = mono_type_size (type, &align);
- printf ("[");
- for (j = 0; p && j < size; j++)
- printf ("%02x,", p [j]);
- printf ("]");
- }
- break;
- default:
- printf ("(unknown return type %x)", method->signature->ret->type);
- }
-
- printf ("\n");
-}
-
static const guchar cpuid_impl [] = {
0x55, /* push %ebp */
0x89, 0xe5, /* mov %esp,%ebp */
return 0;
}
+/*
+ * Initialize the cpu to execute managed code.
+ */
+void
+mono_arch_cpu_init (void)
+{
+ guint16 fpcw;
+
+ /* spec compliance requires running with double precision */
+ __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
+ fpcw &= ~X86_FPCW_PRECC_MASK;
+ fpcw |= X86_FPCW_PREC_DOUBLE;
+ __asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
+ __asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
+
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
guint32
-mono_arch_cpu_optimizazions (void)
+mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
int eax, ebx, ecx, edx;
guint32 opts = 0;
-
+
+ *exclude_mask = 0;
/* Feature Flags function, flags returned in EDX. */
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 15)) {
opts |= MONO_OPT_CMOV;
if (edx & 1)
opts |= MONO_OPT_FCMOV;
- }
+ else
+ *exclude_mask |= MONO_OPT_FCMOV;
+ } else
+ *exclude_mask |= MONO_OPT_CMOV;
}
return opts;
}
case MONO_TYPE_U4:
case MONO_TYPE_I:
case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
return TRUE;
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
case MONO_TYPE_CLASS:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
- return FALSE;
+ return TRUE;
case MONO_TYPE_VALUETYPE:
if (t->data.klass->enumtype)
return is_regsize_var (t->data.klass->enum_basetype);
MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars */
- if (vmv->range.first_use.abs_pos > vmv->range.last_use.abs_pos)
+ if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
continue;
- if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
+ if ((ins->flags & (MONO_INST_IS_DEAD|MONO_INST_VOLATILE|MONO_INST_INDIRECT)) ||
+ (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
continue;
- /* we can only allocate 32 bit values */
- if (is_regsize_var (ins->inst_vtype)) {
+		/* we don't allocate I1 to registers because there is no simple way
+		 * to sign-extend 8-bit quantities in caller-saved registers on x86 */
+ if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) ||
+ (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2)||
+ (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
- vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
+ vars = g_list_prepend (vars, vmv);
}
}
+ vars = mono_varlist_sort (cfg, vars, 0);
+
return vars;
}
return regs;
}
+
+/*
+ * mono_arch_regalloc_cost:
+ *
+ * Return the cost, in number of memory references, of the action of
+ * allocating the variable VMV into a register during global register
+ * allocation.
+ */
+guint32
+mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
+{
+ MonoInst *ins = cfg->varinfo [vmv->idx];
+
+ if (cfg->method->save_lmf)
+ /* The register is already saved */
+ return (ins->opcode == OP_ARG) ? 1 : 0;
+ else
+ /* push+pop+possible load if it is an argument */
+ return (ins->opcode == OP_ARG) ? 3 : 2;
+}
/*
* Set var information according to the calling convention. X86 version.
header = ((MonoMethodNormal *)m->method)->header;
sig = m->method->signature;
-
+
offset = 8;
curinst = 0;
if (MONO_TYPE_ISSTRUCT (sig->ret)) {
curinst++;
}
+ if (sig->call_convention == MONO_CALL_VARARG) {
+ m->sig_cookie = offset;
+ offset += sizeof (gpointer);
+ }
+
for (i = 0; i < sig->param_count; ++i) {
inst = m->varinfo [curinst];
if (inst->opcode != OP_REGVAR) {
}
offset = 0;
+
/* reserve space to save LMF and caller saved registers */
- offset += sizeof (MonoLMF);
- /* reserve space to store the esp */
- offset += sizeof (gpointer);
+ if (m->method->save_lmf) {
+ offset += sizeof (MonoLMF);
+ } else {
+ if (m->used_int_regs & (1 << X86_EBX)) {
+ offset += 4;
+ }
- /* this is a global constant */
- mono_exc_esp_offset = -offset;
+ if (m->used_int_regs & (1 << X86_EDI)) {
+ offset += 4;
+ }
+
+ if (m->used_int_regs & (1 << X86_ESI)) {
+ offset += 4;
+ }
+ }
for (i = curinst; i < m->num_varinfo; ++i) {
inst = m->varinfo [i];
		/* inst->unused indicates native-sized value types; this is used by the
		 * pinvoke wrappers when they call functions returning structures */
- if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype))
+ if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
else
size = mono_type_size (inst->inst_vtype, &align);
inst->inst_offset = -offset;
//g_print ("allocating local %d to %d\n", i, -offset);
}
- offset += 3;
- offset &= ~3;
-
+ offset += (MONO_ARCH_FRAME_ALIGNMENT - 1);
+ offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
/* change sign? */
m->stack_offset = -offset;
*/
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
- MonoInst *arg, *in, **rev_args;
+ MonoInst *arg, *in;
MonoMethodSignature *sig;
int i, n, stack_size, type;
MonoType *ptype;
+ stack_size = 0;
+ /* add the vararg cookie before the non-implicit args */
+ if (call->signature->call_convention == MONO_CALL_VARARG) {
+ MonoInst *sig_arg;
+ /* FIXME: Add support for signature tokens to AOT */
+ cfg->disable_aot = TRUE;
+ MONO_INST_NEW (cfg, arg, OP_OUTARG);
+ MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->inst_p0 = call->signature;
+ arg->inst_left = sig_arg;
+ arg->type = STACK_PTR;
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
+ stack_size += sizeof (gpointer);
+ }
sig = call->signature;
n = sig->param_count + sig->hasthis;
- rev_args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
-
- if (sig->ret && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
- //g_warning ("long value returned");
- }
+
if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret))
- stack_size = 4;
- else
- stack_size = 0;
+ stack_size += sizeof (gpointer);
for (i = 0; i < n; ++i) {
if (is_virtual && i == 0) {
			/* the argument will be attached to the call instruction */
- rev_args [n - 1] = arg = NULL;
in = call->args [i];
stack_size += 4;
} else {
arg->cil_code = in->cil_code;
arg->inst_left = in;
arg->type = in->type;
- rev_args [n - i - 1] = arg;
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
if (i >= sig->hasthis) {
ptype = sig->params [i - sig->hasthis];
if (ptype->byref)
size = mono_type_native_stack_size (&in->klass->byval_arg, NULL);
else
size = mono_type_stack_size (&in->klass->byval_arg, NULL);
+
stack_size += size;
arg->opcode = OP_OUTARG_VT;
arg->klass = in->klass;
goto handle_enum;
}
break;
+ case MONO_TYPE_TYPEDBYREF:
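+			/* a typed reference is passed by value as a small struct */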
+ stack_size += sizeof (MonoTypedRef);
+ arg->opcode = OP_OUTARG_VT;
+ arg->klass = in->klass;
+ arg->unused = sig->pinvoke;
+ arg->inst_imm = sizeof (MonoTypedRef);
+ break;
+ case MONO_TYPE_GENERICINST:
+ type = ptype->data.generic_inst->generic_type->type;
+ goto handle_enum;
+
default:
- g_warning ("unknown type 0x%02x\n", type);
- g_assert_not_reached ();
+ g_error ("unknown type 0x%02x in mono_arch_call_opcode\n", type);
}
} else {
/* the this argument */
}
}
}
- /* they need to be pushed in reverse order */
- call->args = rev_args;
+ /* if the function returns a struct, the called method already does a ret $0x4 */
+ if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret))
+ stack_size -= 4;
call->stack_usage = stack_size;
/*
* should set more info in call, such as the stack space
x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
} else { \
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
- x86_branch32 (code, cond, 0, sign); \
+ if ((cfg->opt & MONO_OPT_BRANCH) && \
+ x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
+ x86_branch8 (code, cond, 0, sign); \
+ else \
+ x86_branch32 (code, cond, 0, sign); \
} \
} else { \
if (ins->inst_true_bb->native_offset) { \
x86_fnstsw (code); \
} while (0);
+/* FIXME: Add more instructions */
+#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM))
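+/* used below to decide whether 'mov reg, 0' can become 'xor reg, reg',
+ * which is shorter but clobbers the condition flags */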
+
static void
peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
case OP_ICONST:
/* reg = 0 -> XOR (reg, reg) */
			/* XOR sets cflags on x86, so we can't always do it */
- if (ins->inst_c0 == 0 && ins->next &&
- (ins->next->opcode == CEE_BR)) {
+ if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
ins->opcode = CEE_XOR;
ins->sreg1 = ins->dreg;
ins->sreg2 = ins->dreg;
if (ins->dreg != ins->sreg1) {
ins->opcode = OP_MOVE;
} else {
- last_ins->next = ins->next;
- ins = ins->next;
+ last_ins->next = ins->next;
+ ins = ins->next;
continue;
}
}
break;
case OP_COMPARE_IMM:
- /* OP_COMPARE_IMM (reg, 0) --> OP_X86_TEST_NULL (reg) */
+ /* OP_COMPARE_IMM (reg, 0)
+ * -->
+ * OP_X86_TEST_NULL (reg)
+ */
if (ins->inst_imm == 0 && ins->next &&
(ins->next->opcode == CEE_BEQ || ins->next->opcode == CEE_BNE_UN ||
ins->next->opcode == OP_CEQ)) {
ins->opcode = OP_X86_TEST_NULL;
}
break;
+ case OP_X86_COMPARE_MEMBASE_IMM:
+ /*
+ * OP_STORE_MEMBASE_REG reg, offset(basereg)
+ * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
+ * -->
+ * OP_STORE_MEMBASE_REG reg, offset(basereg)
+ * OP_COMPARE_IMM reg, imm
+ *
+		 * Note: if imm = 0, OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
+ */
+ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = OP_COMPARE_IMM;
+ ins->sreg1 = last_ins->sreg1;
+
+ /* check if we can remove cmp reg,0 with test null */
+ if (ins->inst_imm == 0 && ins->next &&
+ (ins->next->opcode == CEE_BEQ || ins->next->opcode == CEE_BNE_UN ||
+ ins->next->opcode == OP_CEQ)) {
+ ins->opcode = OP_X86_TEST_NULL;
+ }
+ }
+
+ break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
/*
- * OP_STORE_MEMBASE_REG reg, offset(basereg)
- * OP_LOAD_MEMBASE offset(basereg), reg
+		 * Note: if reg1 = reg2, the load op is removed
+ *
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_MOVE reg1, reg2
*/
if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
|| last_ins->opcode == OP_STORE_MEMBASE_REG) &&
/*
* Note: reg1 must be different from the basereg in the second load
+		 * Note: if reg1 = reg2, the second load is removed
+ *
* OP_LOAD_MEMBASE offset(basereg), reg1
* OP_LOAD_MEMBASE offset(basereg), reg2
* -->
break;
case OP_LOADU1_MEMBASE:
case OP_LOADI1_MEMBASE:
- /*
- * FIXME: Missing explanation
- */
+ /*
+		 * Note: if reg1 = reg2, the load op is removed
+ *
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_MOVE reg1, reg2
+ */
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
break;
case OP_LOADU2_MEMBASE:
case OP_LOADI2_MEMBASE:
- /*
- * FIXME: Missing explanation
- */
+ /*
+		 * Note: if reg1 = reg2, the load op is removed
+ *
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_MOVE reg1, reg2
+ */
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
case CEE_CONV_I4:
case CEE_CONV_U4:
case OP_MOVE:
- /*
+ /*
+ * Removes:
+ *
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
continue;
}
/*
+ * Removes:
+ *
* OP_MOVE sreg, dreg
* OP_MOVE dreg, sreg
*/
#define DEBUG(a) if (cfg->verbose_level > 1) a
//#define DEBUG(a)
+
+/*
+ * returns the offset used by spillvar. It allocates a new
+ * spill variable if necessary.
+ */
+static int
+mono_spillvar_offset (MonoCompile *cfg, int spillvar)
+{
+ MonoSpillInfo **si, *info;
+ int i = 0;
+
+ si = &cfg->spill_info;
+
+ while (i <= spillvar) {
+
+ if (!*si) {
+ *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
+ info->next = NULL;
+ cfg->stack_offset -= sizeof (gpointer);
+ info->offset = cfg->stack_offset;
+ }
+
+ if (i == spillvar)
+ return (*si)->offset;
+
+ i++;
+ si = &(*si)->next;
+ }
+
+ g_assert_not_reached ();
+ return 0;
+}
+
+/*
+ * returns the offset used by spillvar. It allocates a new
+ * spill float variable if necessary.
+ * (same as mono_spillvar_offset but for float)
+ */
+static int
+mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
+{
+ MonoSpillInfo **si, *info;
+ int i = 0;
+
+ si = &cfg->spill_info_float;
+
+ while (i <= spillvar) {
+
+ if (!*si) {
+ *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
+ info->next = NULL;
+ cfg->stack_offset -= sizeof (double);
+ info->offset = cfg->stack_offset;
+ }
+
+ if (i == spillvar)
+ return (*si)->offset;
+
+ i++;
+ si = &(*si)->next;
+ }
+
+ g_assert_not_reached ();
+ return 0;
+}
+
+/*
+ * Creates a store for spilled floating point items
+ */
+static MonoInst*
+create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
+{
+ MonoInst *store;
+ MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
+ store->sreg1 = reg;
+ store->inst_destbasereg = X86_EBP;
+ store->inst_offset = mono_spillvar_offset_float (cfg, spill);
+
+ DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08x(%%sp)) (from %d)\n", spill, store->inst_offset, reg));
+ return store;
+}
+
+/*
+ * Creates a load for spilled floating point items
+ */
+static MonoInst*
+create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
+{
+ MonoInst *load;
+ MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
+ load->dreg = reg;
+ load->inst_basereg = X86_EBP;
+ load->inst_offset = mono_spillvar_offset_float (cfg, spill);
+
+ DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08x(%%sp)) (from %d)\n", spill, load->inst_offset, reg));
+ return load;
+}
+
#define reg_is_freeable(r) ((r) >= 0 && (r) <= 7 && X86_IS_CALLEE ((r)))
typedef struct {
int killed_in;
int last_use;
int prev_use;
+ int flags; /* used to track fp spill/load */
} RegTrack;
-static const char*const * ins_spec = pentium;
+static const char*const * ins_spec = pentium_desc;
static void
print_ins (int i, MonoInst *ins)
MonoInst *load;
int i, sel, spill;
- DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
+ DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
/* exclude the registers in the current instruction */
if (reg != ins->sreg1 && (reg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg1] >= 0))) {
if (ins->sreg1 >= MONO_MAX_IREGS)
regmask &= ~ (1 << cfg->rs->iassign [ins->sreg1]);
else
regmask &= ~ (1 << ins->sreg1);
- DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
+ DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
}
if (reg != ins->sreg2 && (reg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg2] >= 0))) {
if (ins->sreg2 >= MONO_MAX_IREGS)
regmask &= ~ (1 << cfg->rs->iassign [ins->sreg2]);
else
regmask &= ~ (1 << ins->sreg2);
- DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
+ DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
}
if (reg != ins->dreg && reg_is_freeable (ins->dreg)) {
regmask &= ~ (1 << ins->dreg);
- DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
+ DEBUG (g_print ("\t\texcluding dreg %s\n", mono_arch_regname (ins->dreg)));
}
- DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
+ DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
g_assert (regmask); /* need at least a register we can free */
sel = -1;
/* we should track prev_use and spill the register that's farther */
for (i = 0; i < MONO_MAX_IREGS; ++i) {
if (regmask & (1 << i)) {
sel = i;
- DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
+ DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
break;
}
}
}
load->next = ins->next;
ins->next = load;
- DEBUG (g_print ("SPILLED LOAD (%d at 0x%08x(%%ebp)) R%d (freed %s)\n", spill, load->inst_offset, i, mono_arch_regname (sel)));
+ DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08x(%%ebp)) R%d (freed %s)\n", spill, load->inst_offset, i, mono_arch_regname (sel)));
i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
g_assert (i == sel);
store->next = ins->next;
ins->next = store;
}
- DEBUG (g_print ("SPILLED STORE (%d at 0x%08x(%%ebp)) R%d (from %s)\n", spill, store->inst_offset, prev_reg, mono_arch_regname (reg)));
+ DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08x(%%ebp)) R%d (from %s)\n", spill, store->inst_offset, prev_reg, mono_arch_regname (reg)));
return store;
}
item->data = to_insert;
}
+
#if 0
static int
alloc_int_reg (MonoCompile *cfg, InstList *curinst, MonoInst *ins, int sym_reg, guint32 allow_mask)
}
#endif
-#include "cprop.c"
+/* flags used in reginfo->flags */
+enum {
+ MONO_X86_FP_NEEDS_LOAD_SPILL = 1 << 0,
+ MONO_X86_FP_NEEDS_SPILL = 1 << 1,
+ MONO_X86_FP_NEEDS_LOAD = 1 << 2,
+ MONO_X86_REG_NOT_ECX = 1 << 3,
+ MONO_X86_REG_EAX = 1 << 4,
+ MONO_X86_REG_EDX = 1 << 5,
+ MONO_X86_REG_ECX = 1 << 6
+};
+
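+/*
+ * Allocate an integer register for SYM_REG, first honoring the
+ * allocation hints in FLAGS, then falling back to any register in
+ * DEST_MASK, and finally spilling if nothing is free.
+ */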
+static int
+mono_x86_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
+{
+ int val;
+ int test_mask = dest_mask;
+
+ if (flags & MONO_X86_REG_EAX)
+ test_mask &= (1 << X86_EAX);
+ else if (flags & MONO_X86_REG_EDX)
+ test_mask &= (1 << X86_EDX);
+ else if (flags & MONO_X86_REG_ECX)
+ test_mask &= (1 << X86_ECX);
+ else if (flags & MONO_X86_REG_NOT_ECX)
+ test_mask &= ~ (1 << X86_ECX);
+
+ val = mono_regstate_alloc_int (cfg->rs, test_mask);
+ if (val >= 0 && test_mask != dest_mask)
+ DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
+
+ if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
+ DEBUG(g_print ("\tFailed to allocate flag suggested mask (%u) but exluding ECX\n", test_mask));
+ val = mono_regstate_alloc_int (cfg->rs, (dest_mask & (~1 << X86_ECX)));
+ }
+
+ if (val < 0) {
+ val = mono_regstate_alloc_int (cfg->rs, dest_mask);
+ if (val < 0)
+ val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg);
+ }
+
+ return val;
+}
+
+
+/*#include "cprop.c"*/
/*
* Local register allocation.
InstList *tmp, *reversed = NULL;
const char *spec;
guint32 src1_mask, src2_mask, dest_mask;
+ GList *fspill_list = NULL;
+ int fspill = 0;
if (!bb->code)
return;
rs->next_vireg = bb->max_ireg;
rs->next_vfreg = bb->max_freg;
mono_regstate_assign (rs);
- reginfo = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vireg);
- reginfof = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vfreg);
+ reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
+ reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
rs->ifree_mask = X86_CALLEE_REGS;
ins = bb->code;
- if (cfg->opt & MONO_OPT_COPYPROP)
- local_copy_prop (cfg, ins);
-
+ /*if (cfg->opt & MONO_OPT_COPYPROP)
+ local_copy_prop (cfg, ins);*/
+
i = 1;
- fpcount = 0; /* FIXME: track fp stack utilization */
+ fpcount = 0;
DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
/* forward pass on the instructions to collect register liveness info */
while (ins) {
spec = ins_spec [ins->opcode];
+
DEBUG (print_ins (i, ins));
+
if (spec [MONO_INST_SRC1]) {
- if (spec [MONO_INST_SRC1] == 'f')
+ if (spec [MONO_INST_SRC1] == 'f') {
+ GList *spill;
reginfo1 = reginfof;
+
+ spill = g_list_first (fspill_list);
+ if (spill && fpcount < MONO_MAX_FREGS) {
+ reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
+ fspill_list = g_list_remove (fspill_list, spill->data);
+ } else
+ fpcount--;
+ }
else
reginfo1 = reginfo;
reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
reginfo1 [ins->sreg1].last_use = i;
+ if (spec [MONO_INST_SRC1] == 'L') {
+ /* The virtual register is allocated sequentially */
+ reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
+ reginfo1 [ins->sreg1 + 1].last_use = i;
+ if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
+ reginfo1 [ins->sreg1 + 1].born_in = i;
+
+ reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
+ reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
+ }
} else {
ins->sreg1 = -1;
}
if (spec [MONO_INST_SRC2]) {
- if (spec [MONO_INST_SRC2] == 'f')
+ if (spec [MONO_INST_SRC2] == 'f') {
+ GList *spill;
reginfo2 = reginfof;
+ spill = g_list_first (fspill_list);
+ if (spill) {
+ reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
+ fspill_list = g_list_remove (fspill_list, spill->data);
+ if (fpcount >= MONO_MAX_FREGS) {
+ fspill++;
+ fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
+ reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
+ }
+ } else
+ fpcount--;
+ }
else
reginfo2 = reginfo;
reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
reginfo2 [ins->sreg2].last_use = i;
+ if (spec [MONO_INST_SRC2] == 'L') {
+ /* The virtual register is allocated sequentially */
+ reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
+ reginfo2 [ins->sreg2 + 1].last_use = i;
+ if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
+ reginfo2 [ins->sreg2 + 1].born_in = i;
+ }
+ if (spec [MONO_INST_CLOB] == 's') {
+ reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
+ reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
+ }
} else {
ins->sreg2 = -1;
}
if (spec [MONO_INST_DEST]) {
- if (spec [MONO_INST_DEST] == 'f')
+ if (spec [MONO_INST_DEST] == 'f') {
reginfod = reginfof;
+ if (fpcount >= MONO_MAX_FREGS) {
+ reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
+ fspill++;
+ fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
+ fpcount--;
+ }
+ fpcount++;
+ }
else
reginfod = reginfo;
if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
reginfod [ins->dreg].last_use = i;
if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
reginfod [ins->dreg].born_in = i;
- if (spec [MONO_INST_DEST] == 'l') {
- /* result in eax:edx, the virtual register is allocated sequentially */
+ if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
+ /* The virtual register is allocated sequentially */
reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
reginfod [ins->dreg + 1].last_use = i;
if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
reginfod [ins->dreg + 1].born_in = i;
+
+ reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
+ reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
}
} else {
ins->dreg = -1;
}
+
reversed = inst_list_prepend (cfg->mempool, reversed, ins);
++i;
ins = ins->next;
}
+	// TODO: in verify mode, check whether anything is left on the fp stack
+ fspill = 0;
+
DEBUG (print_regtrack (reginfo, rs->next_vireg));
DEBUG (print_regtrack (reginfof, rs->next_vfreg));
tmp = reversed;
while (tmp) {
- int prev_dreg, prev_sreg1, prev_sreg2;
+ int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
dest_mask = src1_mask = src2_mask = X86_CALLEE_REGS;
--i;
ins = tmp->data;
spec = ins_spec [ins->opcode];
+ prev_dreg = -1;
+ clob_dreg = -1;
DEBUG (g_print ("processing:"));
DEBUG (print_ins (i, ins));
if (spec [MONO_INST_CLOB] == 's') {
* copy from this to ECX.
*/
if (val == X86_ECX && ins->dreg != ins->sreg2) {
- int new_dest = mono_regstate_alloc_int (rs, dest_mask);
- if (new_dest < 0)
- new_dest = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
+ int new_dest;
+ new_dest = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
g_assert (new_dest >= 0);
+ DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
+
+ rs->isymbolic [new_dest] = ins->dreg;
+ rs->iassign [ins->dreg] = new_dest;
+ clob_dreg = ins->dreg;
ins->dreg = new_dest;
create_copy_ins (cfg, X86_ECX, new_dest, ins);
need_ecx_spill = FALSE;
* shift instruction clobbers the first operand.
*/
MonoInst *copy = create_copy_ins (cfg, ins->dreg, val, NULL);
+ DEBUG (g_print ("\tclob:s moved sreg1 from R%d to R%d\n", val, ins->dreg));
insert_before_ins (ins, tmp, copy);
}
val = rs->iassign [ins->sreg2];
}
} else if (spec [MONO_INST_CLOB] == 'd') { /* division */
int dest_reg = X86_EAX;
- if (spec [MONO_INST_DEST] == 'd')
+ int clob_reg = X86_EDX;
+ if (spec [MONO_INST_DEST] == 'd') {
			dest_reg = X86_EDX; /* remainder */
+ clob_reg = X86_EAX;
+ }
val = rs->iassign [ins->dreg];
if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
if (val < -1) {
/* the register gets spilled after this inst */
int spill = -val -1;
- dest_mask = 1 << (dest_reg == X86_EAX? X86_EDX: X86_EAX);
+ dest_mask = 1 << clob_reg;
prev_dreg = ins->dreg;
val = mono_regstate_alloc_int (rs, dest_mask);
if (val < 0)
}
} else {
DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
+ prev_dreg = ins->dreg;
rs->iassign [ins->dreg] = dest_reg;
rs->isymbolic [dest_reg] = ins->dreg;
ins->dreg = dest_reg;
//DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
if (val != dest_reg) { /* force a copy */
create_copy_ins (cfg, val, dest_reg, ins);
+ if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
+ DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
+ get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg]);
+ mono_regstate_free_int (rs, dest_reg);
+ }
}
}
+ if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= 8)) {
+ DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
+ get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg]);
+ mono_regstate_free_int (rs, clob_reg);
+ }
src1_mask = 1 << X86_EAX;
src2_mask = 1 << X86_ECX;
}
if (spec [MONO_INST_DEST] == 'l') {
- if (!(rs->ifree_mask & (1 << X86_EAX))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [X86_EAX]));
+ int hreg;
+ val = rs->iassign [ins->dreg];
+		/* check the special case where dreg has been moved from ECX (clob shift) */
+ if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
+ hreg = clob_dreg + 1;
+ else
+ hreg = ins->dreg + 1;
+
+ /* base prev_dreg on fixed hreg, handle clob case */
+ val = hreg - 1;
+
+ if (val != rs->isymbolic [X86_EAX] && !(rs->ifree_mask & (1 << X86_EAX))) {
+ DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [X86_EAX]));
get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [X86_EAX]);
mono_regstate_free_int (rs, X86_EAX);
}
- if (!(rs->ifree_mask & (1 << X86_EDX))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [X86_EDX]));
+ if (hreg != rs->isymbolic [X86_EDX] && !(rs->ifree_mask & (1 << X86_EDX))) {
+ DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [X86_EDX]));
get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [X86_EDX]);
mono_regstate_free_int (rs, X86_EDX);
}
}
- /* update for use with FP regs... */
- if (spec [MONO_INST_DEST] != 'f' && ins->dreg >= MONO_MAX_IREGS) {
+
+ /* Track dreg */
+ if (spec [MONO_INST_DEST] == 'f') {
+ if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
+ GList *spill_node;
+ MonoInst *store;
+ spill_node = g_list_first (fspill_list);
+ g_assert (spill_node);
+
+ store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
+ insert_before_ins (ins, tmp, store);
+ fspill_list = g_list_remove (fspill_list, spill_node->data);
+ fspill--;
+ }
+ } else if (spec [MONO_INST_DEST] == 'L') {
+ int hreg;
val = rs->iassign [ins->dreg];
- prev_dreg = ins->dreg;
+		/* check the special case where dreg has been moved from ECX (clob shift) */
+ if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
+ hreg = clob_dreg + 1;
+ else
+ hreg = ins->dreg + 1;
+
+ /* base prev_dreg on fixed hreg, handle clob case */
+ prev_dreg = hreg - 1;
+
if (val < 0) {
int spill = 0;
if (val < -1) {
/* the register gets spilled after this inst */
spill = -val -1;
}
- val = mono_regstate_alloc_int (rs, dest_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
+ val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
+ rs->iassign [ins->dreg] = val;
+ if (spill)
+ create_spilled_store (cfg, spill, val, prev_dreg, ins);
+ }
+
+ DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
+
+ rs->isymbolic [val] = hreg - 1;
+ ins->dreg = val;
+
+ val = rs->iassign [hreg];
+ if (val < 0) {
+ int spill = 0;
+ if (val < -1) {
+ /* the register gets spilled after this inst */
+ spill = -val -1;
+ }
+ val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
+ rs->iassign [hreg] = val;
+ if (spill)
+ create_spilled_store (cfg, spill, val, hreg, ins);
+ }
+
+ DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
+ rs->isymbolic [val] = hreg;
+		/* save the register allocated for the high word in ins->unused */
+ ins->unused = val;
+
+ /* check if we can free our long reg */
+ if (reg_is_freeable (val) && hreg >= 0 && reginfo [hreg].born_in >= i) {
+ DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
+ mono_regstate_free_int (rs, val);
+ }
+ }
+ else if (ins->dreg >= MONO_MAX_IREGS) {
+ int hreg;
+ val = rs->iassign [ins->dreg];
+ if (spec [MONO_INST_DEST] == 'l') {
+			/* check the special case where dreg has been moved from ECX (clob shift) */
+ if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
+ hreg = clob_dreg + 1;
+ else
+ hreg = ins->dreg + 1;
+
+ /* base prev_dreg on fixed hreg, handle clob case */
+ prev_dreg = hreg - 1;
+ } else
+ prev_dreg = ins->dreg;
+
+ if (val < 0) {
+ int spill = 0;
+ if (val < -1) {
+ /* the register gets spilled after this inst */
+ spill = -val -1;
+ }
+ val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
rs->iassign [ins->dreg] = val;
if (spill)
create_spilled_store (cfg, spill, val, prev_dreg, ins);
DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
rs->isymbolic [val] = prev_dreg;
ins->dreg = val;
+ /* handle cases where lreg needs to be eax:edx */
if (spec [MONO_INST_DEST] == 'l') {
+			/* check the special case where dreg has been moved from ECX (clob shift) */
int hreg = prev_dreg + 1;
val = rs->iassign [hreg];
if (val < 0) {
/* the register gets spilled after this inst */
spill = -val -1;
}
- val = mono_regstate_alloc_int (rs, dest_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
+ val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
rs->iassign [hreg] = val;
if (spill)
create_spilled_store (cfg, spill, val, hreg, ins);
}
DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
rs->isymbolic [val] = hreg;
- /* FIXME:? ins->dreg = val; */
if (ins->dreg == X86_EAX) {
if (val != X86_EDX)
create_copy_ins (cfg, val, X86_EDX, ins);
} else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != X86_EDX && spec [MONO_INST_CLOB] != 'd') {
create_copy_ins (cfg, ins->dreg, X86_EDX, ins);
}
- } else {
- prev_dreg = -1;
}
if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
mono_regstate_free_int (rs, ins->dreg);
}
- if (spec [MONO_INST_SRC1] != 'f' && ins->sreg1 >= MONO_MAX_IREGS) {
+ /* put src1 in EAX if it needs to be */
+ if (spec [MONO_INST_SRC1] == 'a') {
+ if (!(rs->ifree_mask & (1 << X86_EAX))) {
+ DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [X86_EAX]));
+ get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [X86_EAX]);
+ mono_regstate_free_int (rs, X86_EAX);
+ }
+ /* force-set sreg1 */
+ rs->iassign [ins->sreg1] = X86_EAX;
+ rs->isymbolic [X86_EAX] = ins->sreg1;
+ ins->sreg1 = X86_EAX;
+ rs->ifree_mask &= ~ (1 << X86_EAX);
+ }
+
+ /* Track sreg1 */
+ if (spec [MONO_INST_SRC1] == 'f') {
+ if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
+ MonoInst *load;
+ MonoInst *store = NULL;
+
+ if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
+ GList *spill_node;
+ spill_node = g_list_first (fspill_list);
+ g_assert (spill_node);
+
+ store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);
+ fspill_list = g_list_remove (fspill_list, spill_node->data);
+ }
+
+ fspill++;
+ fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
+ load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
+ insert_before_ins (ins, tmp, load);
+ if (store)
+ insert_before_ins (load, tmp, store);
+ }
+ } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
+ /* force source to be same as dest */
+ rs->iassign [ins->sreg1] = ins->dreg;
+ rs->iassign [ins->sreg1 + 1] = ins->unused;
+
+ DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
+ DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
+
+ ins->sreg1 = ins->dreg;
+ /*
+		 * No need to save the reg, we know that src1 = dest in these cases:
+ * ins->inst_c0 = ins->unused;
+ */
+
+ /* make sure that we remove them from free mask */
+ rs->ifree_mask &= ~ (1 << ins->dreg);
+ rs->ifree_mask &= ~ (1 << ins->unused);
+ }
+ else if (ins->sreg1 >= MONO_MAX_IREGS) {
val = rs->iassign [ins->sreg1];
prev_sreg1 = ins->sreg1;
if (val < 0) {
DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
} else {
//g_assert (val == -1); /* source cannot be spilled */
- val = mono_regstate_alloc_int (rs, src1_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
+ val = mono_x86_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
rs->iassign [ins->sreg1] = val;
DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
}
ins->next = copy;
}
}
- if (spec [MONO_INST_SRC2] != 'f' && ins->sreg2 >= MONO_MAX_IREGS) {
+ /* track sreg2 */
+ if (spec [MONO_INST_SRC2] == 'f') {
+ if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
+ MonoInst *load;
+ MonoInst *store = NULL;
+
+ if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
+ GList *spill_node;
+
+ spill_node = g_list_first (fspill_list);
+ g_assert (spill_node);
+ if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
+ spill_node = g_list_next (spill_node);
+
+ store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
+ fspill_list = g_list_remove (fspill_list, spill_node->data);
+ }
+
+ fspill++;
+ fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
+ load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
+ insert_before_ins (ins, tmp, load);
+ if (store)
+ insert_before_ins (load, tmp, store);
+ }
+ }
+ else if (ins->sreg2 >= MONO_MAX_IREGS) {
val = rs->iassign [ins->sreg2];
prev_sreg2 = ins->sreg2;
if (val < 0) {
/* the register gets spilled after this inst */
spill = -val -1;
}
- val = mono_regstate_alloc_int (rs, src2_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
+ val = mono_x86_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
rs->iassign [ins->sreg2] = val;
DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
if (spill)
DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
mono_regstate_free_int (rs, ins->sreg2);
}*/
-
+
//DEBUG (print_ins (i, ins));
/* this may result from a insert_before call */
if (!tmp->next)
bb->code = tmp->data;
tmp = tmp->next;
}
+
+ g_free (reginfo);
+ g_free (reginfof);
+ g_list_free (fspill_list);
}
static unsigned char*
x86_pop_reg (code, X86_EDX); \
x86_pop_reg (code, X86_EAX);
+/* the alignment value should be benchmarked and tuned per CPU */
+#define LOOP_ALIGNMENT 8
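+/* treat a block with a single incoming edge inside a loop nest as a loop header */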
+#define bb_is_loop_start(bb) ((bb)->nesting && ((bb)->in_count == 1))
+
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
if (cfg->opt & MONO_OPT_PEEPHOLE)
peephole_pass (cfg, bb);
-#if 0
- /*
- * various stratgies to align BBs. Using real loop detection or simply
- * aligning every block leads to more consistent benchmark results,
- * but usually slows down the code
- * we should do the alignment outside this function or we should adjust
- * bb->native offset as well or the code is effectively slowed down!
- */
- /* align all blocks */
-// if ((pad = (cfg->code_len & (align - 1)))) {
- /* poor man loop start detection */
-// if (bb->code && bb->in_count && bb->in_bb [0]->cil_code > bb->cil_code && (pad = (cfg->code_len & (align - 1)))) {
- /* consider real loop detection and nesting level */
-// if (bb->loop_blocks && bb->nesting < 3 && (pad = (cfg->code_len & (align - 1)))) {
- /* consider real loop detection */
- if (bb->loop_blocks && (pad = (cfg->code_len & (align - 1)))) {
- pad = align - pad;
- x86_padding (code, pad);
- cfg->code_len += pad;
- bb->native_offset = cfg->code_len;
+ if (cfg->opt & MONO_OPT_LOOP) {
+ int pad, align = LOOP_ALIGNMENT;
+ /* set alignment depending on cpu */
+ if (bb_is_loop_start (bb) && (pad = (cfg->code_len & (align - 1)))) {
+ pad = align - pad;
+ /*g_print ("adding %d pad at %x to loop in %s\n", pad, cfg->code_len, cfg->method->name);*/
+ x86_padding (code, pad);
+ cfg->code_len += pad;
+ bb->native_offset = cfg->code_len;
+ }
}
-#endif
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
cpos = bb->max_offset;
- if (mono_trace_coverage) {
- MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
+ if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
+ MonoProfileCoverageInfo *cov = cfg->coverage_info;
g_assert (!mono_compile_aot);
cpos += 6;
- // fixme: make this work with inlining
- g_assert_not_reached ();
- //if (bb->cil_code)
- //cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
+ cov->data [bb->dfn].cil_code = bb->cil_code;
		/* this is not thread safe, but good enough */
- /* fixme: howto handle overflows? */
x86_inc_mem (code, &cov->data [bb->dfn].count);
}
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
+ case OP_BIGMUL:
+ x86_mul_reg (code, ins->sreg2, TRUE);
+ break;
+ case OP_BIGMUL_UN:
+ x86_mul_reg (code, ins->sreg2, FALSE);
+ break;
+ case OP_X86_SETEQ_MEMBASE:
+ x86_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
+ break;
case OP_STOREI1_MEMBASE_IMM:
x86_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
break;
case OP_X86_ADD_MEMBASE_IMM:
x86_alu_membase_imm (code, X86_ADD, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
break;
+ case OP_X86_ADD_MEMBASE:
+ x86_alu_reg_membase (code, X86_ADD, ins->sreg1, ins->sreg2, ins->inst_offset);
+ break;
case OP_X86_SUB_MEMBASE_IMM:
x86_alu_membase_imm (code, X86_SUB, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
break;
+ case OP_X86_SUB_MEMBASE:
+ x86_alu_reg_membase (code, X86_SUB, ins->sreg1, ins->sreg2, ins->inst_offset);
+ break;
case OP_X86_INC_MEMBASE:
x86_inc_membase (code, ins->inst_basereg, ins->inst_offset);
break;
case OP_X86_DEC_REG:
x86_dec_reg (code, ins->dreg);
break;
+ case OP_X86_MUL_MEMBASE:
+ x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
+ break;
case CEE_BREAK:
x86_breakpoint (code);
break;
case OP_SHL_IMM:
x86_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
break;
+ case OP_LSHL: {
+ guint8 *jump_to_end;
+
+ /* handle shifts below 32 bits */
+ x86_shld_reg (code, ins->unused, ins->sreg1);
+ x86_shift_reg (code, X86_SHL, ins->sreg1);
+
+ x86_test_reg_imm (code, X86_ECX, 32);
+ jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
+
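+		/* the cpu masks the shift count to 5 bits, so a count >= 32 has
+		 * wrapped around above; the fixup below repairs that case */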
+		/* handle shifts over 31 bits */
+ x86_mov_reg_reg (code, ins->unused, ins->sreg1, 4);
+ x86_clear_reg (code, ins->sreg1);
+
+ x86_patch (jump_to_end, code);
+ }
+ break;
+ case OP_LSHR: {
+ guint8 *jump_to_end;
+
+ /* handle shifts below 32 bits */
+ x86_shrd_reg (code, ins->sreg1, ins->unused);
+ x86_shift_reg (code, X86_SAR, ins->unused);
+
+ x86_test_reg_imm (code, X86_ECX, 32);
+ jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+
+ /* handle shifts over 31 bits */
+ x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
+ x86_shift_reg_imm (code, X86_SAR, ins->unused, 31);
+
+ x86_patch (jump_to_end, code);
+ }
+ break;
+ case OP_LSHR_UN: {
+ guint8 *jump_to_end;
+
+ /* handle shifts below 32 bits */
+ x86_shrd_reg (code, ins->sreg1, ins->unused);
+ x86_shift_reg (code, X86_SHR, ins->unused);
+
+ x86_test_reg_imm (code, X86_ECX, 32);
+ jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+
+ /* handle shifts over 31 bits */
+ x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
+ x86_shift_reg_imm (code, X86_SHR, ins->unused, 31);
+
+ x86_patch (jump_to_end, code);
+ }
+ break;
+ case OP_LSHL_IMM:
+ if (ins->inst_imm >= 32) {
+ x86_mov_reg_reg (code, ins->unused, ins->sreg1, 4);
+ x86_clear_reg (code, ins->sreg1);
+ x86_shift_reg_imm (code, X86_SHL, ins->unused, ins->inst_imm - 32);
+ } else {
+ x86_shld_reg_imm (code, ins->unused, ins->sreg1, ins->inst_imm);
+ x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
+ }
+ break;
+ case OP_LSHR_IMM:
+ if (ins->inst_imm >= 32) {
+ x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
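+			/* fill the high word with the sign bit */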
+ x86_shift_reg_imm (code, X86_SAR, ins->unused, 0x1f);
+ x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
+ } else {
+ x86_shrd_reg_imm (code, ins->sreg1, ins->unused, ins->inst_imm);
+ x86_shift_reg_imm (code, X86_SAR, ins->unused, ins->inst_imm);
+ }
+ break;
+ case OP_LSHR_UN_IMM:
+ if (ins->inst_imm >= 32) {
+ x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
+ x86_clear_reg (code, ins->unused);
+ x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32);
+ } else {
+ x86_shrd_reg_imm (code, ins->sreg1, ins->unused, ins->inst_imm);
+ x86_shift_reg_imm (code, X86_SHR, ins->unused, ins->inst_imm);
+ }
+ break;
case CEE_NOT:
x86_not_reg (code, ins->sreg1);
break;
case CEE_NEG:
x86_neg_reg (code, ins->sreg1);
break;
+ case OP_SEXT_I1:
+ x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
+ break;
+ case OP_SEXT_I2:
+ x86_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
+ break;
case CEE_MUL:
x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
break;
}
x86_mul_reg (code, non_eax_reg, FALSE);
/* save before the check since pop and mov don't change the flags */
+ if (ins->dreg != X86_EAX)
+ x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
if (saved_edx)
x86_pop_reg (code, X86_EDX);
if (saved_eax)
x86_pop_reg (code, X86_EAX);
- if (ins->dreg != X86_EAX)
- x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
break;
}
x86_mov_reg_imm (code, ins->dreg, 0);
break;
case CEE_CONV_I4:
- case CEE_CONV_U4:
case OP_MOVE:
x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
break;
+ case CEE_CONV_U4:
+ g_assert_not_reached ();
case CEE_JMP: {
/*
* Note: this 'frame destruction' logic is useful for tail calls, too.
+ * Keep in sync with the code in emit_epilog.
*/
- int pos = -4;
- if (cfg->used_int_regs & (1 << X86_EBX)) {
- x86_mov_reg_membase (code, X86_EBX, X86_EBP, pos, 4);
+ int pos = 0;
+
+ /* FIXME: no tracing support... */
+ if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
+ code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
+ /* reset offset to make max_len work */
+ offset = code - cfg->native_code;
+
+ g_assert (!cfg->method->save_lmf);
+
+ if (cfg->used_int_regs & (1 << X86_EBX))
pos -= 4;
- }
- if (cfg->used_int_regs & (1 << X86_EDI)) {
- x86_mov_reg_membase (code, X86_EDI, X86_EBP, pos, 4);
+ if (cfg->used_int_regs & (1 << X86_EDI))
pos -= 4;
- }
- if (cfg->used_int_regs & (1 << X86_ESI)) {
- x86_mov_reg_membase (code, X86_ESI, X86_EBP, pos, 4);
+ if (cfg->used_int_regs & (1 << X86_ESI))
pos -= 4;
- }
+ if (pos)
+ x86_lea_membase (code, X86_ESP, X86_EBP, pos);
+
+ if (cfg->used_int_regs & (1 << X86_ESI))
+ x86_pop_reg (code, X86_ESI);
+ if (cfg->used_int_regs & (1 << X86_EDI))
+ x86_pop_reg (code, X86_EDI);
+ if (cfg->used_int_regs & (1 << X86_EBX))
+ x86_pop_reg (code, X86_EBX);
+
/* restore ESP/EBP */
x86_leave (code);
offset = code - cfg->native_code;
- mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, ins->inst_p0);
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
x86_jump32 (code, 0);
break;
}
/* ensure ins->sreg1 is not NULL */
x86_alu_membase_imm (code, X86_CMP, ins->sreg1, 0, 0);
break;
+ case OP_ARGLIST: {
+ int hreg = ins->sreg1 == X86_EAX? X86_ECX: X86_EAX;
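+		/* store the address of the caller's signature cookie into the
+		 * vararg handle that sreg1 points to */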
+ x86_push_reg (code, hreg);
+ x86_lea_membase (code, hreg, X86_EBP, cfg->sig_cookie);
+ x86_mov_membase_reg (code, ins->sreg1, 0, hreg, 4);
+ x86_pop_reg (code, hreg);
+ break;
+ }
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
}
x86_call_code (code, 0);
- if (call->stack_usage)
+ if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
break;
case OP_FCALL_REG:
case OP_CALL_REG:
call = (MonoCallInst*)ins;
x86_call_reg (code, ins->sreg1);
- if (call->stack_usage)
+ if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
break;
case OP_FCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
x86_call_membase (code, ins->sreg1, ins->inst_offset);
- if (call->stack_usage)
+ if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
x86_alu_reg_imm (code, X86_ADD, X86_ESP, call->stack_usage);
break;
case OP_OUTARG:
case OP_X86_LEA:
x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
break;
+ case OP_X86_LEA_MEMBASE:
+ x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
+ break;
case OP_X86_XCHG:
x86_xchg_reg_reg (code, ins->sreg1, ins->sreg2, 4);
break;
x86_call_code (code, 0);
break;
}
- case OP_ENDFILTER:
- if (ins->sreg1 != X86_EAX)
- x86_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
- x86_mov_reg_membase (code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
- x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
- x86_ret (code);
- break;
- case CEE_ENDFINALLY:
- /*
- * restore ESP - which can be modified when we allocate value types in the filter
- */
- x86_mov_reg_membase (code, X86_ESP, X86_EBP, mono_exc_esp_offset, 4);
- x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
- x86_ret (code);
- break;
- case OP_HANDLER:
- x86_mov_membase_reg (code, X86_EBP, mono_exc_esp_offset, X86_ESP, 4);
+ case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
x86_call_imm (code, 0);
break;
x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
} else {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
- x86_jump32 (code, 0);
+ if ((cfg->opt & MONO_OPT_BRANCH) &&
+ x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
+ x86_jump8 (code, 0);
+ else
+ x86_jump32 (code, 0);
}
} else {
if (ins->inst_target_bb->native_offset) {
case OP_R8CONST: {
double d = *(double *)ins->inst_p0;
- if (d == 0.0) {
+ if ((d == 0.0) && (mono_signbit (d) == 0)) {
x86_fldz (code);
} else if (d == 1.0) {
x86_fld1 (code);
case OP_R4CONST: {
float f = *(float *)ins->inst_p0;
- if (f == 0.0) {
+ if ((f == 0.0) && (mono_signbit (f) == 0)) {
x86_fldz (code);
} else if (f == 1.0) {
x86_fld1 (code);
case OP_STORER8_MEMBASE_REG:
x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
break;
+ case OP_LOADR8_SPILL_MEMBASE:
+ x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
+ x86_fxch (code, 1);
+ break;
case OP_LOADR8_MEMBASE:
x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
break;
code = emit_float_to_int (cfg, code, ins->dreg, 4, TRUE);
break;
case OP_FCONV_TO_I8:
- /* we defined this instruction to output only to eax:edx */
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
x86_fnstcw_membase(code, X86_ESP, 0);
- x86_mov_reg_membase (code, X86_EAX, X86_ESP, 0, 2);
- x86_alu_reg_imm (code, X86_OR, X86_EAX, 0xc00);
- x86_mov_membase_reg (code, X86_ESP, 2, X86_EAX, 2);
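+		/* set the FPU rounding control to truncate toward zero (RC = 11b),
+		 * as conv.i8 requires */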
+ x86_mov_reg_membase (code, ins->dreg, X86_ESP, 0, 2);
+ x86_alu_reg_imm (code, X86_OR, ins->dreg, 0xc00);
+ x86_mov_membase_reg (code, X86_ESP, 2, ins->dreg, 2);
x86_fldcw_membase (code, X86_ESP, 2);
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
- x86_pop_reg (code, X86_EAX);
- x86_pop_reg (code, X86_EDX);
+ x86_pop_reg (code, ins->dreg);
+ x86_pop_reg (code, ins->unused);
x86_fldcw_membase (code, X86_ESP, 0);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
break;
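+ /*
+  * The OR with 0xc00 above sets the rounding-control field (bits 10-11)
+  * of the x87 control word to 11b, i.e. truncate toward zero, which is
+  * what conv.i8 requires; the saved word is reloaded afterwards.  For
+  * example, the default control word 0x037f becomes 0x0f7f.
+  */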
break;
case OP_SIN:
x86_fsin (code);
+ x86_fldz (code);
+ x86_fp_op_reg (code, X86_FADD, 1, TRUE);
break;
case OP_COS:
x86_fcos (code);
+ x86_fldz (code);
+ x86_fp_op_reg (code, X86_FADD, 1, TRUE);
break;
case OP_ABS:
x86_fabs (code);
break;
- case OP_TAN:
+ case OP_TAN: {
+ /*
+ * It really doesn't make sense to inline all this code;
+ * it's here just to show that things may not be as simple
+ * as they appear.
+ */
+ guchar *check_pos, *end_tan, *pop_jump;
+ x86_push_reg (code, X86_EAX);
x86_fptan (code);
- break;
+ x86_fnstsw (code);
+ x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
+ check_pos = code;
+ x86_branch8 (code, X86_CC_NE, 0, FALSE);
+ x86_fstp (code, 0); /* pop the 1.0 */
+ end_tan = code;
+ x86_jump8 (code, 0);
+ x86_fldpi (code);
+ x86_fp_op (code, X86_FADD, 0);
+ x86_fxch (code, 1);
+ x86_fprem1 (code);
+ x86_fstsw (code);
+ x86_test_reg_imm (code, X86_EAX, X86_FP_C2);
+ pop_jump = code;
+ x86_branch8 (code, X86_CC_NE, 0, FALSE);
+ x86_fstp (code, 1);
+ x86_fptan (code);
+ x86_patch (pop_jump, code);
+ x86_fstp (code, 0); /* pop the 1.0 */
+ x86_patch (check_pos, code);
+ x86_patch (end_tan, code);
+ x86_fldz (code);
+ x86_fp_op_reg (code, X86_FADD, 1, TRUE);
+ x86_pop_reg (code, X86_EAX);
+ break;
+ }
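+ /*
+  * How the OP_TAN sequence above works: fptan only accepts |x| < 2^63 and
+  * sets the C2 status flag when the operand is out of range.  On that
+  * path the argument is reduced modulo 2*pi (built with fldpi plus an
+  * fadd of the register with itself) using fprem1 until C2 clears, and
+  * fptan is retried; the 1.0 that fptan pushes is popped on both paths.
+  */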
case OP_ATAN:
+ x86_fld1 (code);
x86_fpatan (code);
+ x86_fldz (code);
+ x86_fp_op_reg (code, X86_FADD, 1, TRUE);
break;
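+ /*
+  * fpatan computes atan(ST(1)/ST(0)) and pops the stack, so the fld1
+  * above turns the single-argument Math.Atan (x) into atan(x/1.0).
+  */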
case OP_SQRT:
x86_fsqrt (code);
/* we need to exchange ST(0) with ST(1) */
x86_fxch (code, 1);
- /* this requires a loop, because fprem1 somtimes
+ /* this requires a loop, because fprem sometimes
* returns a partial remainder */
l1 = code;
+ /* looks like MS is using fprem instead of the IEEE compatible fprem1 */
+ /* x86_fprem1 (code); */
x86_fprem (code);
x86_fnstsw (code);
- x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x0400);
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_C2);
l2 = code + 2;
x86_branch8 (code, X86_CC_NE, l1 - l2, FALSE);
}
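+ /*
+  * fprem truncates the quotient while the IEEE 754 fprem1 rounds it to
+  * nearest, so the results can differ by one multiple of the divisor:
+  * for 5.0 rem 3.0, fprem yields 2.0 but fprem1 yields -1.0.  fprem is
+  * used above to match the MS runtime's behaviour.
+  */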
/* this overwrites EAX */
EMIT_FPCOMPARE(code);
- x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
break;
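+ /*
+  * The X86_FP_* constants name bits of the x87 status word as fnstsw
+  * stores it into AX: X86_FP_C0 = 0x0100, X86_FP_C2 = 0x0400,
+  * X86_FP_C3 = 0x4000 and X86_FP_CC_MASK = C0|C2|C3 = 0x4500.  After a
+  * compare, C3/C2/C0 read 000 for greater, 001 for less, 100 for equal
+  * and 111 for unordered, which is what the masked tests below check.
+  */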
case OP_FCEQ:
if (cfg->opt & MONO_OPT_FCMOV) {
x86_push_reg (code, X86_EAX);
EMIT_FPCOMPARE(code);
- x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
x86_push_reg (code, X86_EAX);
EMIT_FPCOMPARE(code);
- x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4500);
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
if (ins->opcode == OP_FCLT_UN) {
guchar *is_not_zero_check, *end_jump;
is_not_zero_check = code;
end_jump = code;
x86_jump8 (code, 0);
x86_patch (is_not_zero_check, code);
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
x86_patch (end_jump, code);
}
x86_push_reg (code, X86_EAX);
EMIT_FPCOMPARE(code);
- x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4500);
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x0100);
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, X86_FP_CC_MASK);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
if (ins->opcode == OP_FCGT_UN) {
guchar *is_not_zero_check, *end_jump;
is_not_zero_check = code;
end_jump = code;
x86_jump8 (code, 0);
x86_patch (is_not_zero_check, code);
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
x86_patch (end_jump, code);
}
break;
case OP_FBEQ:
if (cfg->opt & MONO_OPT_FCMOV) {
+ guchar *jump = code;
+ x86_branch8 (code, X86_CC_P, 0, TRUE);
EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
+ x86_patch (jump, code);
break;
}
x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
EMIT_COND_BRANCH (ins, X86_CC_EQ, TRUE);
break;
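+ /*
+  * The MONO_OPT_FCMOV paths use fcomi/fucomi, which copy the FPU
+  * condition codes straight into EFLAGS: ZF = C3, PF = C2, CF = C0.
+  * Equality is therefore ZF=1, and PF=1 signals an unordered (NaN)
+  * compare, which is why OP_FBEQ above first skips on X86_CC_P.
+  */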
case OP_FBNE_UN:
+ /* Branch if C013 != 100 */
if (cfg->opt & MONO_OPT_FCMOV) {
- EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
+ /* branch if !ZF or (PF|CF) */
EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
+ EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
+ EMIT_COND_BRANCH (ins, X86_CC_B, FALSE);
break;
}
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4000);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
break;
case OP_FBLT:
end_jump = code;
x86_jump8 (code, 0);
x86_patch (is_not_zero_check, code);
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
x86_patch (end_jump, code);
}
EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
break;
}
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x0100);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
if (ins->opcode == OP_FBGT_UN) {
guchar *is_not_zero_check, *end_jump;
is_not_zero_check = code;
end_jump = code;
x86_jump8 (code, 0);
x86_patch (is_not_zero_check, code);
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x4500);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_CC_MASK);
x86_patch (end_jump, code);
}
EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
break;
case OP_FBGE:
+ /* Branch if C013 == 100 or 001 */
+ if (cfg->opt & MONO_OPT_FCMOV) {
+ guchar *br1;
+
+ /* skip branch if C1=1 */
+ br1 = code;
+ x86_branch8 (code, X86_CC_P, 0, FALSE);
+ /* branch if (C0 | C3) = 1 */
+ EMIT_COND_BRANCH (ins, X86_CC_BE, FALSE);
+ x86_patch (br1, code);
+ break;
+ }
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
+ EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C3);
+ EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
+ break;
case OP_FBGE_UN:
+ /* Branch if C013 == 000 */
if (cfg->opt & MONO_OPT_FCMOV) {
EMIT_COND_BRANCH (ins, X86_CC_LE, FALSE);
break;
EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
break;
case OP_FBLE:
+ /* Branch if C013 == 000 or 100 */
+ if (cfg->opt & MONO_OPT_FCMOV) {
+ guchar *br1;
+
+ /* skip branch if C1=1 */
+ br1 = code;
+ x86_branch8 (code, X86_CC_P, 0, FALSE);
+ /* branch if C0=0 */
+ EMIT_COND_BRANCH (ins, X86_CC_NB, FALSE);
+ x86_patch (br1, code);
+ break;
+ }
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, (X86_FP_C0|X86_FP_C1));
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0);
+ EMIT_COND_BRANCH (ins, X86_CC_EQ, FALSE);
+ break;
case OP_FBLE_UN:
+ /* Branch if C013 != 001 */
if (cfg->opt & MONO_OPT_FCMOV) {
EMIT_COND_BRANCH (ins, X86_CC_P, FALSE);
EMIT_COND_BRANCH (ins, X86_CC_GE, FALSE);
break;
}
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x0100);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
break;
case CEE_CKFINITE: {
x86_fxam (code);
x86_fnstsw (code);
x86_alu_reg_imm (code, X86_AND, X86_EAX, 0x4100);
- x86_alu_reg_imm (code, X86_CMP, X86_EAX, 0x0100);
+ x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
x86_pop_reg (code, X86_EAX);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
break;
}
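+ /*
+  * In the CEE_CKFINITE case above, fxam classifies ST(0) via C3/C2/C0:
+  * NaN sets only C0, infinity sets C0 and C2, and ordinary finite values
+  * set only C2.  Masking with 0x4100 (C3|C0) and comparing against
+  * X86_FP_C0 therefore matches exactly NaN and infinity, the two inputs
+  * for which ckfinite must throw ArithmeticException.
+  */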
if ((code - cfg->native_code - offset) > max_len) {
- g_warning ("wrong maximal instruction length of instruction %s (exptected %d, got %d)",
+ g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
}
void
mono_arch_register_lowlevel_calls (void)
{
- mono_register_jit_icall (enter_method, "mono_enter_method", NULL, TRUE);
- mono_register_jit_icall (leave_method, "mono_leave_method", NULL, TRUE);
}
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji)
+mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
for (patch_info = ji; patch_info; patch_info = patch_info->next) {
unsigned char *ip = patch_info->ip.i + code;
- const unsigned char *target = NULL;
+ const unsigned char *target;
+
+ target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
switch (patch_info->type) {
- case MONO_PATCH_INFO_BB:
- target = patch_info->data.bb->native_offset + code;
- break;
- case MONO_PATCH_INFO_ABS:
- target = patch_info->data.target;
- break;
- case MONO_PATCH_INFO_LABEL:
- target = patch_info->data.inst->inst_c0 + code;
- break;
case MONO_PATCH_INFO_IP:
- *((gpointer *)(ip)) = ip;
+ *((gconstpointer *)(ip)) = target;
+ continue;
+ case MONO_PATCH_INFO_METHOD_REL:
+ *((gconstpointer *)(ip)) = target;
continue;
- case MONO_PATCH_INFO_INTERNAL_METHOD: {
- MonoJitICallInfo *mi = mono_find_jit_icall_by_name (patch_info->data.name);
- if (!mi) {
- g_warning ("unknown MONO_PATCH_INFO_INTERNAL_METHOD %s", patch_info->data.name);
- g_assert_not_reached ();
- }
- target = mi->wrapper;
- break;
- }
- case MONO_PATCH_INFO_METHOD:
- if (patch_info->data.method == method) {
- target = code;
- } else {
- /* get the trampoline to the method from the domain */
- target = mono_arch_create_jit_trampoline (patch_info->data.method);
- }
- break;
case MONO_PATCH_INFO_SWITCH: {
- gpointer *table = (gpointer *)patch_info->data.target;
- int i;
-
- *((gconstpointer *)(ip + 2)) = patch_info->data.target;
-
- for (i = 0; i < patch_info->table_size; i++) {
- table [i] = (int)patch_info->data.table [i] + code;
- }
- /* we put into the table the absolute address, no need fo x86_patch in this case */
+ *((gconstpointer *)(ip + 2)) = target;
+ /* we put the absolute address into the table, no need for x86_patch in this case */
continue;
}
+ case MONO_PATCH_INFO_IID:
+ *((guint32 *)(ip + 1)) = (guint32)target;
+ continue;
+ case MONO_PATCH_INFO_CLASS_INIT: {
+ guint8 *code = ip;
+ /* Might already have been changed to a nop */
+ x86_call_imm (code, 0);
+ break;
+ }
+ case MONO_PATCH_INFO_R4:
+ case MONO_PATCH_INFO_R8:
+ *((gconstpointer *)(ip + 2)) = target;
+ continue;
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_CLASS:
case MONO_PATCH_INFO_IMAGE:
case MONO_PATCH_INFO_FIELD:
- *((gconstpointer *)(ip + 1)) = patch_info->data.target;
- continue;
- case MONO_PATCH_INFO_R4:
- case MONO_PATCH_INFO_R8:
- *((gconstpointer *)(ip + 2)) = patch_info->data.target;
+ case MONO_PATCH_INFO_VTABLE:
+ case MONO_PATCH_INFO_SFLDA:
+ case MONO_PATCH_INFO_EXC_NAME:
+ case MONO_PATCH_INFO_LDSTR:
+ case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
+ case MONO_PATCH_INFO_LDTOKEN:
+ *((gconstpointer *)(ip + 1)) = target;
continue;
default:
- g_assert_not_reached ();
+ break;
}
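+ /*
+  * The cases above that store the target directly "continue"; anything
+  * that falls through to here is a call or jump whose pc-relative
+  * displacement x86_patch rewrites in place.
+  */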
x86_patch (ip, target);
}
if (cfg->method->save_lmf)
max_epilog_size += 128;
- if (mono_jit_trace_calls)
+ if (mono_jit_trace_calls != NULL)
max_epilog_size += 50;
- if (mono_jit_profile)
+ if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
max_epilog_size += 50;
/* count the number of exception infos */
int alloc_size, pos, max_offset, i;
guint8 *code;
- cfg->code_size = 256;
+ cfg->code_size = MAX (((MonoMethodNormal *)method)->header->code_size * 4, 256);
code = cfg->native_code = g_malloc (cfg->code_size);
x86_push_reg (code, X86_EBP);
if (method->save_lmf) {
pos += sizeof (MonoLMF);
-
+
/* save the current IP */
mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
x86_push_imm (code, 0);
/* save all caller saved regs */
- x86_push_reg (code, X86_EBX);
- x86_push_reg (code, X86_EDI);
- x86_push_reg (code, X86_ESI);
x86_push_reg (code, X86_EBP);
+ x86_push_reg (code, X86_ESI);
+ x86_push_reg (code, X86_EDI);
+ x86_push_reg (code, X86_EBX);
/* save method info */
x86_push_imm (code, method);
-
+
/* get the address of lmf for the current thread */
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_get_lmf_addr");
- x86_call_code (code, 0);
+ /*
+ * This is performance critical, so we use a few tricks to make
+ * it fast.
+ */
+ if (lmf_tls_offset != -1) {
+ /* Load lmf quickly using the GS register */
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_reg_mem (code, X86_EAX, 0, 4);
+ x86_mov_reg_membase (code, X86_EAX, X86_EAX, lmf_tls_offset, 4);
+ }
+ else {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_get_lmf_addr");
+ x86_call_code (code, 0);
+ }
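+ /*
+  * Fast-path rationale: with NPTL the GS segment points at the thread
+  * descriptor and gs:[0] holds that descriptor's own address, so the two
+  * moves above reproduce what the mono_get_lmf_addr call would compute,
+  * without the cost of an out-of-line call.
+  */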
/* push lmf */
x86_push_reg (code, X86_EAX);
alloc_size -= pos;
- if (alloc_size)
+ if (alloc_size) {
+ /* See mono_emit_stack_alloc */
+#ifdef PLATFORM_WIN32
+ guint32 remaining_size = alloc_size;
+ while (remaining_size >= 0x1000) {
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
+ x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
+ remaining_size -= 0x1000;
+ }
+ if (remaining_size)
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, remaining_size);
+#else
x86_alu_reg_imm (code, X86_SUB, X86_ESP, alloc_size);
+#endif
+ }
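+ /*
+  * Windows commits stack one guard page at a time, so a frame larger
+  * than a page must touch each page in order; the test of [esp] after
+  * every 4K sub is that probe.  For example, alloc_size = 0x2500 probes
+  * twice in the loop and then claims the remaining 0x500 directly.
+  */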
/* compute max_offset in order to use short forward jumps */
max_offset = 0;
MonoInst *ins = bb->code;
bb->max_offset = max_offset;
- if (mono_trace_coverage)
- max_offset += 6;
+ if (cfg->prof_options & MONO_PROFILE_COVERAGE)
+ max_offset += 6;
+ /* max alignment for loops */
+ if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
+ max_offset += LOOP_ALIGNMENT;
while (ins) {
+ if (ins->opcode == OP_LABEL)
+ ins->inst_c1 = max_offset;
+
max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
ins = ins->next;
}
}
}
- if (mono_jit_trace_calls)
- code = mono_arch_instrument_prolog (cfg, enter_method, code, TRUE);
+ if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
+ code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* load arguments allocated to register from the stack */
sig = method->signature;
{
MonoJumpInfo *patch_info;
MonoMethod *method = cfg->method;
+ MonoMethodSignature *sig = method->signature;
int pos;
+ guint32 stack_to_pop;
guint8 *code;
code = cfg->native_code + cfg->code_len;
- if (mono_jit_trace_calls)
- code = mono_arch_instrument_epilog (cfg, leave_method, code, TRUE);
+ if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
+ code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
-
+ /* the code restoring the registers must be kept in sync with CEE_JMP */
pos = 0;
if (method->save_lmf) {
- pos = -sizeof (MonoLMF);
+ gint32 prev_lmf_reg;
+
+ /* Find a spare register */
+ switch (sig->ret->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ prev_lmf_reg = X86_EDI;
+ cfg->used_int_regs |= (1 << X86_EDI);
+ break;
+ default:
+ prev_lmf_reg = X86_EDX;
+ break;
+ }
+
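+ /*
+  * The fixed offsets below mirror the save_lmf prologue pushes: ip at
+  * ebp-4, ebp at -8, esi at -12, edi at -16, ebx at -20, the method
+  * pointer at -24, the lmf address at -28 and previous_lmf at -32.
+  */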
+ /* reg = previous_lmf */
+ x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, -32, 4);
+
+ /* ecx = lmf */
+ x86_mov_reg_membase (code, X86_ECX, X86_EBP, -28, 4);
+
+ /* *(lmf) = previous_lmf */
+ x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
+
+ /* restore caller saved regs */
+ if (cfg->used_int_regs & (1 << X86_EBX)) {
+ x86_mov_reg_membase (code, X86_EBX, X86_EBP, -20, 4);
+ }
+
+ if (cfg->used_int_regs & (1 << X86_EDI)) {
+ x86_mov_reg_membase (code, X86_EDI, X86_EBP, -16, 4);
+ }
+ if (cfg->used_int_regs & (1 << X86_ESI)) {
+ x86_mov_reg_membase (code, X86_ESI, X86_EBP, -12, 4);
+ }
+
+ /* EBP is restored by LEAVE */
} else {
if (cfg->used_int_regs & (1 << X86_EBX)) {
pos -= 4;
if (cfg->used_int_regs & (1 << X86_ESI)) {
pos -= 4;
}
- }
- if (pos)
- x86_lea_membase (code, X86_ESP, X86_EBP, pos);
-
- if (method->save_lmf) {
- /* ebx = previous_lmf */
- x86_pop_reg (code, X86_EBX);
- /* edi = lmf */
- x86_pop_reg (code, X86_EDI);
- /* *(lmf) = previous_lmf */
- x86_mov_membase_reg (code, X86_EDI, 0, X86_EBX, 4);
-
- /* discard method info */
- x86_pop_reg (code, X86_ESI);
-
- /* restore caller saved regs */
- x86_pop_reg (code, X86_EBP);
- x86_pop_reg (code, X86_ESI);
- x86_pop_reg (code, X86_EDI);
- x86_pop_reg (code, X86_EBX);
-
- } else {
+ if (pos)
+ x86_lea_membase (code, X86_ESP, X86_EBP, pos);
if (cfg->used_int_regs & (1 << X86_ESI)) {
x86_pop_reg (code, X86_ESI);
}
x86_leave (code);
- x86_ret (code);
+
+ if (CALLCONV_IS_STDCALL (sig->call_convention)) {
+ MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));
+
+ stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
+ } else if (MONO_TYPE_ISSTRUCT (cfg->method->signature->ret))
+ stack_to_pop = 4;
+ else
+ stack_to_pop = 0;
+
+ if (stack_to_pop)
+ x86_ret_imm (code, stack_to_pop);
+ else
+ x86_ret (code);
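+ /*
+  * Examples of the cleanup choice above: a stdcall method taking two
+  * 32-bit stack arguments returns with "ret 8"; a non-stdcall method
+  * returning a value type still pops the hidden return-buffer pointer
+  * with "ret 4"; everything else uses a plain "ret".
+  */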
/* add code to raise exceptions */
for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
switch (patch_info->type) {
case MONO_PATCH_INFO_EXC:
x86_patch (patch_info->ip.i + cfg->native_code, code);
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
x86_push_imm (code, patch_info->data.target);
+ mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_METHOD_REL, (gpointer)patch_info->ip.i);
x86_push_imm (code, patch_info->ip.i + cfg->native_code);
patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
patch_info->data.name = "mono_arch_throw_exception_by_name";
g_assert (cfg->code_len < cfg->code_size);
}
+
+void
+mono_arch_flush_icache (guint8 *code, gint size)
+{
+ /* not needed */
+}
+
+void
+mono_arch_flush_register_windows (void)
+{
+}
+
+/*
+ * Support for fast access to the thread-local lmf structure using the GS
+ * segment register on NPTL + kernel 2.6.x.
+ */
+
+static gboolean tls_offset_inited = FALSE;
+
+void
+mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
+{
+#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
+ pthread_t self = pthread_self();
+ pthread_attr_t attr;
+ void *staddr = NULL;
+ size_t stsize = 0;
+ struct sigaltstack sa;
+#endif
+
+ if (!tls_offset_inited) {
+ guint8 *code;
+
+ tls_offset_inited = TRUE;
+
+ code = (guint8*)mono_get_lmf_addr;
+
+ if (getenv ("MONO_NPTL")) {
+ /*
+ * Determine the TLS offset of the lmf pointer by disassembling
+ * mono_get_lmf_addr, the function fetched above.
+ */
+
+ /* This is generated by gcc 3.3.2 */
+ if ((code [0] == 0x55) && (code [1] == 0x89) && (code [2] == 0xe5) &&
+ (code [3] == 0x65) && (code [4] == 0xa1) && (code [5] == 0x00) &&
+ (code [6] == 0x00) && (code [7] == 0x00) && (code [8] == 0x00) &&
+ (code [9] == 0x8b) && (code [10] == 0x80)) {
+ lmf_tls_offset = *(int*)&(code [11]);
+ }
+ else
+ /* This is generated by gcc-3.4 */
+ if ((code [0] == 0x55) && (code [1] == 0x89) && (code [2] == 0xe5) &&
+ (code [3] == 0x65) && (code [4] == 0xa1)) {
+ lmf_tls_offset = *(int*)&(code [5]);
+ }
+ }
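+ /*
+  * Decoding the byte patterns above: 0x55 = push %ebp, 0x89 0xe5 =
+  * mov %esp,%ebp, 0x65 0xa1 = a %gs-prefixed load into %eax, and
+  * 0x8b 0x80 = mov <disp32>(%eax),%eax; the 32-bit value read out of
+  * the code stream in each branch is the TLS offset being harvested.
+  */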
+ }
+
+#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
+
+ /* Determine stack boundaries */
+ if (!mono_running_on_valgrind ()) {
+#ifdef HAVE_PTHREAD_GETATTR_NP
+ pthread_getattr_np( self, &attr );
+#else
+#ifdef HAVE_PTHREAD_ATTR_GET_NP
+ pthread_attr_get_np( self, &attr );
+#elif defined(sun)
+ pthread_attr_init( &attr );
+ pthread_attr_getstacksize( &attr, &stsize );
+#else
+#error "Not implemented"
+#endif
+#endif
+#ifndef sun
+ pthread_attr_getstack( &attr, &staddr, &stsize );
+#endif
+ }
+
+ /*
+ * staddr seems to be wrong for the main thread, so we keep the value in
+ * tls->end_of_stack
+ */
+ tls->stack_size = stsize;
+
+ /* Setup an alternate signal stack */
+ tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
+ tls->signal_stack_size = SIGNAL_STACK_SIZE;
+
+ sa.ss_sp = tls->signal_stack;
+ sa.ss_size = SIGNAL_STACK_SIZE;
+ sa.ss_flags = SS_ONSTACK;
+ sigaltstack (&sa, NULL);
+#endif
+}
+
+void
+mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
+{
+#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
+ struct sigaltstack sa;
+
+ sa.ss_sp = tls->signal_stack;
+ sa.ss_size = SIGNAL_STACK_SIZE;
+ sa.ss_flags = SS_DISABLE;
+ sigaltstack (&sa, NULL);
+
+ if (tls->signal_stack)
+ g_free (tls->signal_stack);
+#endif
+}
+
+void
+mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
+{
+ /* add the this argument */
+ if (this_reg != -1) {
+ MonoInst *this;
+ MONO_INST_NEW (cfg, this, OP_OUTARG);
+ this->type = this_type;
+ this->sreg1 = this_reg;
+ mono_bblock_add_inst (cfg->cbb, this);
+ }
+
+ if (vt_reg != -1) {
+ MonoInst *vtarg;
+ MONO_INST_NEW (cfg, vtarg, OP_OUTARG);
+ vtarg->type = STACK_MP;
+ vtarg->sreg1 = vt_reg;
+ mono_bblock_add_inst (cfg->cbb, vtarg);
+ }
+}
+
+
+gint
+mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ if (cmethod->klass == mono_defaults.math_class) {
+ if (strcmp (cmethod->name, "Sin") == 0)
+ return OP_SIN;
+ else if (strcmp (cmethod->name, "Cos") == 0)
+ return OP_COS;
+ else if (strcmp (cmethod->name, "Tan") == 0)
+ return OP_TAN;
+ else if (strcmp (cmethod->name, "Atan") == 0)
+ return OP_ATAN;
+ else if (strcmp (cmethod->name, "Sqrt") == 0)
+ return OP_SQRT;
+ else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8)
+ return OP_ABS;
+#if 0
+ /* OP_FREM is not IEEE compatible */
+ else if (strcmp (cmethod->name, "IEEERemainder") == 0)
+ return OP_FREM;
+#endif
+ else
+ return -1;
+ } else {
+ return -1;
+ }
+ return -1;
+}
+
+
+gboolean
+mono_arch_print_tree (MonoInst *tree, int arity)
+{
+ return 0;
+}