#include "inssel.h"
#include "cpu-pentium.h"
+/* On Windows, these hold the key returned by TlsAlloc () */
static gint lmf_tls_offset = -1;
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
+#define ARGS_OFFSET 8
+
#ifdef PLATFORM_WIN32
/* Under windows, the default pinvoke calling convention is stdcall */
-#define CALLCONV_IS_STDCALL(call_conv) (((call_conv) == MONO_CALL_STDCALL) || ((call_conv) == MONO_CALL_DEFAULT))
+#define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
#else
-#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
+#define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
#endif
-#define SIGNAL_STACK_SIZE (64 * 1024)
-
#define NOT_IMPLEMENTED g_assert_not_reached ()
const char*
return "unknown";
}
+/*
+ * mono_arch_fregname:
+ *
+ *   Return a printable name for FP register number 'reg'.  The x87 FP
+ * stack registers have no individual names here, so this always returns
+ * the placeholder "unknown".
+ */
+const char*
+mono_arch_fregname (int reg) {
+	return "unknown";
+}
+
typedef enum {
ArgInIReg,
ArgInFloatSSEReg,
ArgInDoubleSSEReg,
ArgOnStack,
ArgValuetypeInReg,
- ArgOnFpStack,
- ArgNone /* only in pair_storage */
+ ArgOnFloatFpStack,
+ ArgOnDoubleFpStack,
+ ArgNone
} ArgStorage;
typedef struct {
#define FLOAT_PARAM_REGS 0
-static X86_Reg_No param_regs [] = { };
+static X86_Reg_No param_regs [] = { 0 };
#ifdef PLATFORM_WIN32
static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
if (*gr >= FLOAT_PARAM_REGS) {
ainfo->storage = ArgOnStack;
- (*stack_size) += sizeof (gpointer);
+ (*stack_size) += is_double ? 8 : 4;
}
else {
/* A double register */
info = mono_marshal_load_type_info (klass);
g_assert (info);
+ ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
+
/* Special case structs with only a float member */
if ((info->native_size == 8) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R8)) {
- ainfo->storage = ArgOnFpStack;
+ ainfo->storage = ArgValuetypeInReg;
+ ainfo->pair_storage [0] = ArgOnDoubleFpStack;
return;
}
if ((info->native_size == 4) && (info->num_fields == 1) && (info->fields [0].field->type->type == MONO_TYPE_R4)) {
- ainfo->storage = ArgOnFpStack;
+ ainfo->storage = ArgValuetypeInReg;
+ ainfo->pair_storage [0] = ArgOnFloatFpStack;
return;
}
if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ArgInIReg;
ainfo->pair_regs [0] = return_regs [0];
- ainfo->pair_storage [1] = ArgInIReg;
- ainfo->pair_regs [1] = return_regs [1];
+ if (info->native_size > 4) {
+ ainfo->pair_storage [1] = ArgInIReg;
+ ainfo->pair_regs [1] = return_regs [1];
+ }
return;
}
}
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
cinfo->ret.reg = X86_EAX;
break;
case MONO_TYPE_R4:
- cinfo->ret.storage = ArgOnFpStack;
+ cinfo->ret.storage = ArgOnFloatFpStack;
break;
case MONO_TYPE_R8:
- cinfo->ret.storage = ArgOnFpStack;
+ cinfo->ret.storage = ArgOnDoubleFpStack;
break;
case MONO_TYPE_VALUETYPE: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
;
break;
case MONO_TYPE_VOID:
+ cinfo->ret.storage = ArgNone;
break;
default:
g_error ("Can't handle as return value 0x%x", sig->ret->type);
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
add_float (&fr, &stack_size, ainfo, TRUE);
break;
default:
+ g_error ("unexpected type 0x%x", ptype->type);
g_assert_not_reached ();
}
}
int k, frame_size = 0;
int size, align, pad;
int offset = 8;
+ CallInfo *cinfo;
- if (MONO_TYPE_ISSTRUCT (csig->ret)) {
+ cinfo = get_call_info (csig, FALSE);
+
+ if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
frame_size += sizeof (gpointer);
offset += 4;
}
frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
+ g_free (cinfo);
+
return frame_size;
}
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
int have_cpuid = 0;
+#ifndef _MSC_VER
__asm__ __volatile__ (
"pushfl\n"
"popl %%eax\n"
:
: "%eax", "%edx"
);
-
+#else
+ __asm {
+ pushfd
+ pop eax
+ mov edx, eax
+ xor eax, 0x200000
+ push eax
+ popfd
+ pushfd
+ pop eax
+ xor eax, edx
+ and eax, 0x200000
+ mov have_cpuid, eax
+ }
+#endif
if (have_cpuid) {
/* Have to use the code manager to get around WinXP DEP */
MonoCodeManager *codeman = mono_code_manager_new_dynamic ();
+ CpuidFunc func;
void *ptr = mono_code_manager_reserve (codeman, sizeof (cpuid_impl));
memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
- CpuidFunc func = (CpuidFunc)ptr;
+ func = (CpuidFunc)ptr;
func (id, p_eax, p_ebx, p_ecx, p_edx);
mono_code_manager_destroy (codeman);
void
mono_arch_cpu_init (void)
{
+ /* spec compliance requires running with double precision */
+#ifndef _MSC_VER
guint16 fpcw;
- /* spec compliance requires running with double precision */
__asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
fpcw &= ~X86_FPCW_PRECC_MASK;
fpcw |= X86_FPCW_PREC_DOUBLE;
__asm__ __volatile__ ("fldcw %0\n": : "m" (fpcw));
__asm__ __volatile__ ("fnstcw %0\n": "=m" (fpcw));
-
- mono_x86_tramp_init ();
+#else
+ _control87 (_PC_53, MCW_PC);
+#endif
}
/*
gboolean
mono_arch_is_int_overflow (void *sigctx, void *info)
{
- struct sigcontext *ctx = (struct sigcontext*)sigctx;
+ MonoContext ctx;
guint8* ip;
- ip = (guint8*)ctx->SC_EIP;
+ mono_arch_sigctx_to_monoctx (sigctx, &ctx);
+
+ ip = (guint8*)ctx.eip;
if ((ip [0] == 0xf7) && (x86_modrm_mod (ip [1]) == 0x3) && (x86_modrm_reg (ip [1]) == 0x7)) {
gint32 reg;
/* idiv REG */
switch (x86_modrm_rm (ip [1])) {
+ case X86_EAX:
+ reg = ctx.eax;
+ break;
case X86_ECX:
- reg = ctx->SC_ECX;
+ reg = ctx.ecx;
+ break;
+ case X86_EDX:
+ reg = ctx.edx;
break;
case X86_EBX:
- reg = ctx->SC_EBX;
+ reg = ctx.ebx;
+ break;
+ case X86_ESI:
+ reg = ctx.esi;
+ break;
+ case X86_EDI:
+ reg = ctx.edi;
break;
default:
g_assert_not_reached ();
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
return TRUE;
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
* The locals var stuff should most likely be split in another method.
*/
void
-mono_arch_allocate_vars (MonoCompile *m)
+mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
MonoInst *inst;
guint32 locals_stack_size, locals_stack_align;
- int i, offset, curinst, size, align;
+ int i, offset;
gint32 *offsets;
+ CallInfo *cinfo;
- header = mono_method_get_header (m->method);
- sig = mono_method_signature (m->method);
-
- offset = 8;
- curinst = 0;
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- m->ret->opcode = OP_REGOFFSET;
- m->ret->inst_basereg = X86_EBP;
- m->ret->inst_offset = offset;
- offset += sizeof (gpointer);
- } else {
- /* FIXME: handle long and FP values */
- switch (sig->ret->type) {
- case MONO_TYPE_VOID:
- break;
- default:
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = X86_EAX;
- break;
- }
- }
- if (sig->hasthis) {
- inst = m->varinfo [curinst];
- if (inst->opcode != OP_REGVAR) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = X86_EBP;
- }
- inst->inst_offset = offset;
- offset += sizeof (gpointer);
- curinst++;
- }
-
- if (sig->call_convention == MONO_CALL_VARARG) {
- m->sig_cookie = offset;
- offset += sizeof (gpointer);
- }
+ header = mono_method_get_header (cfg->method);
+ sig = mono_method_signature (cfg->method);
- for (i = 0; i < sig->param_count; ++i) {
- inst = m->varinfo [curinst];
- if (inst->opcode != OP_REGVAR) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = X86_EBP;
- }
- inst->inst_offset = offset;
- size = mono_type_size (sig->params [i], &align);
- size += 4 - 1;
- size &= ~(4 - 1);
- offset += size;
- curinst++;
- }
+ cinfo = get_call_info (sig, FALSE);
+ cfg->frame_reg = MONO_ARCH_BASEREG;
offset = 0;
- /* reserve space to save LMF and caller saved registers */
+ /* Reserve space to save LMF and caller saved registers */
- if (m->method->save_lmf) {
+ if (cfg->method->save_lmf) {
offset += sizeof (MonoLMF);
} else {
- if (m->used_int_regs & (1 << X86_EBX)) {
+ if (cfg->used_int_regs & (1 << X86_EBX)) {
offset += 4;
}
- if (m->used_int_regs & (1 << X86_EDI)) {
+ if (cfg->used_int_regs & (1 << X86_EDI)) {
offset += 4;
}
- if (m->used_int_regs & (1 << X86_ESI)) {
+ if (cfg->used_int_regs & (1 << X86_ESI)) {
offset += 4;
}
}
+ switch (cinfo->ret.storage) {
+ case ArgValuetypeInReg:
+ /* Allocate a local to hold the result, the epilog will copy it to the correct place */
+ offset += 8;
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = X86_EBP;
+ cfg->ret->inst_offset = - offset;
+ break;
+ default:
+ break;
+ }
+
/* Allocate locals */
- offsets = mono_allocate_stack_slots (m, &locals_stack_size, &locals_stack_align);
+ offsets = mono_allocate_stack_slots (cfg, &locals_stack_size, &locals_stack_align);
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
- for (i = m->locals_start; i < m->num_varinfo; i++) {
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
if (offsets [i] != -1) {
- MonoInst *inst = m->varinfo [i];
+ MonoInst *inst = cfg->varinfo [i];
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = X86_EBP;
inst->inst_offset = - (offset + offsets [i]);
g_free (offsets);
offset += locals_stack_size;
+
+ /*
+ * Allocate arguments+return value
+ */
+
+ switch (cinfo->ret.storage) {
+ case ArgOnStack:
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = X86_EBP;
+ cfg->ret->inst_offset = cinfo->ret.offset + ARGS_OFFSET;
+ break;
+ case ArgValuetypeInReg:
+ break;
+ case ArgInIReg:
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
+ break;
+ case ArgNone:
+ case ArgOnFloatFpStack:
+ case ArgOnDoubleFpStack:
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ if (sig->call_convention == MONO_CALL_VARARG) {
+ g_assert (cinfo->sig_cookie.storage == ArgOnStack);
+ cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
+ }
+
+ for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+ ArgInfo *ainfo = &cinfo->args [i];
+ inst = cfg->varinfo [i];
+ if (inst->opcode != OP_REGVAR) {
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = X86_EBP;
+ }
+ inst->inst_offset = ainfo->offset + ARGS_OFFSET;
+ }
+
offset += (MONO_ARCH_FRAME_ALIGNMENT - 1);
offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
- /* change sign? */
- m->stack_offset = -offset;
+ cfg->stack_offset = offset;
+
+ g_free (cinfo);
+}
+
+/*
+ * mono_arch_create_vars:
+ *
+ *   Arch-specific variable-creation pass run before allocation.  Classify
+ * the method signature with get_call_info (); if the return value is
+ * passed back in registers (ArgValuetypeInReg), flag the return variable
+ * as local so mono_arch_allocate_vars () gives it a stack slot (the epilog
+ * then copies it into the return registers).
+ */
+void
+mono_arch_create_vars (MonoCompile *cfg)
+{
+	MonoMethodSignature *sig;
+	CallInfo *cinfo;
+
+	sig = mono_method_signature (cfg->method);
+
+	cinfo = get_call_info (sig, FALSE);
+
+	if (cinfo->ret.storage == ArgValuetypeInReg)
+		cfg->ret_var_is_local = TRUE;
+
+	/* get_call_info () result is malloc'ed; caller owns and frees it */
+	g_free (cinfo);
+}
/* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
* instructions to properly call the function in call.
* This includes pushing, moving arguments to the right register
* etc.
- * Issue: who does the spilling if needed, and when?
*/
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
MonoInst *arg, *in;
MonoMethodSignature *sig;
- int i, n, stack_size, type;
- MonoType *ptype;
+ int i, n;
CallInfo *cinfo;
+ int sentinelpos;
- stack_size = 0;
- /* add the vararg cookie before the non-implicit args */
- if (call->signature->call_convention == MONO_CALL_VARARG) {
- MonoInst *sig_arg;
- /* FIXME: Add support for signature tokens to AOT */
- cfg->disable_aot = TRUE;
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
- MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
- sig_arg->inst_p0 = call->signature;
- arg->inst_left = sig_arg;
- arg->type = STACK_PTR;
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
- stack_size += sizeof (gpointer);
- }
sig = call->signature;
n = sig->param_count + sig->hasthis;
cinfo = get_call_info (sig, FALSE);
- if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
- if (cinfo->ret.storage == ArgOnStack)
- stack_size += sizeof (gpointer);
- }
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
+ sentinelpos = sig->sentinelpos + (is_virtual ? 1 : 0);
for (i = 0; i < n; ++i) {
+ ArgInfo *ainfo = cinfo->args + i;
+
+ /* Emit the signature cookie just before the implicit arguments */
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
+ MonoMethodSignature *tmp_sig;
+ MonoInst *sig_arg;
+
+ /* FIXME: Add support for signature tokens to AOT */
+ cfg->disable_aot = TRUE;
+ MONO_INST_NEW (cfg, arg, OP_OUTARG);
+
+ /*
+ * mono_ArgIterator_Setup assumes the signature cookie is
+ * passed first and all the arguments which were before it are
+ * passed on the stack after the signature. So compensate by
+ * passing a different signature.
+ */
+ tmp_sig = mono_metadata_signature_dup (call->signature);
+ tmp_sig->param_count -= call->signature->sentinelpos;
+ tmp_sig->sentinelpos = 0;
+ memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+ MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->inst_p0 = tmp_sig;
+
+ arg->inst_left = sig_arg;
+ arg->type = STACK_PTR;
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
+ }
+
if (is_virtual && i == 0) {
/* the argument will be attached to the call instrucion */
in = call->args [i];
- stack_size += 4;
} else {
+ MonoType *t;
+
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+ t = mono_type_get_underlying_type (t);
+
MONO_INST_NEW (cfg, arg, OP_OUTARG);
in = call->args [i];
arg->cil_code = in->cil_code;
/* prepend, so they get reversed */
arg->next = call->out_args;
call->out_args = arg;
- if (i >= sig->hasthis) {
- MonoType *t = sig->params [i - sig->hasthis];
- ptype = mono_type_get_underlying_type (t);
- if (t->byref)
- type = MONO_TYPE_U;
- else
- type = ptype->type;
- /* FIXME: validate arguments... */
- switch (type) {
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_STRING:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_SZARRAY:
- stack_size += 4;
- break;
- case MONO_TYPE_I8:
- case MONO_TYPE_U8:
- stack_size += 8;
- break;
- case MONO_TYPE_R4:
- stack_size += 4;
- arg->opcode = OP_OUTARG_R4;
- break;
- case MONO_TYPE_R8:
- stack_size += 8;
- arg->opcode = OP_OUTARG_R8;
- break;
- case MONO_TYPE_VALUETYPE: {
- int size;
- if (sig->pinvoke)
- size = mono_type_native_stack_size (&in->klass->byval_arg, NULL);
- else
- size = mono_type_stack_size (&in->klass->byval_arg, NULL);
- stack_size += size;
- arg->opcode = OP_OUTARG_VT;
- arg->klass = in->klass;
- arg->unused = sig->pinvoke;
- arg->inst_imm = size;
- break;
+ if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
+ gint align;
+ guint32 size;
+
+ if (t->type == MONO_TYPE_TYPEDBYREF) {
+ size = sizeof (MonoTypedRef);
+ align = sizeof (gpointer);
}
- case MONO_TYPE_TYPEDBYREF:
- stack_size += sizeof (MonoTypedRef);
- arg->opcode = OP_OUTARG_VT;
- arg->klass = in->klass;
- arg->unused = sig->pinvoke;
- arg->inst_imm = sizeof (MonoTypedRef);
+ else
+ if (sig->pinvoke)
+ size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+ else
+ size = mono_type_stack_size (&in->klass->byval_arg, &align);
+ arg->opcode = OP_OUTARG_VT;
+ arg->klass = in->klass;
+ arg->unused = sig->pinvoke;
+ arg->inst_imm = size;
+ }
+ else {
+ switch (ainfo->storage) {
+ case ArgOnStack:
+ arg->opcode = OP_OUTARG;
+ if (!t->byref) {
+ if (t->type == MONO_TYPE_R4)
+ arg->opcode = OP_OUTARG_R4;
+ else
+ if (t->type == MONO_TYPE_R8)
+ arg->opcode = OP_OUTARG_R8;
+ }
break;
default:
- g_error ("unknown type 0x%02x in mono_arch_call_opcode\n", type);
+ g_assert_not_reached ();
}
- } else {
- /* the this argument */
- stack_size += 4;
}
}
}
else
/* if the function returns a struct, the called method already does a ret $0x4 */
if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret))
- stack_size -= 4;
+ cinfo->stack_usage -= 4;
}
- call->stack_usage = stack_size;
+ call->stack_usage = cinfo->stack_usage;
g_free (cinfo);
- /*
- * should set more info in call, such as the stack space
- * used by the args that needs to be added back to esp
- */
-
return call;
}
} \
}
-/* emit an exception if condition is fail */
+/*
+ * Emit an exception if condition is fail and
+ * if possible do a directly branch to target
+ */
#define EMIT_COND_SYSTEM_EXCEPTION(cond,signed,exc_name) \
- do { \
- mono_add_patch_info (cfg, code - cfg->native_code, \
- MONO_PATCH_INFO_EXC, exc_name); \
- x86_branch32 (code, cond, 0, signed); \
+ do { \
+ MonoInst *tins = mono_branch_optimize_exception_target (cfg, bb, exc_name); \
+ if (tins == NULL) { \
+ mono_add_patch_info (cfg, code - cfg->native_code, \
+ MONO_PATCH_INFO_EXC, exc_name); \
+ x86_branch32 (code, cond, 0, signed); \
+ } else { \
+ EMIT_COND_BRANCH (tins, cond, signed); \
+ } \
} while (0);
#define EMIT_FPCOMPARE(code) do { \
case OP_LOADU1_MEMBASE:
case OP_LOADI1_MEMBASE:
/*
- * Note: if reg1 = reg2 the load op is removed
- *
- * OP_STORE_MEMBASE_REG reg1, offset(basereg)
- * OP_LOAD_MEMBASE offset(basereg), reg2
- * -->
- * OP_STORE_MEMBASE_REG reg1, offset(basereg)
- * OP_MOVE reg1, reg2
- */
- if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
- ins->inst_basereg == last_ins->inst_destbasereg &&
- ins->inst_offset == last_ins->inst_offset) {
- if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- } else {
- //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
- ins->opcode = OP_MOVE;
- ins->sreg1 = last_ins->sreg1;
- }
- }
- break;
- case OP_LOADU2_MEMBASE:
- case OP_LOADI2_MEMBASE:
- /*
- * Note: if reg1 = reg2 the load op is removed
- *
- * OP_STORE_MEMBASE_REG reg1, offset(basereg)
- * OP_LOAD_MEMBASE offset(basereg), reg2
- * -->
- * OP_STORE_MEMBASE_REG reg1, offset(basereg)
- * OP_MOVE reg1, reg2
- */
- if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
- ins->inst_basereg == last_ins->inst_destbasereg &&
- ins->inst_offset == last_ins->inst_offset) {
- if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- } else {
- //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
- ins->opcode = OP_MOVE;
- ins->sreg1 = last_ins->sreg1;
- }
- }
- break;
- case CEE_CONV_I4:
- case CEE_CONV_U4:
- case OP_MOVE:
- /*
- * Removes:
- *
- * OP_MOVE reg, reg
- */
- if (ins->dreg == ins->sreg1) {
- if (last_ins)
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- }
- /*
- * Removes:
- *
- * OP_MOVE sreg, dreg
- * OP_MOVE dreg, sreg
- */
- if (last_ins && last_ins->opcode == OP_MOVE &&
- ins->sreg1 == last_ins->dreg &&
- ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- }
- break;
-
- case OP_X86_PUSH_MEMBASE:
- if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
- last_ins->opcode == OP_STORE_MEMBASE_REG) &&
- ins->inst_basereg == last_ins->inst_destbasereg &&
- ins->inst_offset == last_ins->inst_offset) {
- ins->opcode = OP_X86_PUSH;
- ins->sreg1 = last_ins->sreg1;
- }
- break;
- }
- last_ins = ins;
- ins = ins->next;
- }
- bb->last_ins = last_ins;
-}
-
-static const int
-branch_cc_table [] = {
- X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
- X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
- X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
-};
-
-#define DEBUG(a) if (cfg->verbose_level > 1) a
-//#define DEBUG(a)
-
-/*
- * returns the offset used by spillvar. It allocates a new
- * spill variable if necessary.
- */
-static int
-mono_spillvar_offset (MonoCompile *cfg, int spillvar)
-{
- MonoSpillInfo **si, *info;
- int i = 0;
-
- si = &cfg->spill_info;
-
- while (i <= spillvar) {
-
- if (!*si) {
- *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
- info->next = NULL;
- cfg->stack_offset -= sizeof (gpointer);
- info->offset = cfg->stack_offset;
- }
-
- if (i == spillvar)
- return (*si)->offset;
-
- i++;
- si = &(*si)->next;
- }
-
- g_assert_not_reached ();
- return 0;
-}
-
-/*
- * returns the offset used by spillvar. It allocates a new
- * spill float variable if necessary.
- * (same as mono_spillvar_offset but for float)
- */
-static int
-mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
-{
- MonoSpillInfo **si, *info;
- int i = 0;
-
- si = &cfg->spill_info_float;
-
- while (i <= spillvar) {
-
- if (!*si) {
- *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
- info->next = NULL;
- cfg->stack_offset -= sizeof (double);
- info->offset = cfg->stack_offset;
- }
-
- if (i == spillvar)
- return (*si)->offset;
-
- i++;
- si = &(*si)->next;
- }
-
- g_assert_not_reached ();
- return 0;
-}
-
-/*
- * Creates a store for spilled floating point items
- */
-static MonoInst*
-create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
-{
- MonoInst *store;
- MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
- store->sreg1 = reg;
- store->inst_destbasereg = X86_EBP;
- store->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
- DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08x(%%sp)) (from %d)\n", spill, store->inst_offset, reg));
- return store;
-}
-
-/*
- * Creates a load for spilled floating point items
- */
-static MonoInst*
-create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
-{
- MonoInst *load;
- MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
- load->dreg = reg;
- load->inst_basereg = X86_EBP;
- load->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
- DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08x(%%sp)) (from %d)\n", spill, load->inst_offset, reg));
- return load;
-}
-
-#define is_global_ireg(r) ((r) >= 0 && (r) < MONO_MAX_IREGS && !X86_IS_CALLEE ((r)))
-#define reg_is_freeable(r) ((r) >= 0 && (r) < MONO_MAX_IREGS && X86_IS_CALLEE ((r)))
-
-typedef struct {
- int born_in;
- int killed_in;
- int last_use;
- int prev_use;
- int flags; /* used to track fp spill/load */
-} RegTrack;
-
-static const char*const * ins_spec = pentium_desc;
-
-static void
-print_ins (int i, MonoInst *ins)
-{
- const char *spec = ins_spec [ins->opcode];
- g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
- if (spec [MONO_INST_DEST]) {
- if (ins->dreg >= MONO_MAX_IREGS)
- g_print (" R%d <-", ins->dreg);
- else
- g_print (" %s <-", mono_arch_regname (ins->dreg));
- }
- if (spec [MONO_INST_SRC1]) {
- if (ins->sreg1 >= MONO_MAX_IREGS)
- g_print (" R%d", ins->sreg1);
- else
- g_print (" %s", mono_arch_regname (ins->sreg1));
- }
- if (spec [MONO_INST_SRC2]) {
- if (ins->sreg2 >= MONO_MAX_IREGS)
- g_print (" R%d", ins->sreg2);
- else
- g_print (" %s", mono_arch_regname (ins->sreg2));
- }
- if (spec [MONO_INST_CLOB])
- g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
- g_print ("\n");
-}
-
-static void
-print_regtrack (RegTrack *t, int num)
-{
- int i;
- char buf [32];
- const char *r;
-
- for (i = 0; i < num; ++i) {
- if (!t [i].born_in)
- continue;
- if (i >= MONO_MAX_IREGS) {
- g_snprintf (buf, sizeof(buf), "R%d", i);
- r = buf;
- } else
- r = mono_arch_regname (i);
- g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
- }
-}
-
-typedef struct InstList InstList;
-
-struct InstList {
- InstList *prev;
- InstList *next;
- MonoInst *data;
-};
-
-static inline InstList*
-inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
-{
- InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
- item->data = data;
- item->prev = NULL;
- item->next = list;
- if (list)
- list->prev = item;
- return item;
-}
-
-/*
- * Force the spilling of the variable in the symbolic register 'reg'.
- */
-static int
-get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg)
-{
- MonoInst *load;
- int i, sel, spill;
-
- sel = cfg->rs->iassign [reg];
- /*i = cfg->rs->isymbolic [sel];
- g_assert (i == reg);*/
- i = reg;
- spill = ++cfg->spill_count;
- cfg->rs->iassign [i] = -spill - 1;
- mono_regstate_free_int (cfg->rs, sel);
- /* we need to create a spill var and insert a load to sel after the current instruction */
- MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
- load->dreg = sel;
- load->inst_basereg = X86_EBP;
- load->inst_offset = mono_spillvar_offset (cfg, spill);
- if (item->prev) {
- while (ins->next != item->prev->data)
- ins = ins->next;
- }
- load->next = ins->next;
- ins->next = load;
- DEBUG (g_print ("SPILLED LOAD (%d at 0x%08x(%%ebp)) R%d (freed %s)\n", spill, load->inst_offset, i, mono_arch_regname (sel)));
- i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
- g_assert (i == sel);
-
- return sel;
-}
-
-static int
-get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
-{
- MonoInst *load;
- int i, sel, spill;
-
- DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
- /* exclude the registers in the current instruction */
- if (reg != ins->sreg1 && (reg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg1] >= 0))) {
- if (ins->sreg1 >= MONO_MAX_IREGS)
- regmask &= ~ (1 << cfg->rs->iassign [ins->sreg1]);
- else
- regmask &= ~ (1 << ins->sreg1);
- DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
- }
- if (reg != ins->sreg2 && (reg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg2] >= 0))) {
- if (ins->sreg2 >= MONO_MAX_IREGS)
- regmask &= ~ (1 << cfg->rs->iassign [ins->sreg2]);
- else
- regmask &= ~ (1 << ins->sreg2);
- DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
- }
- if (reg != ins->dreg && reg_is_freeable (ins->dreg)) {
- regmask &= ~ (1 << ins->dreg);
- DEBUG (g_print ("\t\texcluding dreg %s\n", mono_arch_regname (ins->dreg)));
- }
-
- DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
- g_assert (regmask); /* need at least a register we can free */
- sel = -1;
- /* we should track prev_use and spill the register that's farther */
- for (i = 0; i < MONO_MAX_IREGS; ++i) {
- if (regmask & (1 << i)) {
- sel = i;
- DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
- break;
- }
- }
- i = cfg->rs->isymbolic [sel];
- spill = ++cfg->spill_count;
- cfg->rs->iassign [i] = -spill - 1;
- mono_regstate_free_int (cfg->rs, sel);
- /* we need to create a spill var and insert a load to sel after the current instruction */
- MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
- load->dreg = sel;
- load->inst_basereg = X86_EBP;
- load->inst_offset = mono_spillvar_offset (cfg, spill);
- if (item->prev) {
- while (ins->next != item->prev->data)
- ins = ins->next;
- }
- load->next = ins->next;
- ins->next = load;
- DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08x(%%ebp)) R%d (freed %s)\n", spill, load->inst_offset, i, mono_arch_regname (sel)));
- i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
- g_assert (i == sel);
-
- return sel;
-}
-
-static MonoInst*
-create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins)
-{
- MonoInst *copy;
- MONO_INST_NEW (cfg, copy, OP_MOVE);
- copy->dreg = dest;
- copy->sreg1 = src;
- if (ins) {
- copy->next = ins->next;
- ins->next = copy;
- }
- DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
- return copy;
-}
-
-static MonoInst*
-create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
-{
- MonoInst *store;
- MONO_INST_NEW (cfg, store, OP_STORE_MEMBASE_REG);
- store->sreg1 = reg;
- store->inst_destbasereg = X86_EBP;
- store->inst_offset = mono_spillvar_offset (cfg, spill);
- if (ins) {
- store->next = ins->next;
- ins->next = store;
- }
- DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08x(%%ebp)) R%d (from %s)\n", spill, store->inst_offset, prev_reg, mono_arch_regname (reg)));
- return store;
-}
-
-static void
-insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
-{
- MonoInst *prev;
- if (item->next) {
- prev = item->next->data;
-
- while (prev->next != ins)
- prev = prev->next;
- to_insert->next = ins;
- prev->next = to_insert;
- } else {
- to_insert->next = ins;
- }
- /*
- * needed otherwise in the next instruction we can add an ins to the
- * end and that would get past this instruction.
- */
- item->data = to_insert;
-}
-
-
-#if 0
-static int
-alloc_int_reg (MonoCompile *cfg, InstList *curinst, MonoInst *ins, int sym_reg, guint32 allow_mask)
-{
- int val = cfg->rs->iassign [sym_reg];
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_regstate_alloc_int (cfg->rs, allow_mask);
- if (val < 0)
- val = get_register_spilling (cfg, curinst, ins, allow_mask, sym_reg);
- cfg->rs->iassign [sym_reg] = val;
- /* add option to store before the instruction for src registers */
- if (spill)
- create_spilled_store (cfg, spill, val, sym_reg, ins);
- }
- cfg->rs->isymbolic [val] = sym_reg;
- return val;
-}
-#endif
-
-/* flags used in reginfo->flags */
-enum {
- MONO_X86_FP_NEEDS_LOAD_SPILL = 1 << 0,
- MONO_X86_FP_NEEDS_SPILL = 1 << 1,
- MONO_X86_FP_NEEDS_LOAD = 1 << 2,
- MONO_X86_REG_NOT_ECX = 1 << 3,
- MONO_X86_REG_EAX = 1 << 4,
- MONO_X86_REG_EDX = 1 << 5,
- MONO_X86_REG_ECX = 1 << 6
-};
-
-static int
-mono_x86_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
-{
- int val;
- int test_mask = dest_mask;
-
- if (flags & MONO_X86_REG_EAX)
- test_mask &= (1 << X86_EAX);
- else if (flags & MONO_X86_REG_EDX)
- test_mask &= (1 << X86_EDX);
- else if (flags & MONO_X86_REG_ECX)
- test_mask &= (1 << X86_ECX);
- else if (flags & MONO_X86_REG_NOT_ECX)
- test_mask &= ~ (1 << X86_ECX);
-
- val = mono_regstate_alloc_int (cfg->rs, test_mask);
- if (val >= 0 && test_mask != dest_mask)
- DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
-
- if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
- DEBUG(g_print ("\tFailed to allocate flag suggested mask (%u) but exluding ECX\n", test_mask));
- val = mono_regstate_alloc_int (cfg->rs, (dest_mask & (~1 << X86_ECX)));
- }
-
- if (val < 0) {
- val = mono_regstate_alloc_int (cfg->rs, dest_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg);
- }
-
- return val;
-}
-
-static inline void
-assign_ireg (MonoRegState *rs, int reg, int hreg)
-{
- g_assert (reg >= MONO_MAX_IREGS);
- g_assert (hreg < MONO_MAX_IREGS);
- g_assert (! is_global_ireg (hreg));
-
- rs->iassign [reg] = hreg;
- rs->isymbolic [hreg] = reg;
- rs->ifree_mask &= ~ (1 << hreg);
-}
-
-/*#include "cprop.c"*/
-
-/*
- * Local register allocation.
- * We first scan the list of instructions and we save the liveness info of
- * each register (when the register is first used, when it's value is set etc.).
- * We also reverse the list of instructions (in the InstList list) because assigning
- * registers backwards allows for more tricks to be used.
- */
-void
-mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
-{
- MonoInst *ins;
- MonoRegState *rs = cfg->rs;
- int i, val, fpcount;
- RegTrack *reginfo, *reginfof;
- RegTrack *reginfo1, *reginfo2, *reginfod;
- InstList *tmp, *reversed = NULL;
- const char *spec;
- guint32 src1_mask, src2_mask, dest_mask;
- GList *fspill_list = NULL;
- int fspill = 0;
-
- if (!bb->code)
- return;
- rs->next_vireg = bb->max_ireg;
- rs->next_vfreg = bb->max_freg;
- mono_regstate_assign (rs);
- reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
- reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
- rs->ifree_mask = X86_CALLEE_REGS;
-
- ins = bb->code;
-
- /*if (cfg->opt & MONO_OPT_COPYPROP)
- local_copy_prop (cfg, ins);*/
-
- i = 1;
- fpcount = 0;
- DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
- /* forward pass on the instructions to collect register liveness info */
- while (ins) {
- spec = ins_spec [ins->opcode];
-
- DEBUG (print_ins (i, ins));
-
- if (spec [MONO_INST_SRC1]) {
- if (spec [MONO_INST_SRC1] == 'f') {
- GList *spill;
- reginfo1 = reginfof;
-
- spill = g_list_first (fspill_list);
- if (spill && fpcount < MONO_MAX_FREGS) {
- reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
- fspill_list = g_list_remove (fspill_list, spill->data);
- } else
- fpcount--;
- }
- else
- reginfo1 = reginfo;
- reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
- reginfo1 [ins->sreg1].last_use = i;
- if (spec [MONO_INST_SRC1] == 'L') {
- /* The virtual register is allocated sequentially */
- reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
- reginfo1 [ins->sreg1 + 1].last_use = i;
- if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
- reginfo1 [ins->sreg1 + 1].born_in = i;
-
- reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
- reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
- }
- } else {
- ins->sreg1 = -1;
- }
- if (spec [MONO_INST_SRC2]) {
- if (spec [MONO_INST_SRC2] == 'f') {
- GList *spill;
- reginfo2 = reginfof;
- spill = g_list_first (fspill_list);
- if (spill) {
- reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
- fspill_list = g_list_remove (fspill_list, spill->data);
- if (fpcount >= MONO_MAX_FREGS) {
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
- }
- } else
- fpcount--;
- }
- else
- reginfo2 = reginfo;
- reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
- reginfo2 [ins->sreg2].last_use = i;
- if (spec [MONO_INST_SRC2] == 'L') {
- /* The virtual register is allocated sequentially */
- reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
- reginfo2 [ins->sreg2 + 1].last_use = i;
- if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
- reginfo2 [ins->sreg2 + 1].born_in = i;
- }
- if (spec [MONO_INST_CLOB] == 's') {
- reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
- reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
- }
- } else {
- ins->sreg2 = -1;
- }
- if (spec [MONO_INST_DEST]) {
- if (spec [MONO_INST_DEST] == 'f') {
- reginfod = reginfof;
- if (fpcount >= MONO_MAX_FREGS) {
- reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- fpcount--;
- }
- fpcount++;
- }
- else
- reginfod = reginfo;
- if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
- reginfod [ins->dreg].killed_in = i;
- reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
- reginfod [ins->dreg].last_use = i;
- if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
- reginfod [ins->dreg].born_in = i;
- if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
- /* The virtual register is allocated sequentially */
- reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
- reginfod [ins->dreg + 1].last_use = i;
- if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
- reginfod [ins->dreg + 1].born_in = i;
-
- reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
- reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
- }
- } else {
- ins->dreg = -1;
- }
-
- reversed = inst_list_prepend (cfg->mempool, reversed, ins);
- ++i;
- ins = ins->next;
- }
-
- // todo: check if we have anything left on fp stack, in verify mode?
- fspill = 0;
-
- DEBUG (print_regtrack (reginfo, rs->next_vireg));
- DEBUG (print_regtrack (reginfof, rs->next_vfreg));
- tmp = reversed;
- while (tmp) {
- int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
- dest_mask = src1_mask = src2_mask = X86_CALLEE_REGS;
- --i;
- ins = tmp->data;
- spec = ins_spec [ins->opcode];
- prev_dreg = -1;
- clob_dreg = -1;
- DEBUG (g_print ("processing:"));
- DEBUG (print_ins (i, ins));
- if (spec [MONO_INST_CLOB] == 's') {
- /*
- * Shift opcodes, SREG2 must be RCX
- */
- if (rs->ifree_mask & (1 << X86_ECX)) {
- if (ins->sreg2 < MONO_MAX_IREGS) {
- /* Argument already in hard reg, need to copy */
- MonoInst *copy = create_copy_ins (cfg, X86_ECX, ins->sreg2, NULL);
- insert_before_ins (ins, tmp, copy);
- }
- else {
- DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
- assign_ireg (rs, ins->sreg2, X86_ECX);
- }
- } else {
- int need_ecx_spill = TRUE;
- /*
- * we first check if src1/dreg is already assigned a register
- * and then we force a spill of the var assigned to ECX.
- */
- /* the destination register can't be ECX */
- dest_mask &= ~ (1 << X86_ECX);
- src1_mask &= ~ (1 << X86_ECX);
- val = rs->iassign [ins->dreg];
- /*
- * the destination register is already assigned to ECX:
- * we need to allocate another register for it and then
- * copy from this to ECX.
- */
- if (val == X86_ECX && ins->dreg != ins->sreg2) {
- int new_dest;
- new_dest = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
- g_assert (new_dest >= 0);
- DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
-
- rs->isymbolic [new_dest] = ins->dreg;
- rs->iassign [ins->dreg] = new_dest;
- clob_dreg = ins->dreg;
- ins->dreg = new_dest;
- create_copy_ins (cfg, X86_ECX, new_dest, ins);
- need_ecx_spill = FALSE;
- /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
- val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
- rs->iassign [ins->dreg] = val;
- rs->isymbolic [val] = prev_dreg;
- ins->dreg = val;*/
- }
- if (is_global_ireg (ins->sreg2)) {
- MonoInst *copy = create_copy_ins (cfg, X86_ECX, ins->sreg2, NULL);
- insert_before_ins (ins, tmp, copy);
- }
- else {
- val = rs->iassign [ins->sreg2];
- if (val >= 0 && val != X86_ECX) {
- MonoInst *move = create_copy_ins (cfg, X86_ECX, val, NULL);
- DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
- move->next = ins;
- g_assert_not_reached ();
- /* FIXME: where is move connected to the instruction list? */
- //tmp->prev->data->next = move;
- }
- else {
- if (val == X86_ECX)
- need_ecx_spill = FALSE;
- }
- }
- if (need_ecx_spill && !(rs->ifree_mask & (1 << X86_ECX))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [X86_ECX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [X86_ECX]);
- mono_regstate_free_int (rs, X86_ECX);
- }
- if (!is_global_ireg (ins->sreg2))
- /* force-set sreg2 */
- assign_ireg (rs, ins->sreg2, X86_ECX);
- }
- ins->sreg2 = X86_ECX;
- } else if (spec [MONO_INST_CLOB] == 'd') {
- /*
- * DIVISION/REMAINER
- */
- int dest_reg = X86_EAX;
- int clob_reg = X86_EDX;
- if (spec [MONO_INST_DEST] == 'd') {
- dest_reg = X86_EDX; /* reminder */
- clob_reg = X86_EAX;
- }
- if (is_global_ireg (ins->dreg))
- val = ins->dreg;
- else
- val = rs->iassign [ins->dreg];
- if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg]);
- mono_regstate_free_int (rs, dest_reg);
- }
- if (val < 0) {
- if (val < -1) {
- /* the register gets spilled after this inst */
- int spill = -val -1;
- dest_mask = 1 << dest_reg;
- prev_dreg = ins->dreg;
- val = mono_regstate_alloc_int (rs, dest_mask);
- if (val < 0)
- val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
- rs->iassign [ins->dreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins);
- DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
- rs->isymbolic [val] = prev_dreg;
- ins->dreg = val;
- } else {
- DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
- prev_dreg = ins->dreg;
- assign_ireg (rs, ins->dreg, dest_reg);
- ins->dreg = dest_reg;
- val = dest_reg;
- }
- }
-
- //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
- if (val != dest_reg) { /* force a copy */
- create_copy_ins (cfg, val, dest_reg, ins);
- if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg]);
- mono_regstate_free_int (rs, dest_reg);
- }
- }
- if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= 8)) {
- DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg]);
- mono_regstate_free_int (rs, clob_reg);
- }
- src1_mask = 1 << X86_EAX;
- src2_mask = 1 << X86_ECX;
- } else if (spec [MONO_INST_DEST] == 'l') {
- int hreg;
- val = rs->iassign [ins->dreg];
- /* check special case when dreg have been moved from ecx (clob shift) */
- if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
- hreg = clob_dreg + 1;
- else
- hreg = ins->dreg + 1;
-
- /* base prev_dreg on fixed hreg, handle clob case */
- val = hreg - 1;
-
- if (val != rs->isymbolic [X86_EAX] && !(rs->ifree_mask & (1 << X86_EAX))) {
- DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [X86_EAX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [X86_EAX]);
- mono_regstate_free_int (rs, X86_EAX);
- }
- if (hreg != rs->isymbolic [X86_EDX] && !(rs->ifree_mask & (1 << X86_EDX))) {
- DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [X86_EDX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [X86_EDX]);
- mono_regstate_free_int (rs, X86_EDX);
- }
- } else if (spec [MONO_INST_CLOB] == 'b') {
- /*
- * x86_set_reg instructions, dreg needs to be EAX..EDX
- */
- dest_mask = (1 << X86_EAX) | (1 << X86_EBX) | (1 << X86_ECX) | (1 << X86_EDX);
- if ((ins->dreg < MONO_MAX_IREGS) && (! (dest_mask & (1 << ins->dreg)))) {
- /*
- * ins->dreg is already a hard reg, need to allocate another
- * suitable hard reg and make a copy.
- */
- int new_dest = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
- g_assert (new_dest >= 0);
-
- create_copy_ins (cfg, ins->dreg, new_dest, ins);
- DEBUG (g_print ("\tclob:b changing dreg R%d to %s\n", ins->dreg, mono_arch_regname (new_dest)));
- ins->dreg = new_dest;
-
- /* The hard reg is no longer needed */
- mono_regstate_free_int (rs, new_dest);
- }
- }
-
- /*
- * TRACK DREG
- */
- if (spec [MONO_INST_DEST] == 'f') {
- if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
- GList *spill_node;
- MonoInst *store;
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
- insert_before_ins (ins, tmp, store);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- fspill--;
- }
- } else if (spec [MONO_INST_DEST] == 'L') {
- int hreg;
- val = rs->iassign [ins->dreg];
- /* check special case when dreg have been moved from ecx (clob shift) */
- if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
- hreg = clob_dreg + 1;
- else
- hreg = ins->dreg + 1;
-
- /* base prev_dreg on fixed hreg, handle clob case */
- prev_dreg = hreg - 1;
-
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
- rs->iassign [ins->dreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins);
- }
-
- DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
-
- rs->isymbolic [val] = hreg - 1;
- ins->dreg = val;
-
- val = rs->iassign [hreg];
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
- rs->iassign [hreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, hreg, ins);
- }
-
- DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
- rs->isymbolic [val] = hreg;
- /* save reg allocating into unused */
- ins->unused = val;
-
- /* check if we can free our long reg */
- if (reg_is_freeable (val) && hreg >= 0 && reginfo [hreg].born_in >= i) {
- DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
- mono_regstate_free_int (rs, val);
- }
- }
- else if (ins->dreg >= MONO_MAX_IREGS) {
- int hreg;
- val = rs->iassign [ins->dreg];
- if (spec [MONO_INST_DEST] == 'l') {
- /* check special case when dreg have been moved from ecx (clob shift) */
- if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
- hreg = clob_dreg + 1;
- else
- hreg = ins->dreg + 1;
-
- /* base prev_dreg on fixed hreg, handle clob case */
- prev_dreg = hreg - 1;
- } else
- prev_dreg = ins->dreg;
-
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
- rs->iassign [ins->dreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins);
- }
- DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
- rs->isymbolic [val] = prev_dreg;
- ins->dreg = val;
- /* handle cases where lreg needs to be eax:edx */
- if (spec [MONO_INST_DEST] == 'l') {
- /* check special case when dreg have been moved from ecx (clob shift) */
- int hreg = prev_dreg + 1;
- val = rs->iassign [hreg];
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
- rs->iassign [hreg] = val;
- if (spill)
- create_spilled_store (cfg, spill, val, hreg, ins);
- }
- DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
- rs->isymbolic [val] = hreg;
- if (ins->dreg == X86_EAX) {
- if (val != X86_EDX)
- create_copy_ins (cfg, val, X86_EDX, ins);
- } else if (ins->dreg == X86_EDX) {
- if (val == X86_EAX) {
- /* swap */
- g_assert_not_reached ();
- } else {
- /* two forced copies */
- create_copy_ins (cfg, val, X86_EDX, ins);
- create_copy_ins (cfg, ins->dreg, X86_EAX, ins);
- }
- } else {
- if (val == X86_EDX) {
- create_copy_ins (cfg, ins->dreg, X86_EAX, ins);
- } else {
- /* two forced copies */
- create_copy_ins (cfg, val, X86_EDX, ins);
- create_copy_ins (cfg, ins->dreg, X86_EAX, ins);
- }
- }
- if (reg_is_freeable (val) && hreg >= 0 && reginfo [hreg].born_in >= i) {
- DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
- mono_regstate_free_int (rs, val);
- }
- } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != X86_EAX && spec [MONO_INST_CLOB] != 'd') {
- /* this instruction only outputs to EAX, need to copy */
- create_copy_ins (cfg, ins->dreg, X86_EAX, ins);
- } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != X86_EDX && spec [MONO_INST_CLOB] != 'd') {
- create_copy_ins (cfg, ins->dreg, X86_EDX, ins);
- }
- }
- if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
- DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
- mono_regstate_free_int (rs, ins->dreg);
- }
- /* put src1 in EAX if it needs to be */
- if (spec [MONO_INST_SRC1] == 'a') {
- if (!(rs->ifree_mask & (1 << X86_EAX))) {
- DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [X86_EAX]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [X86_EAX]);
- mono_regstate_free_int (rs, X86_EAX);
- }
- if (ins->sreg1 < MONO_MAX_IREGS) {
- /* The argument is already in a hard reg, need to copy */
- MonoInst *copy = create_copy_ins (cfg, X86_EAX, ins->sreg1, NULL);
- insert_before_ins (ins, tmp, copy);
- }
- else
- /* force-set sreg1 */
- assign_ireg (rs, ins->sreg1, X86_EAX);
- ins->sreg1 = X86_EAX;
- }
-
- /*
- * TRACK SREG1
- */
- if (spec [MONO_INST_SRC1] == 'f') {
- if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
- MonoInst *load;
- MonoInst *store = NULL;
-
- if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
- GList *spill_node;
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- }
-
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
- insert_before_ins (ins, tmp, load);
- if (store)
- insert_before_ins (load, tmp, store);
- }
- } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
- /* force source to be same as dest */
- assign_ireg (rs, ins->sreg1, ins->dreg);
- assign_ireg (rs, ins->sreg1 + 1, ins->unused);
-
- DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
- DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
-
- ins->sreg1 = ins->dreg;
- /*
- * No need for saving the reg, we know that src1=dest in this cases
- * ins->inst_c0 = ins->unused;
- */
- }
- else if (ins->sreg1 >= MONO_MAX_IREGS) {
- val = rs->iassign [ins->sreg1];
- prev_sreg1 = ins->sreg1;
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- if (0 && ins->opcode == OP_MOVE) {
- /*
- * small optimization: the dest register is already allocated
- * but the src one is not: we can simply assign the same register
- * here and peephole will get rid of the instruction later.
- * This optimization may interfere with the clobbering handling:
- * it removes a mov operation that will be added again to handle clobbering.
- * There are also some other issues that should with make testjit.
- */
- mono_regstate_alloc_int (rs, 1 << ins->dreg);
- val = rs->iassign [ins->sreg1] = ins->dreg;
- //g_assert (val >= 0);
- DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
- } else {
- //g_assert (val == -1); /* source cannot be spilled */
- val = mono_x86_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
- rs->iassign [ins->sreg1] = val;
- DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
- }
- if (spill) {
- MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL);
- insert_before_ins (ins, tmp, store);
- }
- }
- rs->isymbolic [val] = prev_sreg1;
- ins->sreg1 = val;
- } else {
- prev_sreg1 = -1;
- }
- /* handle clobbering of sreg1 */
- if ((spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
- MonoInst *sreg2_copy = NULL;
-
- if (ins->dreg == ins->sreg2) {
- /*
- * copying sreg1 to dreg could clobber sreg2, so allocate a new
- * register for it.
- */
- int reg2 = 0;
-
- reg2 = mono_x86_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
-
- DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_arch_regname (ins->sreg2), mono_arch_regname (reg2)));
- sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL);
- prev_sreg2 = ins->sreg2 = reg2;
-
- mono_regstate_free_int (rs, reg2);
- }
-
- MonoInst *copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL);
- DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_arch_regname (ins->sreg1), mono_arch_regname (ins->dreg)));
- insert_before_ins (ins, tmp, copy);
-
- if (sreg2_copy)
- insert_before_ins (copy, tmp, sreg2_copy);
-
- /*
- * Need to prevent sreg2 to be allocated to sreg1, since that
- * would screw up the previous copy.
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * CONV_I1/U1 reg1, reg2
*/
- src2_mask &= ~ (1 << ins->sreg1);
- /* we set sreg1 to dest as well */
- prev_sreg1 = ins->sreg1 = ins->dreg;
- src2_mask &= ~ (1 << ins->dreg);
- }
-
- /*
- * TRACK SREG2
- */
- if (spec [MONO_INST_SRC2] == 'f') {
- if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
- MonoInst *load;
- MonoInst *store = NULL;
-
- if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
- GList *spill_node;
-
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
- if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
- spill_node = g_list_next (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- }
-
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
- insert_before_ins (ins, tmp, load);
- if (store)
- insert_before_ins (load, tmp, store);
+ if (last_ins && X86_IS_BYTE_REG (last_ins->sreg1) &&
+ (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
+ ins->sreg1 = last_ins->sreg1;
}
- }
- else if (ins->sreg2 >= MONO_MAX_IREGS) {
- val = rs->iassign [ins->sreg2];
- prev_sreg2 = ins->sreg2;
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
- val = mono_x86_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
- rs->iassign [ins->sreg2] = val;
- DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
- if (spill)
- create_spilled_store (cfg, spill, val, prev_sreg2, ins);
+ break;
+ case OP_LOADU2_MEMBASE:
+ case OP_LOADI2_MEMBASE:
+ /*
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * CONV_I2/U2 reg1, reg2
+ */
+ if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
+ ins->sreg1 = last_ins->sreg1;
}
- rs->isymbolic [val] = prev_sreg2;
- ins->sreg2 = val;
- if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != X86_ECX) {
- DEBUG (g_print ("\tassigned sreg2 %s to R%d, but ECX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [X86_ECX]));
+ break;
+ case CEE_CONV_I4:
+ case CEE_CONV_U4:
+ case OP_MOVE:
+ /*
+ * Removes:
+ *
+ * OP_MOVE reg, reg
+ */
+ if (ins->dreg == ins->sreg1) {
+ if (last_ins)
+ last_ins->next = ins->next;
+ ins = ins->next;
+ continue;
}
- } else {
- prev_sreg2 = -1;
- }
-
- if (spec [MONO_INST_CLOB] == 'c') {
- int j, s;
- guint32 clob_mask = X86_CALLEE_REGS;
- for (j = 0; j < MONO_MAX_IREGS; ++j) {
- s = 1 << j;
- if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
- //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
- }
+ /*
+ * Removes:
+ *
+ * OP_MOVE sreg, dreg
+ * OP_MOVE dreg, sreg
+ */
+ if (last_ins && last_ins->opcode == OP_MOVE &&
+ ins->sreg1 == last_ins->dreg &&
+ ins->dreg == last_ins->sreg1) {
+ last_ins->next = ins->next;
+ ins = ins->next;
+ continue;
}
- }
- if (spec [MONO_INST_CLOB] == 'a') {
- guint32 clob_reg = X86_EAX;
- if (!(rs->ifree_mask & (1 << clob_reg)) && (rs->isymbolic [clob_reg] >= 8)) {
- DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
- get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg]);
- mono_regstate_free_int (rs, clob_reg);
+ break;
+
+ case OP_X86_PUSH_MEMBASE:
+ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
+ last_ins->opcode == OP_STORE_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = OP_X86_PUSH;
+ ins->sreg1 = last_ins->sreg1;
}
+ break;
}
- /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
- DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
- mono_regstate_free_int (rs, ins->sreg1);
- }
- if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
- DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
- mono_regstate_free_int (rs, ins->sreg2);
- }*/
-
- //DEBUG (print_ins (i, ins));
- /* this may result from a insert_before call */
- if (!tmp->next)
- bb->code = tmp->data;
- tmp = tmp->next;
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
+}
+
+/*
+ * x86 condition codes used when lowering compare/branch opcodes.
+ * NOTE(review): the two five-entry rows look like signed vs. unsigned
+ * compare conditions (EQ/GE/GT/LE/LT twice), followed by
+ * overflow/no-overflow/carry/no-carry tests — confirm the indexing
+ * against the opcode range at the (not visible) use site.
+ */
+static const int
+branch_cc_table [] = {
+	X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
+	X86_CC_NE, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
+	X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
+};
- g_free (reginfo);
- g_free (reginfof);
- g_list_free (fspill_list);
+/* Machine description table for this architecture (declared via
+ * cpu-pentium.h; presumably generated from cpu-pentium.md — confirm). */
+static const char*const * ins_spec = pentium_desc;
+
+/*#include "cprop.c"*/
+
+/*
+ * mono_arch_local_regalloc:
+ *
+ *   Perform local register allocation for basic block BB. The former
+ * hand-written x86 allocator (removed above) is replaced by a call to
+ * the shared, arch-independent allocator.
+ */
+void
+mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+	mono_local_regalloc (cfg, bb);
+}
static unsigned char*
mono_emit_stack_alloc (guchar *code, MonoInst* tree)
{
int sreg = tree->sreg1;
-#ifdef PLATFORM_WIN32
- guint8* br[5];
+ int need_touch = FALSE;
- /*
- * Under Windows:
- * If requested stack size is larger than one page,
- * perform stack-touch operation
- */
- /*
- * Generate stack probe code.
- * Under Windows, it is necessary to allocate one page at a time,
- * "touching" stack after each successful sub-allocation. This is
- * because of the way stack growth is implemented - there is a
- * guard page before the lowest stack page that is currently commited.
- * Stack normally grows sequentially so OS traps access to the
- * guard page and commits more pages when needed.
- */
- x86_test_reg_imm (code, sreg, ~0xFFF);
- br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
-
- br[2] = code; /* loop */
- x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
- x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
- x86_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
- x86_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
- br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
- x86_patch (br[3], br[2]);
- x86_test_reg_reg (code, sreg, sreg);
- br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
- x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
-
- br[1] = code; x86_jump8 (code, 0);
-
- x86_patch (br[0], code);
- x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
- x86_patch (br[1], code);
- x86_patch (br[4], code);
-#else /* PLATFORM_WIN32 */
- x86_alu_reg_reg (code, X86_SUB, X86_ESP, tree->sreg1);
+#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+ need_touch = TRUE;
#endif
+
+ if (need_touch) {
+ guint8* br[5];
+
+ /*
+ * Under Windows:
+ * If requested stack size is larger than one page,
+ * perform stack-touch operation
+ */
+ /*
+ * Generate stack probe code.
+ * Under Windows, it is necessary to allocate one page at a time,
+ * "touching" stack after each successful sub-allocation. This is
+ * because of the way stack growth is implemented - there is a
+	 * guard page before the lowest stack page that is currently committed.
+ * Stack normally grows sequentially so OS traps access to the
+ * guard page and commits more pages when needed.
+ */
+ x86_test_reg_imm (code, sreg, ~0xFFF);
+ br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
+
+ br[2] = code; /* loop */
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
+ x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
+
+ /*
+	 * By the end of the loop, sreg is smaller than 0x1000, so the init routine
+ * that follows only initializes the last part of the area.
+ */
+ /* Same as the init code below with size==0x1000 */
+ if (tree->flags & MONO_INST_INIT) {
+ x86_push_reg (code, X86_EAX);
+ x86_push_reg (code, X86_ECX);
+ x86_push_reg (code, X86_EDI);
+ x86_mov_reg_imm (code, X86_ECX, (0x1000 >> 2));
+ x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
+ x86_lea_membase (code, X86_EDI, X86_ESP, 12);
+ x86_cld (code);
+ x86_prefix (code, X86_REP_PREFIX);
+ x86_stosl (code);
+ x86_pop_reg (code, X86_EDI);
+ x86_pop_reg (code, X86_ECX);
+ x86_pop_reg (code, X86_EAX);
+ }
+
+ x86_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
+ x86_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
+ br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
+ x86_patch (br[3], br[2]);
+ x86_test_reg_reg (code, sreg, sreg);
+ br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
+ x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
+
+ br[1] = code; x86_jump8 (code, 0);
+
+ x86_patch (br[0], code);
+ x86_alu_reg_reg (code, X86_SUB, X86_ESP, sreg);
+ x86_patch (br[1], code);
+ x86_patch (br[4], code);
+ }
+ else
+ x86_alu_reg_reg (code, X86_SUB, X86_ESP, tree->sreg1);
+
if (tree->flags & MONO_INST_INIT) {
int offset = 0;
if (tree->dreg != X86_EAX && sreg != X86_EAX) {
return code;
}
+/*
+ * emit_tls_get:
+ *
+ *   Emit code into CODE that loads the thread-local value identified by
+ * TLS_OFFSET into hard register DREG, returning the updated code pointer.
+ * On win32, TLS_OFFSET is a key returned by TlsAlloc () (see the
+ * *_tls_offset globals at the top of the file); elsewhere it is an
+ * offset from the GS segment base.
+ */
+static guint8*
+emit_tls_get (guint8* code, int dreg, int tls_offset)
+{
+#ifdef PLATFORM_WIN32
+	/*
+	 * See the Under the Hood article in the May 1996 issue of Microsoft Systems
+	 * Journal and/or a disassembly of the TlsGet () function.
+	 */
+	/* Only keys below 64 (the basic TLS slot range) are supported. */
+	g_assert (tls_offset < 64);
+	/* mov dreg, fs:[0x18] — presumably the TIB self pointer; confirm */
+	x86_prefix (code, X86_FS_PREFIX);
+	x86_mov_reg_mem (code, dreg, 0x18, 4);
+	/* Dunno what this does but TlsGetValue () contains it */
+	x86_alu_membase_imm (code, X86_AND, dreg, 0x34, 0);
+	/* 3600 (0xe10) is presumably the TlsSlots array offset in the TEB — confirm */
+	x86_mov_reg_membase (code, dreg, dreg, 3600 + (tls_offset * 4), 4);
+#else
+	x86_prefix (code, X86_GS_PREFIX);
+	x86_mov_reg_mem (code, dreg, tls_offset, 4);
+#endif
+	return code;
+}
+
#define REAL_PRINT_REG(text,reg) \
mono_assert (reg >= 0); \
x86_push_reg (code, X86_EAX); \
case OP_X86_COMPARE_REG_MEMBASE:
x86_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
break;
+ case OP_X86_COMPARE_MEM_IMM:
+ x86_alu_mem_imm (code, X86_CMP, ins->inst_offset, ins->inst_imm);
+ break;
case OP_X86_TEST_NULL:
x86_test_reg_reg (code, ins->sreg1, ins->sreg1);
break;
x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
break;
case OP_MUL_IMM:
- x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
+ switch (ins->inst_imm) {
+ case 2:
+ /* MOV r1, r2 */
+ /* ADD r1, r1 */
+ if (ins->dreg != ins->sreg1)
+ x86_mov_reg_reg (code, ins->dreg, ins->sreg1, 4);
+ x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+ break;
+ case 3:
+ /* LEA r1, [r2 + r2*2] */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+ break;
+ case 5:
+ /* LEA r1, [r2 + r2*4] */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ break;
+ case 6:
+ /* LEA r1, [r2 + r2*2] */
+ /* ADD r1, r1 */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+ x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+ break;
+ case 9:
+ /* LEA r1, [r2 + r2*8] */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 3);
+ break;
+ case 10:
+ /* LEA r1, [r2 + r2*4] */
+ /* ADD r1, r1 */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ x86_alu_reg_reg (code, X86_ADD, ins->dreg, ins->dreg);
+ break;
+ case 12:
+ /* LEA r1, [r2 + r2*2] */
+ /* SHL r1, 2 */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 1);
+ x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
+ break;
+ case 25:
+ /* LEA r1, [r2 + r2*4] */
+ /* LEA r1, [r1 + r1*4] */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
+ break;
+ case 100:
+ /* LEA r1, [r2 + r2*4] */
+ /* SHL r1, 2 */
+ /* LEA r1, [r1 + r1*4] */
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, 0, ins->sreg1, 2);
+ x86_shift_reg_imm (code, X86_SHL, ins->dreg, 2);
+ x86_lea_memindex (code, ins->dreg, ins->dreg, 0, ins->dreg, 2);
+ break;
+ default:
+ x86_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
+ break;
+ }
break;
case CEE_MUL_OVF:
x86_imul_reg_reg (code, ins->sreg1, ins->sreg2);
code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
- if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention)) {
+ if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
/* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
* bytes to pop, we want to use pops. GCC does this (note it won't happen
* for P4 or i686 because gcc will avoid using pop push at all. But we aren't
case OP_CALL_REG:
call = (MonoCallInst*)ins;
x86_call_reg (code, ins->sreg1);
- if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention)) {
+ if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
else
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
x86_call_membase (code, ins->sreg1, ins->inst_offset);
- if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention)) {
+ if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
else
case OP_COND_EXC_NO:
case OP_COND_EXC_C:
case OP_COND_EXC_NC:
- EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ],
- (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
+ EMIT_COND_SYSTEM_EXCEPTION (branch_cc_table [ins->opcode - OP_COND_EXC_EQ], (ins->opcode < OP_COND_EXC_NE_UN), ins->inst_p1);
break;
case CEE_BEQ:
case CEE_BNE_UN:
}
case OP_LCONV_TO_OVF_I: {
guint8 *br [3], *label [1];
+ MonoInst *tins;
/*
* Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
label [0] = code;
/* throw exception */
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
- x86_jump32 (code, 0);
+ tins = mono_branch_optimize_exception_target (cfg, bb, "OverflowException");
+ if (tins) {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, tins->inst_true_bb);
+ if ((cfg->opt & MONO_OPT_BRANCH) && x86_is_imm8 (tins->inst_true_bb->max_offset - cpos))
+ x86_jump8 (code, 0);
+ else
+ x86_jump32 (code, 0);
+ } else {
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "OverflowException");
+ x86_jump32 (code, 0);
+ }
+
x86_patch (br [0], code);
/* our top bit is set, check that top word is 0xfffffff */
break;
}
case OP_TLS_GET: {
- x86_prefix (code, X86_GS_PREFIX);
- x86_mov_reg_mem (code, ins->dreg, ins->inst_offset, 4);
+ code = emit_tls_get (code, ins->dreg, ins->inst_offset);
+ break;
+ }
+ case OP_MEMORY_BARRIER: {
+ /* Not needed on x86 */
break;
}
case OP_ATOMIC_ADD_I4: {
x86_push_reg (code, X86_EBP);
x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
- alloc_size = - cfg->stack_offset;
+ alloc_size = cfg->stack_offset;
pos = 0;
+ if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
+ /* Might need to attach the thread to the JIT */
+ if (lmf_tls_offset != -1) {
+ guint8 *buf;
+
+ code = emit_tls_get ( code, X86_EAX, lmf_tls_offset);
+ x86_test_reg_reg (code, X86_EAX, X86_EAX);
+ buf = code;
+ x86_branch8 (code, X86_CC_NE, 0, 0);
+ x86_push_imm (code, cfg->domain);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
+ x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
+ x86_patch (buf, code);
+#ifdef PLATFORM_WIN32
+ /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
+ /* FIXME: Add a separate key for LMF to avoid this */
+ x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+#endif
+ }
+ else {
+ g_assert (!cfg->compile_aot);
+ x86_push_imm (code, cfg->domain);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
+ x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
+ }
+ }
+
if (method->save_lmf) {
pos += sizeof (MonoLMF);
*/
if (lmf_tls_offset != -1) {
/* Load lmf quicky using the GS register */
- x86_prefix (code, X86_GS_PREFIX);
- x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
+ code = emit_tls_get (code, X86_EAX, lmf_tls_offset);
+#ifdef PLATFORM_WIN32
+ /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
+ /* FIXME: Add a separate key for LMF to avoid this */
+ x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+#endif
}
else {
if (cfg->compile_aot) {
if (alloc_size) {
/* See mono_emit_stack_alloc */
-#ifdef PLATFORM_WIN32
+#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
while (remaining_size >= 0x1000) {
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
{
MonoMethod *method = cfg->method;
MonoMethodSignature *sig = mono_method_signature (method);
- int pos;
+ int quad, pos;
guint32 stack_to_pop;
guint8 *code;
int max_epilog_size = 16;
+ CallInfo *cinfo;
if (cfg->method->save_lmf)
max_epilog_size += 128;
if (method->save_lmf) {
gint32 prev_lmf_reg;
+ gint32 lmf_offset = -sizeof (MonoLMF);
/* Find a spare register */
switch (sig->ret->type) {
}
/* reg = previous_lmf */
- x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, -32, 4);
+ x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
/* ecx = lmf */
- x86_mov_reg_membase (code, X86_ECX, X86_EBP, -28, 4);
+ x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);
/* *(lmf) = previous_lmf */
x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
/* restore caller saved regs */
if (cfg->used_int_regs & (1 << X86_EBX)) {
- x86_mov_reg_membase (code, X86_EBX, X86_EBP, -20, 4);
+ x86_mov_reg_membase (code, X86_EBX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), 4);
}
if (cfg->used_int_regs & (1 << X86_EDI)) {
- x86_mov_reg_membase (code, X86_EDI, X86_EBP, -16, 4);
+ x86_mov_reg_membase (code, X86_EDI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), 4);
}
if (cfg->used_int_regs & (1 << X86_ESI)) {
- x86_mov_reg_membase (code, X86_ESI, X86_EBP, -12, 4);
+ x86_mov_reg_membase (code, X86_ESI, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), 4);
}
/* EBP is restored by LEAVE */
}
}
+ /* Load returned vtypes into registers if needed */
+ cinfo = get_call_info (sig, FALSE);
+ if (cinfo->ret.storage == ArgValuetypeInReg) {
+ for (quad = 0; quad < 2; quad ++) {
+ switch (cinfo->ret.pair_storage [quad]) {
+ case ArgInIReg:
+ x86_mov_reg_membase (code, cinfo->ret.pair_regs [quad], cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), 4);
+ break;
+ case ArgOnFloatFpStack:
+ x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), FALSE);
+ break;
+ case ArgOnDoubleFpStack:
+ x86_fld_membase (code, cfg->ret->inst_basereg, cfg->ret->inst_offset + (quad * sizeof (gpointer)), TRUE);
+ break;
+ case ArgNone:
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ }
+ }
+
x86_leave (code);
- if (CALLCONV_IS_STDCALL (sig->call_convention)) {
+ if (CALLCONV_IS_STDCALL (sig)) {
MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));
stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
- } else if (MONO_TYPE_ISSTRUCT (mono_method_signature (cfg->method)->ret))
+ } else if (MONO_TYPE_ISSTRUCT (mono_method_signature (cfg->method)->ret) && (cinfo->ret.storage == ArgOnStack))
stack_to_pop = 4;
else
stack_to_pop = 0;
else
x86_ret (code);
+ g_free (cinfo);
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
-
}
void
static gboolean tls_offset_inited = FALSE;
-/* code should be simply return <tls var>; */
-static int read_tls_offset_from_method (void* method)
-{
- guint8* code = (guint8*) method;
- /*
- * Determine the offset of the variable inside the TLS structures
- * by disassembling the function.
- */
-
- /* gcc-3.3.2
- *
- * push ebp
- * mov ebp, esp
- * mov eax, gs:0
- * mov eax, DWORD PTR [eax+<offset>]
- */
- if (
- (code [0] == 0x55) && (code [1] == 0x89) && (code [2] == 0xe5) &&
- (code [3] == 0x65) && (code [4] == 0xa1) && (code [5] == 0x00) &&
- (code [6] == 0x00) && (code [7] == 0x00) && (code [8] == 0x00) &&
- (code [9] == 0x8b) && (code [10] == 0x80)) {
- return *(int*)&(code [11]);
- }
-
- /* gcc-3.4
- *
- * push ebp
- * mov ebp, esp
- * mov eax, gs:<offset>
- */
- if (
- (code [0] == 0x55) && (code [1] == 0x89) && (code [2] == 0xe5) &&
- (code [3] == 0x65) && (code [4] == 0xa1)) {
- return *(int*)&(code [5]);
- }
-
- /* 3.2.2 with -march=athlon
- *
- * push ebp
- * mov eax, gs:<offset>
- * mov ebp, esp
- */
- if (
- (code [0] == 0x55) && (code [1] == 0x65) && (code [2] == 0xa1)) {
- return *(int*)&(code [3]);
- }
-
- return -1;
-}
+/*
+ * mono_arch_setup_jit_tls_data:
+ *
+ *   Cache the TLS keys/offsets of the runtime TLS variables (appdomain, lmf,
+ * thread) so OP_TLS_GET and the LMF fast paths can use inline code.
+ */
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- pthread_t self = pthread_self();
- pthread_attr_t attr;
- void *staddr = NULL;
- size_t stsize = 0;
- struct sigaltstack sa;
-#endif
-
 if (!tls_offset_inited) {
- tls_offset_inited = TRUE;
- if (getenv ("MONO_NPTL")) {
- lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
- appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
- thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
- }
- }
-
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-
- /* Determine stack boundaries */
- if (!mono_running_on_valgrind ()) {
-#ifdef HAVE_PTHREAD_GETATTR_NP
- pthread_getattr_np( self, &attr );
-#else
-#ifdef HAVE_PTHREAD_ATTR_GET_NP
- pthread_attr_get_np( self, &attr );
-#elif defined(sun)
- pthread_attr_init( &attr );
- pthread_attr_getstacksize( &attr, &stsize );
+ /* Setting MONO_NO_TLS disables the inline TLS fast paths entirely */
+ if (!getenv ("MONO_NO_TLS")) {
+#ifdef PLATFORM_WIN32
+ /*
+ * We need to init this multiple times, since when we are first called, the key might not
+ * be initialized yet.
+ */
+ /* Note tls_offset_inited is deliberately NOT set on this branch */
+ appdomain_tls_offset = mono_domain_get_tls_key ();
+ lmf_tls_offset = mono_get_jit_tls_key ();
+ thread_tls_offset = mono_thread_get_tls_key ();
+
+ /* Only 64 tls entries can be accessed using inline code */
+ if (appdomain_tls_offset >= 64)
+ appdomain_tls_offset = -1;
+ if (lmf_tls_offset >= 64)
+ lmf_tls_offset = -1;
+ if (thread_tls_offset >= 64)
+ thread_tls_offset = -1;
#else
-#error "Not implemented"
-#endif
-#endif
-#ifndef sun
- pthread_attr_getstack( &attr, &staddr, &stsize );
-#endif
- }
-
- /*
- * staddr seems to be wrong for the main thread, so we keep the value in
- * tls->end_of_stack
- */
- tls->stack_size = stsize;
-
- /* Setup an alternate signal stack */
- tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
- tls->signal_stack_size = SIGNAL_STACK_SIZE;
-
- sa.ss_sp = tls->signal_stack;
- sa.ss_size = SIGNAL_STACK_SIZE;
- sa.ss_flags = SS_ONSTACK;
- sigaltstack (&sa, NULL);
+ /* The offsets stay valid here, so init exactly once */
+ tls_offset_inited = TRUE;
+ appdomain_tls_offset = mono_domain_get_tls_offset ();
+ lmf_tls_offset = mono_get_lmf_tls_offset ();
+ thread_tls_offset = mono_thread_get_tls_offset ();
#endif
+ }
+ }
}
+/*
+ * mono_arch_free_jit_tls_data:
+ *
+ *   Arch specific per-thread JIT TLS teardown. Now a no-op: the sigaltstack
+ * cleanup was removed here (presumably handled elsewhere — confirm).
+ */
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- struct sigaltstack sa;
-
- sa.ss_sp = tls->signal_stack;
- sa.ss_size = SIGNAL_STACK_SIZE;
- sa.ss_flags = SS_DISABLE;
- sigaltstack (&sa, NULL);
-
- if (tls->signal_stack)
- g_free (tls->signal_stack);
-#endif
}
+/*
+ * mono_arch_emit_this_vret_args:
+ *
+ *   Emit the IR which passes the 'this' pointer (THIS_REG) and the valuetype
+ * return buffer address (VT_REG) to the call INST, either in a register
+ * (ArgInIReg) or on the stack (OP_OUTARG), according to the calling convention.
+ */
void
mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
{
+ MonoCallInst *call = (MonoCallInst*)inst;
+ /* Allocated once for both the this and vt_reg paths; freed at the end */
+ CallInfo *cinfo = get_call_info (inst->signature, FALSE);
 /* add the this argument */
 if (this_reg != -1) {
- MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_OUTARG);
- this->type = this_type;
- this->sreg1 = this_reg;
- mono_bblock_add_inst (cfg->cbb, this);
+ /* 'this' goes in a register: move it to a fresh vreg and bind that to the hard reg */
+ if (cinfo->args [0].storage == ArgInIReg) {
+ MonoInst *this;
+ MONO_INST_NEW (cfg, this, OP_MOVE);
+ this->type = this_type;
+ this->sreg1 = this_reg;
+ this->dreg = mono_regstate_next_int (cfg->rs);
+ mono_bblock_add_inst (cfg->cbb, this);
+
+ mono_call_inst_add_outarg_reg (call, this->dreg, cinfo->args [0].reg, FALSE);
+ }
+ else {
+ /* 'this' is passed on the stack */
+ MonoInst *this;
+ MONO_INST_NEW (cfg, this, OP_OUTARG);
+ this->type = this_type;
+ this->sreg1 = this_reg;
+ mono_bblock_add_inst (cfg->cbb, this);
+ }
 }
 if (vt_reg != -1) {
- CallInfo * cinfo = get_call_info (inst->signature, FALSE);
 MonoInst *vtarg;
 if (cinfo->ret.storage == ArgValuetypeInReg) {
+ /* NOTE(review): vtarg's opcode setup is in lines elided from this hunk — verify against full source */
 vtarg->sreg1 = vt_reg;
 mono_bblock_add_inst (cfg->cbb, vtarg);
 }
- else {
+ else if (cinfo->ret.storage == ArgInIReg) {
+ /* The return address is passed in a register */
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = vt_reg;
+ vtarg->dreg = mono_regstate_next_int (cfg->rs);
+ mono_bblock_add_inst (cfg->cbb, vtarg);
+
+ mono_call_inst_add_outarg_reg (call, vtarg->dreg, cinfo->ret.reg, FALSE);
+ } else {
+ /* The return buffer address is pushed on the stack */
 MonoInst *vtarg;
 MONO_INST_NEW (cfg, vtarg, OP_OUTARG);
 vtarg->type = STACK_MP;
 vtarg->sreg1 = vt_reg;
 mono_bblock_add_inst (cfg->cbb, vtarg);
 }
-
- g_free (cinfo);
 }
-}
+ g_free (cinfo);
+}
MonoInst*
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
ins->inst_i1 = args [1];
}
#endif
+ } else if (cmethod->klass == mono_defaults.thread_class &&
+ strcmp (cmethod->name, "MemoryBarrier") == 0) {
+ MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
} else if(cmethod->klass->image == mono_defaults.corlib &&
(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
if (appdomain_tls_offset == -1)
return NULL;
-
+
MONO_INST_NEW (cfg, ins, OP_TLS_GET);
ins->inst_offset = appdomain_tls_offset;
return ins;
MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
MonoInst* ins;
-
+
if (thread_tls_offset == -1)
return NULL;
-
+
MONO_INST_NEW (cfg, ins, OP_TLS_GET);
ins->inst_offset = thread_tls_offset;
return ins;
return -1;
}
}
+
+/*
+ * mono_arch_get_vcall_slot_addr:
+ *
+ *   CODE points just after an indirect (vtable) call instruction; REGS holds
+ * the register values saved at the call site. Decode the call instruction and
+ * return the address of the memory slot which held the call target, or NULL
+ * for a direct (0xe8 rel32) call.
+ */
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+{
+ guint8 reg = 0;
+ gint32 disp = 0;
+
+ /* go to the start of the call instruction
+ *
+ * address_byte = (m << 6) | (o << 3) | reg
+ * call opcode: 0xff address_byte displacement
+ * 0xff m=1,o=2 imm8
+ * 0xff m=2,o=2 imm32
+ */
+ code -= 6;
+ /* call *disp8(%reg): 0xff, mod=1, opcode-ext /2 */
+ if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
+ reg = code [4] & 0x07;
+ disp = (signed char)code [5];
+ } else {
+ /* call *disp32(%reg): 0xff, mod=2, opcode-ext /2 */
+ if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
+ reg = code [1] & 0x07;
+ disp = *((gint32*)(code + 2));
+ } else if ((code [1] == 0xe8)) {
+ /* 0xe8 rel32: a direct call, there is no slot */
+ return NULL;
+ } else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
+ /*
+ * This is an interface call: should check the above code can't catch it earlier
+ * 8b 40 30 mov 0x30(%eax),%eax
+ * ff 10 call *(%eax)
+ */
+ disp = 0;
+ reg = code [5] & 0x07;
+ }
+ else
+ return NULL;
+ }
+
+ /* The slot the call loaded its target from was at [reg + disp] */
+ return (gpointer*)(((gint32)(regs [reg])) + disp);
+}
+
+/*
+ * mono_arch_get_delegate_method_ptr_addr:
+ *
+ *   CODE points just after a delegate invocation sequence; REGS holds the
+ * register values saved at the call site. If the preceding 7 bytes match
+ *   8b c0+rm     mov %<reg>,%eax
+ *   8b 40 dd     mov 0xdd(%eax),%eax
+ *   ff d0        call *%eax
+ * return the address the method pointer was loaded from, otherwise NULL.
+ */
+gpointer*
+mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
+{
+ guint8 reg = 0;
+ gint32 disp = 0;
+
+ code -= 7;
+ if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 3) && (x86_modrm_reg (code [1]) == X86_EAX) && (code [2] == 0x8b) && (code [3] == 0x40) && (code [5] == 0xff) && (code [6] == 0xd0)) {
+ reg = x86_modrm_rm (code [1]);
+ disp = code [4];
+
+ /* NOTE(review): EAX is excluded — presumably its saved value was clobbered by this very sequence */
+ if (reg == X86_EAX)
+ return NULL;
+ else
+ return (gpointer*)(((gint32)(regs [reg])) + disp);
+ }
+
+ return NULL;
+}