#include "mini.h"
#include <string.h>
#include <math.h>
+#ifdef HAVE_UNISTD_H
#include <unistd.h>
+#endif
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include "trace.h"
#include "mini-x86.h"
#include "inssel.h"
-#include "cpu-pentium.h"
+#include "cpu-x86.h"
/* On windows, these hold the key returned by TlsAlloc () */
static gint lmf_tls_offset = -1;
+static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;
static gint thread_tls_offset = -1;
#ifdef MONO_XEN_OPT
-/* TRUE by default until we add runtime detection of Xen */
static gboolean optimize_for_xen = TRUE;
#else
#define optimize_for_xen 0
#endif
+#ifdef PLATFORM_WIN32
+static gboolean is_win32 = TRUE;
+#else
+static gboolean is_win32 = FALSE;
+#endif
+
+/* This mutex protects architecture specific caches */
+#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
+#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
+static CRITICAL_SECTION mini_arch_mutex;
+
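+/* Round val up to the next multiple of align; align must be a power of two */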
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
#define ARGS_OFFSET 8
static X86_Reg_No param_regs [] = { 0 };
-#ifdef PLATFORM_WIN32
+#if defined(PLATFORM_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
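+/* On these platforms, small structs are returned in the EAX:EDX register pair */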
+#define SMALL_STRUCTS_IN_REGS
static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
#endif
static void
-add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
+add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
guint32 *gr, guint32 *fr, guint32 *stack_size)
{
if (sig->pinvoke)
size = mono_type_native_stack_size (&klass->byval_arg, NULL);
else
- size = mono_type_stack_size (&klass->byval_arg, NULL);
+ size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);
-#ifdef PLATFORM_WIN32
+#ifdef SMALL_STRUCTS_IN_REGS
if (sig->pinvoke && is_return) {
MonoMarshalType *info;
* For x86 win32, see ???.
*/
static CallInfo*
-get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
guint32 i, gr, fr;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
CallInfo *cinfo;
+ MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
- cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ if (mp)
+ cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ else
+ cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
gr = 0;
fr = 0;
/* return value */
{
ret_type = mono_type_get_underlying_type (sig->ret);
+ ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
switch (ret_type->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_VALUETYPE: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
if (cinfo->ret.storage == ArgOnStack)
/* The caller passes the address where the value is stored */
add_general (&gr, &stack_size, &cinfo->ret);
continue;
}
ptype = mono_type_get_underlying_type (sig->params [i]);
+ ptype = mini_get_basic_type_from_generic (gsctx, ptype);
switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
- add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
+ add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_TYPEDBYREF:
stack_size += sizeof (MonoTypedRef);
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k, frame_size = 0;
- int size, align, pad;
+ int size, pad;
+ guint32 align;
int offset = 8;
CallInfo *cinfo;
- cinfo = get_call_info (csig, FALSE);
+ cinfo = get_call_info (NULL, NULL, csig, FALSE);
if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
frame_size += sizeof (gpointer);
if (csig->pinvoke)
size = mono_type_native_stack_size (csig->params [k], &align);
- else
- size = mono_type_stack_size (csig->params [k], &align);
+ else {
+ int ialign;
+ size = mini_type_stack_size (NULL, csig->params [k], &ialign);
+ align = ialign;
+ }
/* ignore alignment for now */
align = 1;
#endif
if (have_cpuid) {
/* Have to use the code manager to get around WinXP DEP */
- MonoCodeManager *codeman = mono_code_manager_new_dynamic ();
- CpuidFunc func;
- void *ptr = mono_code_manager_reserve (codeman, sizeof (cpuid_impl));
- memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
-
- func = (CpuidFunc)ptr;
+ static CpuidFunc func = NULL;
+ void *ptr;
+ if (!func) {
+ ptr = mono_global_codeman_reserve (sizeof (cpuid_impl));
+ memcpy (ptr, cpuid_impl, sizeof (cpuid_impl));
+ func = (CpuidFunc)ptr;
+ }
func (id, p_eax, p_ebx, p_ecx, p_edx);
- mono_code_manager_destroy (codeman);
-
/*
* We use this approach because of issues with gcc and pic code, see:
* http://gcc.gnu.org/cgi-bin/gnatsweb.pl?cmd=view%20audit-trail&database=gcc&pr=7329
#endif
}
+/*
+ * Initialize architecture specific code.
+ */
+void
+mono_arch_init (void)
+{
+ InitializeCriticalSection (&mini_arch_mutex);
+}
+
+/*
+ * Cleanup architecture specific code.
+ */
+void
+mono_arch_cleanup (void)
+{
+ DeleteCriticalSection (&mini_arch_mutex);
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
*exclude_mask |= MONO_OPT_FCMOV;
} else
*exclude_mask |= MONO_OPT_CMOV;
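+ /* edx bit 26 of cpuid (EAX=1) indicates SSE2 support */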
+ if (edx & (1 << 26))
+ opts |= MONO_OPT_SSE2;
+ else
+ *exclude_mask |= MONO_OPT_SSE2;
}
return opts;
}
return FALSE;
}
-static gboolean
-is_regsize_var (MonoType *t) {
- if (t->byref)
- return TRUE;
- switch (mono_type_get_underlying_type (t)->type) {
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- return TRUE;
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_STRING:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- return TRUE;
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (t))
- return TRUE;
- return FALSE;
- case MONO_TYPE_VALUETYPE:
- return FALSE;
- }
- return FALSE;
-}
-
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
/* we don't allocate I1 to registers because there is no simple way to sign extend
* 8bit quantities in caller saved registers on x86 */
- if (is_regsize_var (ins->inst_vtype) || (ins->inst_vtype->type == MONO_TYPE_BOOLEAN) ||
- (ins->inst_vtype->type == MONO_TYPE_U1) || (ins->inst_vtype->type == MONO_TYPE_U2)||
- (ins->inst_vtype->type == MONO_TYPE_I2) || (ins->inst_vtype->type == MONO_TYPE_CHAR)) {
+ if (mono_is_regsize_var (ins->inst_vtype) && (ins->inst_vtype->type != MONO_TYPE_I1)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
vars = g_list_prepend (vars, vmv);
header = mono_method_get_header (cfg->method);
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
cfg->frame_reg = MONO_ARCH_BASEREG;
offset = 0;
//printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
}
}
- g_free (offsets);
offset += locals_stack_size;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
- inst = cfg->varinfo [i];
+ inst = cfg->args [i];
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = X86_EBP;
offset &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
cfg->stack_offset = offset;
-
- g_free (cinfo);
}
void
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
-
- g_free (cinfo);
}
/* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
* currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
*/
+static void
+emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call)
+{
+ MonoInst *arg;
+ MonoMethodSignature *tmp_sig;
+ MonoInst *sig_arg;
+
+ /* FIXME: Add support for signature tokens to AOT */
+ cfg->disable_aot = TRUE;
+ MONO_INST_NEW (cfg, arg, OP_OUTARG);
+
+ /*
+ * mono_ArgIterator_Setup assumes the signature cookie is
+ * passed first and all the arguments which were before it are
+ * passed on the stack after the signature. So compensate by
+ * passing a different signature.
+ */
+ tmp_sig = mono_metadata_signature_dup (call->signature);
+ tmp_sig->param_count -= call->signature->sentinelpos;
+ tmp_sig->sentinelpos = 0;
+ memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+ MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->inst_p0 = tmp_sig;
+
+ arg->inst_left = sig_arg;
+ arg->type = STACK_PTR;
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
+}
+
+/*
+ * It is expensive to adjust esp for each individual fp argument pushed on the stack
+ * so we try to do it just once when we have multiple fp arguments in a row.
+ * We don't use this mechanism generally because for int arguments the generated code
+ * is slightly bigger and newer generation CPUs optimize away the dependency chains
+ * created by push instructions on the esp value.
+ * fp_arg_setup is the first argument in the execution sequence where the esp register
+ * is modified.
+ */
+static int
+collect_fp_stack_space (MonoMethodSignature *sig, int start_arg, int *fp_arg_setup)
+{
+ int fp_space = 0;
+ MonoType *t;
+
+ for (; start_arg < sig->param_count; ++start_arg) {
+ t = mono_type_get_underlying_type (sig->params [start_arg]);
+ if (!t->byref && t->type == MONO_TYPE_R8) {
+ fp_space += sizeof (double);
+ *fp_arg_setup = start_arg;
+ } else {
+ break;
+ }
+ }
+ return fp_space;
+}
+
/*
* take the arguments and generate the arch-specific
* instructions to properly call the function in call.
MonoMethodSignature *sig;
int i, n;
CallInfo *cinfo;
- int sentinelpos;
+ int sentinelpos = 0;
+ int fp_args_space = 0, fp_args_offset = 0, fp_arg_setup = -1;
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
sentinelpos = sig->sentinelpos + (is_virtual ? 1 : 0);
/* Emit the signature cookie just before the implicit arguments */
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
- MonoMethodSignature *tmp_sig;
- MonoInst *sig_arg;
-
- /* FIXME: Add support for signature tokens to AOT */
- cfg->disable_aot = TRUE;
- MONO_INST_NEW (cfg, arg, OP_OUTARG);
-
- /*
- * mono_ArgIterator_Setup assumes the signature cookie is
- * passed first and all the arguments which were before it are
- * passed on the stack after the signature. So compensate by
- * passing a different signature.
- */
- tmp_sig = mono_metadata_signature_dup (call->signature);
- tmp_sig->param_count -= call->signature->sentinelpos;
- tmp_sig->sentinelpos = 0;
- memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
-
- MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
- sig_arg->inst_p0 = tmp_sig;
-
- arg->inst_left = sig_arg;
- arg->type = STACK_PTR;
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
+ emit_sig_cookie (cfg, call);
}
if (is_virtual && i == 0) {
call->out_args = arg;
if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
- gint align;
- guint32 size;
+ guint32 size, align;
if (t->type == MONO_TYPE_TYPEDBYREF) {
size = sizeof (MonoTypedRef);
else
if (sig->pinvoke)
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
- else
- size = mono_type_stack_size (&in->klass->byval_arg, &align);
+ else {
+ int ialign;
+ size = mini_type_stack_size (cfg->generic_sharing_context, &in->klass->byval_arg, &ialign);
+ align = ialign;
+ }
arg->opcode = OP_OUTARG_VT;
arg->klass = in->klass;
- arg->unused = sig->pinvoke;
+ arg->backend.is_pinvoke = sig->pinvoke;
arg->inst_imm = size;
}
else {
case ArgOnStack:
arg->opcode = OP_OUTARG;
if (!t->byref) {
- if (t->type == MONO_TYPE_R4)
+ if (t->type == MONO_TYPE_R4) {
arg->opcode = OP_OUTARG_R4;
- else
- if (t->type == MONO_TYPE_R8)
- arg->opcode = OP_OUTARG_R8;
+ } else if (t->type == MONO_TYPE_R8) {
+ arg->opcode = OP_OUTARG_R8;
+ /* we store in the upper bits of backend.arg_info the needed
+ * esp adjustment and in the lower bits the offset from esp
+ * where the arg needs to be stored
+ */
+ if (!fp_args_space) {
+ fp_args_space = collect_fp_stack_space (sig, i - sig->hasthis, &fp_arg_setup);
+ fp_args_offset = fp_args_space;
+ }
+ arg->backend.arg_info = fp_args_space - fp_args_offset;
+ fp_args_offset -= sizeof (double);
+ if (i - sig->hasthis == fp_arg_setup) {
+ arg->backend.arg_info |= fp_args_space << 16;
+ }
+ if (fp_args_offset == 0) {
+ /* the allocated esp stack space has been used up:
+ * prepare for a possible second run of fp args
+ */
+ fp_args_space = 0;
+ }
+ }
}
break;
default:
}
}
+ /* Handle the case where there are no implicit arguments */
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
+ emit_sig_cookie (cfg, call);
+ }
+
if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
if (cinfo->ret.storage == ArgValuetypeInReg) {
MonoInst *zero_inst;
}
#endif
- g_free (cinfo);
-
return call;
}
{
guchar *code = p;
+#if __APPLE__
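+ /* pad the stack so it stays 16-byte aligned at the call site, as the Apple ABI requires */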
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
+#endif
+
/* if some args are passed in registers, we need to save them here */
x86_push_reg (code, X86_EBP);
mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
x86_call_code (code, 0);
}
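+ /* release the argument slots pushed above (plus the alignment pad on Darwin) */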
+#if __APPLE__
+ x86_alu_reg_imm (code, X86_ADD, X86_ESP, 16);
+#else
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
+#endif
return code;
}
case MONO_TYPE_R8:
save_mode = SAVE_FP;
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
+ save_mode = SAVE_EAX;
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
save_mode = SAVE_STRUCT;
break;
return code;
}
-/* FIXME: Add more instructions */
-#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI4_MEMBASE_REG))
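+/* Every opcode ignores the condition flags except the ADC/SBB family, which consumes the carry flag */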
+#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_IADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_ISBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB_IMM)))
+/*
+ * peephole_pass_1:
+ *
+ * Perform peephole opts which should/can be performed before local regalloc
+ */
static void
-peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoInst *ins, *last_ins = NULL;
ins = bb->code;
while (ins) {
-
switch (ins->opcode) {
- case OP_ICONST:
- /* reg = 0 -> XOR (reg, reg) */
- /* XOR sets cflags on x86, so we cant do it always */
- if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
- ins->opcode = CEE_XOR;
- ins->sreg1 = ins->dreg;
- ins->sreg2 = ins->dreg;
- }
+ case OP_IADD_IMM:
+ case OP_ADD_IMM:
+ if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
+ /*
+ * X86_LEA is like ADD, but doesn't have the
+ * sreg1==dreg restriction.
+ */
+ ins->opcode = OP_X86_LEA_MEMBASE;
+ ins->inst_basereg = ins->sreg1;
+ } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+ ins->opcode = OP_X86_INC_REG;
break;
- case OP_MUL_IMM:
- /* remove unnecessary multiplication with 1 */
- if (ins->inst_imm == 1) {
- if (ins->dreg != ins->sreg1) {
- ins->opcode = OP_MOVE;
- } else {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- }
- }
+ case OP_SUB_IMM:
+ case OP_ISUB_IMM:
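+ /* same LEA trick as OP_ADD_IMM above, with the immediate negated */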
+ if ((ins->sreg1 < MONO_MAX_IREGS) && (ins->dreg >= MONO_MAX_IREGS)) {
+ ins->opcode = OP_X86_LEA_MEMBASE;
+ ins->inst_basereg = ins->sreg1;
+ ins->inst_imm = -ins->inst_imm;
+ } else if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+ ins->opcode = OP_X86_DEC_REG;
break;
case OP_COMPARE_IMM:
+ case OP_ICOMPARE_IMM:
/* OP_COMPARE_IMM (reg, 0)
* -->
* OP_X86_TEST_NULL (reg)
break;
case CEE_CONV_I4:
case CEE_CONV_U4:
+ case OP_ICONV_TO_I4:
case OP_MOVE:
/*
* Removes:
bb->last_ins = last_ins;
}
+static void
+peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoInst *ins, *last_ins = NULL;
+ ins = bb->code;
+
+ while (ins) {
+
+ switch (ins->opcode) {
+ case OP_ICONST:
+ /* reg = 0 -> XOR (reg, reg) */
+ /* XOR sets cflags on x86, so we can't always do it */
+ if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
+ MonoInst *ins2;
+
+ ins->opcode = OP_IXOR;
+ ins->sreg1 = ins->dreg;
+ ins->sreg2 = ins->dreg;
+
+ /*
+ * Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
+ * since it takes 3 bytes instead of 7.
+ */
+ for (ins2 = ins->next; ins2; ins2 = ins2->next) {
+ if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
+ ins2->opcode = OP_STORE_MEMBASE_REG;
+ ins2->sreg1 = ins->dreg;
+ }
+ else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
+ ins2->opcode = OP_STOREI4_MEMBASE_REG;
+ ins2->sreg1 = ins->dreg;
+ }
+ else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
+ /* Continue iteration */
+ }
+ else
+ break;
+ }
+ }
+ break;
+ case OP_IADD_IMM:
+ case OP_ADD_IMM:
+ if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+ ins->opcode = OP_X86_INC_REG;
+ break;
+ case OP_ISUB_IMM:
+ case OP_SUB_IMM:
+ if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
+ ins->opcode = OP_X86_DEC_REG;
+ break;
+ case OP_X86_COMPARE_MEMBASE_IMM:
+ /*
+ * OP_STORE_MEMBASE_REG reg, offset(basereg)
+ * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
+ * -->
+ * OP_STORE_MEMBASE_REG reg, offset(basereg)
+ * OP_COMPARE_IMM reg, imm
+ *
+ * Note: if imm = 0 then OP_COMPARE_IMM is replaced with OP_X86_TEST_NULL
+ */
+ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = OP_COMPARE_IMM;
+ ins->sreg1 = last_ins->sreg1;
+
+ /* check if we can replace cmp reg,0 with test null */
+ if (!ins->inst_imm)
+ ins->opcode = OP_X86_TEST_NULL;
+ }
+
+ break;
+ case OP_LOAD_MEMBASE:
+ case OP_LOADI4_MEMBASE:
+ /*
+ * Note: if reg1 = reg2 the load op is removed
+ *
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_MOVE reg1, reg2
+ */
+ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
+ || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ if (ins->dreg == last_ins->sreg1) {
+ last_ins->next = ins->next;
+ ins = ins->next;
+ continue;
+ } else {
+ //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
+ ins->opcode = OP_MOVE;
+ ins->sreg1 = last_ins->sreg1;
+ }
+
+ /*
+ * Note: reg1 must be different from the basereg in the second load
+ * Note: if reg1 and reg2 are equal then the second load is removed
+ *
+ * OP_LOAD_MEMBASE offset(basereg), reg1
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_LOAD_MEMBASE offset(basereg), reg1
+ * OP_MOVE reg1, reg2
+ */
+ } else if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
+ || last_ins->opcode == OP_LOAD_MEMBASE) &&
+ ins->inst_basereg != last_ins->dreg &&
+ ins->inst_basereg == last_ins->inst_basereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+
+ if (ins->dreg == last_ins->dreg) {
+ last_ins->next = ins->next;
+ ins = ins->next;
+ continue;
+ } else {
+ ins->opcode = OP_MOVE;
+ ins->sreg1 = last_ins->dreg;
+ }
+
+ //g_assert_not_reached ();
+
+#if 0
+ /*
+ * OP_STORE_MEMBASE_IMM imm, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg
+ * -->
+ * OP_STORE_MEMBASE_IMM imm, offset(basereg)
+ * OP_ICONST reg, imm
+ */
+ } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
+ || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
+ ins->opcode = OP_ICONST;
+ ins->inst_c0 = last_ins->inst_imm;
+ g_assert_not_reached (); // check this rule
+#endif
+ }
+ break;
+ case OP_LOADU1_MEMBASE:
+ case OP_LOADI1_MEMBASE:
+ /*
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * CONV_I1/U1 reg1, reg2
+ */
+ if (last_ins && X86_IS_BYTE_REG (last_ins->sreg1) &&
+ (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
+ ins->sreg1 = last_ins->sreg1;
+ }
+ break;
+ case OP_LOADU2_MEMBASE:
+ case OP_LOADI2_MEMBASE:
+ /*
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * OP_LOAD_MEMBASE offset(basereg), reg2
+ * -->
+ * OP_STORE_MEMBASE_REG reg1, offset(basereg)
+ * CONV_I2/U2 reg1, reg2
+ */
+ if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
+ ins->sreg1 = last_ins->sreg1;
+ }
+ break;
+ case CEE_CONV_I4:
+ case CEE_CONV_U4:
+ case OP_ICONV_TO_I4:
+ case OP_MOVE:
+ /*
+ * Removes:
+ *
+ * OP_MOVE reg, reg
+ */
+ if (ins->dreg == ins->sreg1) {
+ if (last_ins)
+ last_ins->next = ins->next;
+ ins = ins->next;
+ continue;
+ }
+ /*
+ * Removes:
+ *
+ * OP_MOVE sreg, dreg
+ * OP_MOVE dreg, sreg
+ */
+ if (last_ins && last_ins->opcode == OP_MOVE &&
+ ins->sreg1 == last_ins->dreg &&
+ ins->dreg == last_ins->sreg1) {
+ last_ins->next = ins->next;
+ ins = ins->next;
+ continue;
+ }
+ break;
+ case OP_X86_PUSH_MEMBASE:
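+ /*
+ * OP_STORE_MEMBASE_REG reg, offset(basereg)
+ * OP_X86_PUSH_MEMBASE offset(basereg)
+ * -->
+ * OP_STORE_MEMBASE_REG reg, offset(basereg)
+ * OP_X86_PUSH reg
+ */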
+ if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG ||
+ last_ins->opcode == OP_STORE_MEMBASE_REG) &&
+ ins->inst_basereg == last_ins->inst_destbasereg &&
+ ins->inst_offset == last_ins->inst_offset) {
+ ins->opcode = OP_X86_PUSH;
+ ins->sreg1 = last_ins->sreg1;
+ }
+ break;
+ }
+ last_ins = ins;
+ ins = ins->next;
+ }
+ bb->last_ins = last_ins;
+}
+
static const int
branch_cc_table [] = {
X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
X86_CC_O, X86_CC_NO, X86_CC_C, X86_CC_NC
};
-static const char*const * ins_spec = pentium_desc;
+/* Maps CMP_... constants to X86_CC_... constants */
+static const int
+cc_table [] = {
+ X86_CC_EQ, X86_CC_NE, X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT,
+ X86_CC_LE, X86_CC_GE, X86_CC_LT, X86_CC_GT
+};
+
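+/* Says whether the cc_table entry at the same index is a signed comparison */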
+static const int
+cc_signed_table [] = {
+ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
+ FALSE, FALSE, FALSE, FALSE
+};
-/*#include "cprop.c"*/
void
mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
{
+ if (cfg->opt & MONO_OPT_PEEPHOLE)
+ peephole_pass_1 (cfg, bb);
+
mono_local_regalloc (cfg, bb);
}
static unsigned char*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int size, gboolean is_signed)
{
+#define XMM_TEMP_REG 0
+ if (cfg->opt & MONO_OPT_SSE2 && size < 8) {
+ /* optimize by assigning a local var for this use so we avoid
+ * the stack manipulations */
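+ /* spill the x87 top of stack to memory, reload it into an XMM register and convert with truncating cvttsd2si */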
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
+ x86_fst_membase (code, X86_ESP, 0, TRUE, TRUE);
+ x86_movsd_reg_membase (code, XMM_TEMP_REG, X86_ESP, 0);
+ x86_cvttsd2si (code, dreg, XMM_TEMP_REG);
+ x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
+ if (size == 1)
+ x86_widen_reg (code, dreg, dreg, is_signed, FALSE);
+ else if (size == 2)
+ x86_widen_reg (code, dreg, dreg, is_signed, TRUE);
+ return code;
+ }
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
x86_fnstcw_membase(code, X86_ESP, 0);
x86_mov_reg_membase (code, dreg, X86_ESP, 0, 2);
case OP_VCALL:
case OP_VCALL_REG:
case OP_VCALL_MEMBASE:
- cinfo = get_call_info (((MonoCallInst*)ins)->signature, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
if (cinfo->ret.storage == ArgValuetypeInReg) {
/* Pop the destination address from the stack */
x86_pop_reg (code, X86_ECX);
}
}
}
- g_free (cinfo);
default:
break;
}
return code;
}
+/*
+ * emit_load_volatile_arguments:
+ *
+ * Load volatile arguments from the stack to the original input registers.
+ * Required before a tail call.
+ */
+static guint8*
+emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
+{
+ MonoMethod *method = cfg->method;
+ MonoMethodSignature *sig;
+ MonoInst *inst;
+ CallInfo *cinfo;
+ guint32 i;
+
+ /* FIXME: Generate intermediate code instead */
+
+ sig = mono_method_signature (method);
+
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
+
+ /* This is the opposite of the code in emit_prolog */
+
+ for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+ ArgInfo *ainfo = cinfo->args + i;
+ MonoType *arg_type;
+ inst = cfg->args [i];
+
+ if (sig->hasthis && (i == 0))
+ arg_type = &mono_defaults.object_class->byval_arg;
+ else
+ arg_type = sig->params [i - sig->hasthis];
+
+ /*
+ * On x86, the arguments are either in their original stack locations, or in
+ * global regs.
+ */
+ if (inst->opcode == OP_REGVAR) {
+ g_assert (ainfo->storage == ArgOnStack);
+
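+ /* the argument lives in a global register: store it back to its original stack slot */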
+ x86_mov_membase_reg (code, X86_EBP, inst->inst_offset, inst->dreg, 4);
+ }
+ }
+
+ return code;
+}
+
#define REAL_PRINT_REG(text,reg) \
mono_assert (reg >= 0); \
x86_push_reg (code, X86_EAX); \
while (ins) {
offset = code - cfg->native_code;
- max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
+ max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
if (offset > (cfg->code_size - max_len - 16)) {
cfg->code_size *= 2;
case OP_X86_MUL_MEMBASE:
x86_imul_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
break;
- case CEE_BREAK:
+ case OP_BREAK:
x86_breakpoint (code);
break;
case OP_ADDCC:
x86_alu_reg_imm (code, X86_OR, ins->sreg1, ins->inst_imm);
break;
case CEE_XOR:
+ case OP_IXOR:
x86_alu_reg_reg (code, X86_XOR, ins->sreg1, ins->sreg2);
break;
case OP_XOR_IMM:
guint8 *jump_to_end;
/* handle shifts below 32 bits */
- x86_shld_reg (code, ins->unused, ins->sreg1);
+ x86_shld_reg (code, ins->backend.reg3, ins->sreg1);
x86_shift_reg (code, X86_SHL, ins->sreg1);
x86_test_reg_imm (code, X86_ECX, 32);
jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, TRUE);
/* handle shift over 32 bit */
- x86_mov_reg_reg (code, ins->unused, ins->sreg1, 4);
+ x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
x86_clear_reg (code, ins->sreg1);
x86_patch (jump_to_end, code);
guint8 *jump_to_end;
/* handle shifts below 32 bits */
- x86_shrd_reg (code, ins->sreg1, ins->unused);
- x86_shift_reg (code, X86_SAR, ins->unused);
+ x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
+ x86_shift_reg (code, X86_SAR, ins->backend.reg3);
x86_test_reg_imm (code, X86_ECX, 32);
jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
/* handle shifts over 31 bits */
- x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
- x86_shift_reg_imm (code, X86_SAR, ins->unused, 31);
+ x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
+ x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 31);
x86_patch (jump_to_end, code);
}
guint8 *jump_to_end;
/* handle shifts below 32 bits */
- x86_shrd_reg (code, ins->sreg1, ins->unused);
- x86_shift_reg (code, X86_SHR, ins->unused);
+ x86_shrd_reg (code, ins->sreg1, ins->backend.reg3);
+ x86_shift_reg (code, X86_SHR, ins->backend.reg3);
x86_test_reg_imm (code, X86_ECX, 32);
jump_to_end = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
/* handle shifts over 31 bits */
- x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
- x86_shift_reg_imm (code, X86_SHR, ins->unused, 31);
+ x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
+ x86_clear_reg (code, ins->backend.reg3);
x86_patch (jump_to_end, code);
}
break;
case OP_LSHL_IMM:
if (ins->inst_imm >= 32) {
- x86_mov_reg_reg (code, ins->unused, ins->sreg1, 4);
+ x86_mov_reg_reg (code, ins->backend.reg3, ins->sreg1, 4);
x86_clear_reg (code, ins->sreg1);
- x86_shift_reg_imm (code, X86_SHL, ins->unused, ins->inst_imm - 32);
+ x86_shift_reg_imm (code, X86_SHL, ins->backend.reg3, ins->inst_imm - 32);
} else {
- x86_shld_reg_imm (code, ins->unused, ins->sreg1, ins->inst_imm);
+ x86_shld_reg_imm (code, ins->backend.reg3, ins->sreg1, ins->inst_imm);
x86_shift_reg_imm (code, X86_SHL, ins->sreg1, ins->inst_imm);
}
break;
case OP_LSHR_IMM:
if (ins->inst_imm >= 32) {
- x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
- x86_shift_reg_imm (code, X86_SAR, ins->unused, 0x1f);
+ x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
+ x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, 0x1f);
x86_shift_reg_imm (code, X86_SAR, ins->sreg1, ins->inst_imm - 32);
} else {
- x86_shrd_reg_imm (code, ins->sreg1, ins->unused, ins->inst_imm);
- x86_shift_reg_imm (code, X86_SAR, ins->unused, ins->inst_imm);
+ x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
+ x86_shift_reg_imm (code, X86_SAR, ins->backend.reg3, ins->inst_imm);
}
break;
case OP_LSHR_UN_IMM:
if (ins->inst_imm >= 32) {
- x86_mov_reg_reg (code, ins->sreg1, ins->unused, 4);
- x86_clear_reg (code, ins->unused);
+ x86_mov_reg_reg (code, ins->sreg1, ins->backend.reg3, 4);
+ x86_clear_reg (code, ins->backend.reg3);
x86_shift_reg_imm (code, X86_SHR, ins->sreg1, ins->inst_imm - 32);
} else {
- x86_shrd_reg_imm (code, ins->sreg1, ins->unused, ins->inst_imm);
- x86_shift_reg_imm (code, X86_SHR, ins->unused, ins->inst_imm);
+ x86_shrd_reg_imm (code, ins->sreg1, ins->backend.reg3, ins->inst_imm);
+ x86_shift_reg_imm (code, X86_SHR, ins->backend.reg3, ins->inst_imm);
}
break;
case CEE_NOT:
break;
case CEE_CONV_U4:
g_assert_not_reached ();
- case CEE_JMP: {
+ case OP_JMP: {
/*
* Note: this 'frame destruction' logic is useful for tail calls, too.
* Keep in sync with the code in emit_epilog.
g_assert (!cfg->method->save_lmf);
+ code = emit_load_volatile_arguments (cfg, code);
+
if (cfg->used_int_regs & (1 << X86_EBX))
pos -= 4;
if (cfg->used_int_regs & (1 << X86_EDI))
x86_pop_reg (code, X86_EDI);
break;
case OP_X86_LEA:
- x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->unused);
+ x86_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
break;
case OP_X86_LEA_MEMBASE:
x86_lea_membase (code, ins->dreg, ins->sreg1, ins->inst_imm);
break;
case OP_LOCALLOC:
/* keep alignment */
- x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
- x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
+ x86_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1);
+ x86_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1));
code = mono_emit_stack_alloc (code, ins);
x86_mov_reg_reg (code, ins->dreg, X86_ESP, 4);
break;
case CEE_RET:
x86_ret (code);
break;
- case CEE_THROW: {
+ case OP_THROW: {
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
- case CEE_BR:
+ case OP_BR:
//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
//if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
//break;
x86_jump_reg (code, ins->sreg1);
break;
case OP_CEQ:
- x86_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
- x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
- break;
case OP_CLT:
- x86_set_reg (code, X86_CC_LT, ins->dreg, TRUE);
- x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
- break;
case OP_CLT_UN:
- x86_set_reg (code, X86_CC_LT, ins->dreg, FALSE);
- x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
- break;
case OP_CGT:
- x86_set_reg (code, X86_CC_GT, ins->dreg, TRUE);
- x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
- break;
case OP_CGT_UN:
- x86_set_reg (code, X86_CC_GT, ins->dreg, FALSE);
- x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
- break;
case OP_CNE:
- x86_set_reg (code, X86_CC_NE, ins->dreg, TRUE);
+ x86_set_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
x86_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
break;
case OP_COND_EXC_EQ:
case OP_COND_EXC_GE_UN:
case OP_COND_EXC_LE:
case OP_COND_EXC_LE_UN:
+ EMIT_COND_SYSTEM_EXCEPTION (cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->inst_p1);
+ break;
case OP_COND_EXC_OV:
case OP_COND_EXC_NO:
case OP_COND_EXC_C:
case CEE_BGE_UN:
case CEE_BLE:
case CEE_BLE_UN:
- EMIT_COND_BRANCH (ins, branch_cc_table [ins->opcode - CEE_BEQ], (ins->opcode < CEE_BNE_UN));
+ EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
break;
/* floating point opcodes */
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
x86_fist_pop_membase (code, X86_ESP, 0, TRUE);
x86_pop_reg (code, ins->dreg);
- x86_pop_reg (code, ins->unused);
+ x86_pop_reg (code, ins->backend.reg3);
x86_fldcw_membase (code, X86_ESP, 0);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
break;
case OP_FBGT:
case OP_FBGT_UN:
if (cfg->opt & MONO_OPT_FCMOV) {
- EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
+ if (ins->opcode == OP_FBGT) {
+ guchar *br1;
+
+ /* skip branch if C1=1 */
+ br1 = code;
+ x86_branch8 (code, X86_CC_P, 0, FALSE);
+ /* branch if (C0 | C3) = 1 */
+ EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
+ x86_patch (br1, code);
+ } else {
+ EMIT_COND_BRANCH (ins, X86_CC_LT, FALSE);
+ }
break;
}
x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
x86_alu_reg_imm (code, X86_CMP, X86_EAX, X86_FP_C0);
EMIT_COND_BRANCH (ins, X86_CC_NE, FALSE);
break;
- case CEE_CKFINITE: {
+ case OP_CKFINITE: {
x86_push_reg (code, X86_EAX);
x86_fxam (code);
x86_fnstsw (code);
guint8 *code;
cfg->code_size = MAX (mono_method_get_header (method)->code_size * 4, 256);
+
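+ /* leave extra room for the profiler enter/leave instrumentation */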
+ if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
+ cfg->code_size += 512;
+
code = cfg->native_code = g_malloc (cfg->code_size);
x86_push_reg (code, X86_EBP);
x86_push_reg (code, X86_EDI);
x86_push_reg (code, X86_EBX);
- /* save method info */
- x86_push_imm (code, method);
+ if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
+ /*
+ * Optimized version which uses the mono_lmf TLS variable instead of indirection
+ * through the mono_lmf_addr TLS variable.
+ */
+ /* %eax = previous_lmf */
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_reg_mem (code, X86_EAX, lmf_tls_offset, 4);
+ /* skip esp + method_info + lmf */
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
+ /* push previous_lmf */
+ x86_push_reg (code, X86_EAX);
+ /* new lmf = ESP */
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_mem_reg (code, lmf_tls_offset, X86_ESP, 4);
+ } else {
+ /* get the address of lmf for the current thread */
+ /*
+ * This is performance critical so we try to use some tricks to make
+ * it fast.
+ */
- /* get the address of lmf for the current thread */
- /*
- * This is performance critical so we try to use some tricks to make
- * it fast.
- */
- if (lmf_tls_offset != -1) {
- /* Load lmf quicky using the GS register */
- code = emit_tls_get (code, X86_EAX, lmf_tls_offset);
+ if (lmf_addr_tls_offset != -1) {
+ /* Load lmf_addr quickly using the GS register */
+ code = emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
#ifdef PLATFORM_WIN32
- /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
- /* FIXME: Add a separate key for LMF to avoid this */
- x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+ /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
+ /* FIXME: Add a separate key for LMF to avoid this */
+ x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
- } else {
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
- }
+ } else {
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
+ }
- /* push lmf */
- x86_push_reg (code, X86_EAX);
- /* push *lfm (previous_lmf) */
- x86_push_membase (code, X86_EAX, 0);
- /* *(lmf) = ESP */
- x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
+ /* Skip esp + method info */
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
+
+ /* push lmf */
+ x86_push_reg (code, X86_EAX);
+ /* push *lmf (previous_lmf) */
+ x86_push_membase (code, X86_EAX, 0);
+ /* *(lmf) = ESP */
+ x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
+ }
} else {
if (cfg->used_int_regs & (1 << X86_EBX)) {
if (ins->opcode == OP_LABEL)
ins->inst_c1 = max_offset;
- max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
+ max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
ins = ins->next;
}
}
pos = 0;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = cfg->varinfo [pos];
+ inst = cfg->args [pos];
if (inst->opcode == OP_REGVAR) {
x86_mov_reg_membase (code, inst->dreg, X86_EBP, inst->inst_offset, 4);
if (cfg->verbose_level > 2)
if (cfg->method->save_lmf)
max_epilog_size += 128;
-
- if (mono_jit_trace_calls != NULL)
- max_epilog_size += 50;
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
- /* the code restoring the registers must be kept in sync with CEE_JMP */
+ /* the code restoring the registers must be kept in sync with OP_JMP */
pos = 0;
if (method->save_lmf) {
gint32 prev_lmf_reg;
gint32 lmf_offset = -sizeof (MonoLMF);
- /* Find a spare register */
- switch (sig->ret->type) {
- case MONO_TYPE_I8:
- case MONO_TYPE_U8:
- prev_lmf_reg = X86_EDI;
- cfg->used_int_regs |= (1 << X86_EDI);
- break;
- default:
- prev_lmf_reg = X86_EDX;
- break;
- }
+ if ((lmf_tls_offset != -1) && !is_win32 && !optimize_for_xen) {
+ /*
+ * Optimized version which uses the mono_lmf TLS variable instead of indirection
+ * through the mono_lmf_addr TLS variable.
+ */
+ /* reg = previous_lmf */
+ x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
- /* reg = previous_lmf */
- x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
+ /* lmf = previous_lmf */
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_mem_reg (code, lmf_tls_offset, X86_ECX, 4);
+ } else {
+ /* Find a spare register */
+ switch (sig->ret->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ prev_lmf_reg = X86_EDI;
+ cfg->used_int_regs |= (1 << X86_EDI);
+ break;
+ default:
+ prev_lmf_reg = X86_EDX;
+ break;
+ }
+
+ /* reg = previous_lmf */
+ x86_mov_reg_membase (code, prev_lmf_reg, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 4);
- /* ecx = lmf */
- x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);
+ /* ecx = lmf */
+ x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 4);
- /* *(lmf) = previous_lmf */
- x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
+ /* *(lmf) = previous_lmf */
+ x86_mov_membase_reg (code, X86_ECX, 0, prev_lmf_reg, 4);
+ }
/* restore caller saved regs */
if (cfg->used_int_regs & (1 << X86_EBX)) {
}
/* Load returned vtypes into registers if needed */
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, cfg->mempool, sig, FALSE);
if (cinfo->ret.storage == ArgValuetypeInReg) {
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
else
x86_ret (code);
- g_free (cinfo);
-
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
exc_throw_start [nthrows] = code;
}
- x86_push_imm (code, exc_class->type_token);
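+ /* the throw trampoline adds MONO_TOKEN_TYPE_DEF back to recover the full type token */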
+ x86_push_imm (code, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
patch_info->data.name = "mono_arch_throw_corlib_exception";
patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
patch_info->ip.i = code - cfg->native_code;
tls_offset_inited = TRUE;
appdomain_tls_offset = mono_domain_get_tls_offset ();
lmf_tls_offset = mono_get_lmf_tls_offset ();
+ lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
thread_tls_offset = mono_thread_get_tls_offset ();
#endif
}
mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
{
MonoCallInst *call = (MonoCallInst*)inst;
- CallInfo *cinfo = get_call_info (inst->signature, FALSE);
+ CallInfo *cinfo = get_call_info (cfg, cfg->mempool, inst->signature, FALSE);
/* add the this argument */
if (this_reg != -1) {
mono_bblock_add_inst (cfg->cbb, vtarg);
}
}
+}
+
+#ifdef MONO_ARCH_HAVE_IMT
+
+// Linear handler, the bsearch head compare is shorter
+//[2 + 4] x86_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
+//[1 + 1] x86_branch8(inst,cond,imm,is_signed)
+// x86_patch(ins,target)
+//[1 + 5] x86_jump_mem(inst,mem)
+
+#define CMP_SIZE 6
+#define BR_SMALL_SIZE 2
+#define BR_LARGE_SIZE 5
+#define JUMP_IMM_SIZE 6
+#define ENABLE_WRONG_METHOD_CHECK 0
+
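+/* Byte distance from IMT entry 'start' to entry 'target', summing the chunk sizes in between */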
+static int
+imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
+{
+ int i, distance = 0;
+ for (i = start; i < target; ++i)
+ distance += imt_entries [i]->chunk_size;
+ return distance;
+}
+
+/*
+ * LOCKING: called with the domain lock held
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+ int i;
+ int size = 0;
+ guint8 *code, *start;
+
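+ /* First pass: compute the chunk size of each entry so branch distances are known */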
+ for (i = 0; i < count; ++i) {
+ MonoIMTCheckItem *item = imt_entries [i];
+ if (item->is_equals) {
+ if (item->check_target_idx) {
+ if (!item->compare_done)
+ item->chunk_size += CMP_SIZE;
+ item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
+ } else {
+ item->chunk_size += JUMP_IMM_SIZE;
+#if ENABLE_WRONG_METHOD_CHECK
+ item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
+#endif
+ }
+ } else {
+ item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
+ imt_entries [item->check_target_idx]->compare_done = TRUE;
+ }
+ size += item->chunk_size;
+ }
+ code = mono_code_manager_reserve (domain->code_mp, size);
+ start = code;
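+ /* Second pass: emit the compare/branch/jump code for each entry */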
+ for (i = 0; i < count; ++i) {
+ MonoIMTCheckItem *item = imt_entries [i];
+ item->code_target = code;
+ if (item->is_equals) {
+ if (item->check_target_idx) {
+ if (!item->compare_done)
+ x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->method);
+ item->jmp_code = code;
+ x86_branch8 (code, X86_CC_NE, 0, FALSE);
+ x86_jump_mem (code, & (vtable->vtable [item->vtable_slot]));
+ } else {
+ /* enable the commented code to assert on wrong method */
+#if ENABLE_WRONG_METHOD_CHECK
+ x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->method);
+ item->jmp_code = code;
+ x86_branch8 (code, X86_CC_NE, 0, FALSE);
+#endif
+ x86_jump_mem (code, & (vtable->vtable [item->vtable_slot]));
+#if ENABLE_WRONG_METHOD_CHECK
+ x86_patch (item->jmp_code, code);
+ x86_breakpoint (code);
+ item->jmp_code = NULL;
+#endif
+ }
+ } else {
+ x86_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)item->method);
+ item->jmp_code = code;
+ if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
+ x86_branch8 (code, X86_CC_GE, 0, FALSE);
+ else
+ x86_branch32 (code, X86_CC_GE, 0, FALSE);
+ }
+ }
+ /* patch the branches to get to the target items */
+ for (i = 0; i < count; ++i) {
+ MonoIMTCheckItem *item = imt_entries [i];
+ if (item->jmp_code) {
+ if (item->check_target_idx) {
+ x86_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+ }
+ }
+ }
+
+ mono_stats.imt_thunks_size += code - start;
+ g_assert (code - start <= size);
+ return start;
+}
+
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+ return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
+}
+
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+{
+ MonoMethodSignature *sig = mono_method_signature (method);
+ CallInfo *cinfo = get_call_info (NULL, NULL, sig, FALSE);
+ int this_argument_offset;
+ MonoObject *this_argument;
+
+ /*
+ * this is the offset of the this arg from esp as saved at the start of
+ * mono_arch_create_trampoline_code () in tramp-x86.c.
+ */
+ this_argument_offset = 5;
+ if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
+ this_argument_offset++;
+
+ this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));
g_free (cinfo);
+ return this_argument;
}
+#endif
MonoInst*
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
}
}
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+gpointer
+mono_arch_get_vcall_slot (guint8 *code, gpointer *regs, int *displacement)
{
guint8 reg = 0;
gint32 disp = 0;
+ *displacement = 0;
+
/* go to the start of the call instruction
*
* address_byte = (m << 6) | (o << 3) | reg
* 0xff m=2,o=2 imm32
*/
code -= 6;
- if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
+
+ /*
+ * A given byte sequence can match more than one case here, so we have to be
+ * really careful about the ordering of the cases. Longer sequences
+ * come first.
+ */
+ if ((code [-2] == 0x8b) && (x86_modrm_mod (code [-1]) == 0x2) && (code [4] == 0xff) && (x86_modrm_reg (code [5]) == 0x2) && (x86_modrm_mod (code [5]) == 0x0)) {
+ /*
+ * This is an interface call
+ * 8b 80 0c e8 ff ff mov 0xffffe80c(%eax),%eax
+ * ff 10 call *(%eax)
+ */
+ reg = x86_modrm_rm (code [5]);
+ disp = 0;
+#ifdef MONO_ARCH_HAVE_IMT
+ } else if ((code [-2] == 0xba) && (code [3] == 0xff) && (x86_modrm_mod (code [4]) == 1) && (x86_modrm_reg (code [4]) == 2) && ((signed char)code [5] < 0)) {
+ /* IMT-based interface calls: with MONO_ARCH_IMT_REG == edx
+ * ba 14 f8 28 08 mov $0x828f814,%edx
+ * ff 50 fc call *0xfffffffc(%eax)
+ */
+ reg = code [4] & 0x07;
+ disp = (signed char)code [5];
+#endif
+ } else if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
reg = code [4] & 0x07;
disp = (signed char)code [5];
} else {
return NULL;
} else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
/*
- * This is a interface call: should check the above code can't catch it earlier
+ * This is an interface call
* 8b 40 30 mov 0x30(%eax),%eax
* ff 10 call *(%eax)
*/
return NULL;
}
- return (gpointer*)(((gint32)(regs [reg])) + disp);
+ *displacement = disp;
+ return regs [reg];
}
-gpointer*
-mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
{
- guint8 reg = 0;
- gint32 disp = 0;
+ gpointer vt;
+ int displacement;
+ vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+ if (!vt)
+ return NULL;
+ return (gpointer*)((char*)vt + displacement);
+}
- code -= 7;
- if ((code [0] == 0x8b) && (x86_modrm_mod (code [1]) == 3) && (x86_modrm_reg (code [1]) == X86_EAX) && (code [2] == 0x8b) && (code [3] == 0x40) && (code [5] == 0xff) && (code [6] == 0xd0)) {
- reg = x86_modrm_rm (code [1]);
- disp = code [4];
+gpointer
+mono_arch_get_this_arg_from_call (MonoMethodSignature *sig, gssize *regs, guint8 *code)
+{
+ guint32 esp = regs [X86_ESP];
+ CallInfo *cinfo;
+ gpointer res;
- if (reg == X86_EAX)
- return NULL;
- else
- return (gpointer*)(((gint32)(regs [reg])) + disp);
+ cinfo = get_call_info (NULL, NULL, sig, FALSE);
+
+ /*
+ * The stack looks like:
+ * <other args>
+ * <this=delegate>
+ * <possible vtype return address>
+ * <return addr>
+ * <4 pointers pushed by mono_arch_create_trampoline_code ()>
+ */
+ res = (((MonoObject**)esp) [5 + (cinfo->args [0].offset / 4)]);
+ g_free (cinfo);
+ return res;
+}
+
+#define MAX_ARCH_DELEGATE_PARAMS 10
+
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+ guint8 *code, *start;
+
+ if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
+ return NULL;
+
+ /* FIXME: Support more cases */
+ if (MONO_TYPE_ISSTRUCT (sig->ret))
+ return NULL;
+
+ /*
+ * The stack contains:
+ * <delegate>
+ * <return addr>
+ */
+
+ if (has_target) {
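+ /* the has_target stub is signature-independent, so a single cached copy suffices */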
+ static guint8* cached = NULL;
+ mono_mini_arch_lock ();
+ if (cached) {
+ mono_mini_arch_unlock ();
+ return cached;
+ }
+
+ start = code = mono_global_codeman_reserve (64);
+
+ /* Replace the this argument with the target */
+ x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
+ x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, target), 4);
+ x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
+ x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+
+ g_assert ((code - start) < 64);
+
+ cached = start;
+
+ mono_mini_arch_unlock ();
+ } else {
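+ /* one stub per parameter count, since the argument-shuffling loop depends on it */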
+ static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
+ int i = 0;
+ /* 8 for mov_reg and jump, plus 8 for each parameter */
+ int code_reserve = 8 + (sig->param_count * 8);
+
+ for (i = 0; i < sig->param_count; ++i)
+ if (!mono_is_regsize_var (sig->params [i]))
+ return NULL;
+
+ mono_mini_arch_lock ();
+ code = cache [sig->param_count];
+ if (code) {
+ mono_mini_arch_unlock ();
+ return code;
+ }
+
+ /*
+ * The stack contains:
+ * <args in reverse order>
+ * <delegate>
+ * <return addr>
+ *
+ * and we need:
+ * <args in reverse order>
+ * <return addr>
+ *
+ * without unbalancing the stack.
+ * So we move each arg up a spot in the stack (overwriting the no-longer-needed
+ * 'this' arg) and leave the original spot of the first arg as a placeholder,
+ * so that when the callee pops the stack everything works.
+ */
+
+ start = code = mono_global_codeman_reserve (code_reserve);
+
+ /* store delegate for access to method_ptr */
+ x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);
+
+ /* move args up */
+ for (i = 0; i < sig->param_count; ++i) {
+ x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
+ x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
+ }
+
+ x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+
+ g_assert ((code - start) < code_reserve);
+
+ cache [sig->param_count] = start;
+
+ mono_mini_arch_unlock ();
}
- return NULL;
+ return start;
}