#include "inssel.h"
#include "trace.h"
#include "cpu-sparc.h"
+#include "jit-icalls.h"
/*
* Sparc V9 means two things:
#endif
#endif
-#define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
-
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
#define SIGNAL_STACK_SIZE (64 * 1024)
static gpointer mono_arch_get_lmf_addr (void);
-static int
-mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);
-
const char*
mono_arch_regname (int reg) {
static const char * rnames[] = {
mono_arch_cpu_optimizazions(&dummy);
}
/*
 * mono_arch_init:
 *
 *   Initialize architecture specific code. No SPARC-specific global state
 * needs initialization, so this is a no-op.
 */
void
mono_arch_init (void)
{
}
+
/*
 * mono_arch_cleanup:
 *
 *   Cleanup architecture specific code. Counterpart of mono_arch_init ();
 * nothing to release on SPARC, so this is a no-op.
 */
void
mono_arch_cleanup (void)
{
}
+
/*
* This function returns the optimizations supported on this cpu.
*/
return opts;
}
-static void
-mono_arch_break (void)
-{
-}
-
#ifdef __GNUC__
#define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
#else /* assume Sun's compiler */
/* Hopefully this is optimized based on the actual CPU */
sync_instruction_memory (code, size);
#else
- guint64 *p = (guint64*)code;
- guint64 *end = (guint64*)(code + ((size + 8) /8));
-
- /*
- * FIXME: Flushing code in dword chunks in _slow_.
+ gulong start = (gulong) code;
+ gulong end = start + size;
+ gulong align;
+
+ /* Sparcv9 chips only need flushes on 32 byte
+ * cacheline boundaries.
+ *
+ * Sparcv8 needs a flush every 8 bytes.
*/
- while (p < end)
+ align = (sparcv9 ? 32 : 8);
+
+ start &= ~(align - 1);
+ end = (end + (align - 1)) & ~(align - 1);
+
+ while (start < end) {
#ifdef __GNUC__
- __asm__ __volatile__ ("iflush %0"::"r"(p++));
+ __asm__ __volatile__ ("iflush %0"::"r"(start));
#else
- flushi (p ++);
+ flushi (start);
#endif
+ start += align;
+ }
#endif
}
* the 'Sparc Compliance Definition 2.4' document.
*/
static CallInfo*
-get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
{
guint32 i, gr, fr;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
CallInfo *cinfo;
+ MonoType *ret_type;
+ MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
for (i = 0; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
+ MonoType *ptype;
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, ainfo, FALSE);
continue;
}
- switch (mono_type_get_underlying_type (sig->params [i])->type) {
+ ptype = mono_type_get_underlying_type (sig->params [i]);
+ ptype = mini_get_basic_type_from_generic (gsctx, ptype);
+ switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
}
/* return value */
- {
- switch (mono_type_get_underlying_type (sig->ret)->type) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_STRING:
- cinfo->ret.storage = ArgInIReg;
- cinfo->ret.reg = sparc_i0;
- if (gr < 1)
- gr = 1;
- break;
- case MONO_TYPE_U8:
- case MONO_TYPE_I8:
+ ret_type = mono_type_get_underlying_type (sig->ret);
+ ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
+ switch (ret_type->type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = sparc_i0;
+ if (gr < 1)
+ gr = 1;
+ break;
+ case MONO_TYPE_U8:
+ case MONO_TYPE_I8:
#ifdef SPARCV9
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = sparc_i0;
+ if (gr < 1)
+ gr = 1;
+#else
+ cinfo->ret.storage = ArgInIRegPair;
+ cinfo->ret.reg = sparc_i0;
+ if (gr < 2)
+ gr = 2;
+#endif
+ break;
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ cinfo->ret.storage = ArgInFReg;
+ cinfo->ret.reg = sparc_f0;
+ break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = sparc_i0;
if (gr < 1)
gr = 1;
-#else
- cinfo->ret.storage = ArgInIRegPair;
- cinfo->ret.reg = sparc_i0;
- if (gr < 2)
- gr = 2;
-#endif
- break;
- case MONO_TYPE_R4:
- case MONO_TYPE_R8:
- cinfo->ret.storage = ArgInFReg;
- cinfo->ret.reg = sparc_f0;
break;
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
- cinfo->ret.storage = ArgInIReg;
- cinfo->ret.reg = sparc_i0;
- if (gr < 1)
- gr = 1;
- break;
- }
- /* Fall through */
- case MONO_TYPE_VALUETYPE:
- if (v64) {
- if (sig->pinvoke)
- NOT_IMPLEMENTED;
- else
- /* Already done */
- ;
- }
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ if (v64) {
+ if (sig->pinvoke)
+ NOT_IMPLEMENTED;
else
- cinfo->ret.storage = ArgOnStack;
- break;
- case MONO_TYPE_TYPEDBYREF:
- if (v64) {
- if (sig->pinvoke)
- /* Same as a valuetype with size 24 */
- NOT_IMPLEMENTED;
- else
- /* Already done */
- ;
- }
+ /* Already done */
+ ;
+ }
+ else
+ cinfo->ret.storage = ArgOnStack;
+ break;
+ case MONO_TYPE_TYPEDBYREF:
+ if (v64) {
+ if (sig->pinvoke)
+ /* Same as a valuetype with size 24 */
+ NOT_IMPLEMENTED;
else
- cinfo->ret.storage = ArgOnStack;
- break;
- case MONO_TYPE_VOID:
- break;
- default:
- g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ /* Already done */
+ ;
}
+ else
+ cinfo->ret.storage = ArgOnStack;
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
}
cinfo->stack_usage = stack_size;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* Use unused input registers */
for (i = cinfo->reg_usage; i < 6; ++i)
* Set var information according to the calling convention. sparc version.
* The locals var stuff should most likely be split in another method.
*/
+
void
-mono_arch_allocate_vars (MonoCompile *m)
+mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
int i, offset, size, align, curinst;
CallInfo *cinfo;
- header = mono_method_get_header (m->method);
+ header = mono_method_get_header (cfg->method);
- sig = mono_method_signature (m->method);
+ sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
if (sig->ret->type != MONO_TYPE_VOID) {
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFReg:
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
+ break;
case ArgInIRegPair:
- m->ret->opcode = OP_REGVAR;
- m->ret->inst_c0 = cinfo->ret.reg;
+ if (cfg->new_ir && ((sig->ret->type == MONO_TYPE_I8) || (sig->ret->type == MONO_TYPE_U8))) {
+ MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
+ MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);
+
+ low->opcode = OP_REGVAR;
+ low->dreg = cinfo->ret.reg + 1;
+ high->opcode = OP_REGVAR;
+ high->dreg = cinfo->ret.reg;
+ }
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
break;
case ArgOnStack:
#ifdef SPARCV9
g_assert_not_reached ();
#else
/* valuetypes */
- m->ret->opcode = OP_REGOFFSET;
- m->ret->inst_basereg = sparc_fp;
- m->ret->inst_offset = 64;
+ cfg->vret_addr->opcode = OP_REGOFFSET;
+ cfg->vret_addr->inst_basereg = sparc_fp;
+ cfg->vret_addr->inst_offset = 64;
#endif
break;
default:
NOT_IMPLEMENTED;
}
- m->ret->dreg = m->ret->inst_c0;
+ cfg->ret->dreg = cfg->ret->inst_c0;
}
/*
*/
/* Locals are allocated backwards from %fp */
- m->frame_reg = sparc_fp;
+ cfg->frame_reg = sparc_fp;
offset = 0;
/*
if (header->num_clauses)
offset += sizeof (gpointer) * 2;
- if (m->method->save_lmf) {
+ if (cfg->method->save_lmf) {
offset += sizeof (MonoLMF);
- m->arch.lmf_offset = offset;
+ cfg->arch.lmf_offset = offset;
}
- curinst = m->locals_start;
- for (i = curinst; i < m->num_varinfo; ++i) {
- inst = m->varinfo [i];
+ curinst = cfg->locals_start;
+ for (i = curinst; i < cfg->num_varinfo; ++i) {
+ inst = cfg->varinfo [i];
- if (inst->opcode == OP_REGVAR) {
+ if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
//g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
continue;
}
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
else
- size = mono_type_stack_size (inst->inst_vtype, &align);
+ size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);
/*
* This is needed since structures containing doubles must be doubleword
}
if (sig->call_convention == MONO_CALL_VARARG) {
- m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
+ cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = m->varinfo [i];
+ inst = cfg->args [i];
if (inst->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
#endif
/* FIXME: Allocate volatile arguments to registers */
+ /* FIXME: This makes the argument holding a vtype address into volatile */
if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
inreg = FALSE;
switch (storage) {
case ArgInIReg:
- case ArgInIRegPair:
inst->opcode = OP_REGVAR;
inst->dreg = sparc_i0 + ainfo->reg;
break;
+ case ArgInIRegPair:
+ if (cfg->new_ir && (inst->type == STACK_I8)) {
+ MonoInst *low = get_vreg_to_inst (cfg, inst->dreg + 1);
+ MonoInst *high = get_vreg_to_inst (cfg, inst->dreg + 2);
+
+ low->opcode = OP_REGVAR;
+ low->dreg = sparc_i0 + ainfo->reg + 1;
+ high->opcode = OP_REGVAR;
+ high->dreg = sparc_i0 + ainfo->reg;
+ }
+ inst->opcode = OP_REGVAR;
+ inst->dreg = sparc_i0 + ainfo->reg;
+ break;
case ArgInFloatReg:
case ArgInDoubleReg:
/*
* are destructively modified in a lot of places in inssel.brg.
*/
MonoInst *indir;
- MONO_INST_NEW (m, indir, 0);
+ MONO_INST_NEW (cfg, indir, 0);
*indir = *inst;
- inst->opcode = OP_SPARC_INARG_VT;
+ inst->opcode = OP_VTARG_ADDR;
inst->inst_left = indir;
}
}
}
+ /* Add a properly aligned dword for use by int<->float conversion opcodes */
+ offset += 8;
+ offset = ALIGN_TO (offset, 8);
+ cfg->arch.float_spill_slot_offset = offset;
+
/*
* spillvars are stored between the normal locals and the storage reserved
* by the ABI.
*/
- m->stack_offset = offset;
-
- /* Add a properly aligned dword for use by int<->float conversion opcodes */
- m->spill_count ++;
- mono_spillvar_offset_float (m, 0);
+ cfg->stack_offset = offset;
g_free (cinfo);
}
+void
+mono_arch_create_vars (MonoCompile *cfg)
+{
+ MonoMethodSignature *sig;
+
+ sig = mono_method_signature (cfg->method);
+
+ if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
+ cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr = ");
+ mono_print_ins (cfg->vret_addr);
+ }
+ }
+}
+
static MonoInst *
make_group (MonoCompile *cfg, MonoInst *left, int basereg, int offset)
{
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (sig, sig->pinvoke);
+ cinfo = get_call_info (cfg, sig, sig->pinvoke);
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
else {
/*
- * Can't use mono_type_stack_size (), but that
+ * Can't use mini_type_stack_size (), but that
* aligns the size to sizeof (gpointer), which is larger
* than the size of the source, leading to reads of invalid
* memory if the source is at the end of address space or
return call;
}
/* FIXME: Remove these later */
/*
 * NEW_LOAD_MEMBASE:
 *
 *   Create (but do not emit) a load instruction DEST with the given opcode,
 * destination register and base+offset address. The result is tagged
 * STACK_I4 for the register allocator.
 */
#define NEW_LOAD_MEMBASE(cfg,dest,op,dr,base,offset) do {	\
		MONO_INST_NEW ((cfg), (dest), (op));	\
		(dest)->dreg = (dr);	\
		(dest)->inst_basereg = (base);	\
		(dest)->inst_offset = (offset);	\
		(dest)->type = STACK_I4;	\
	} while (0)

/* Like NEW_LOAD_MEMBASE, but also appends the instruction to the current bblock */
#define EMIT_NEW_LOAD_MEMBASE(cfg,dest,op,dr,base,offset) do { NEW_LOAD_MEMBASE ((cfg), (dest), (op), (dr), (base), (offset)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)

/*
 * Local override of MONO_EMIT_NEW_STORE_MEMBASE_IMM: emit a store of the
 * immediate IMM to [BASE + OFFSET]. The immediate is stashed in inst_p1
 * (a pointer-sized slot), so it can carry pointer immediates such as the
 * signature cookie emitted by emit_sig_cookie2 ().
 */
#undef MONO_EMIT_NEW_STORE_MEMBASE_IMM
#define MONO_EMIT_NEW_STORE_MEMBASE_IMM(cfg,op,base,offset,imm) do {	\
		MonoInst *inst;	\
		MONO_INST_NEW ((cfg), (inst), (op));	\
		inst->inst_destbasereg = base;	\
		inst->inst_offset = offset;	\
		inst->inst_p1 = (gpointer)(gssize)imm;	\
		MONO_ADD_INS ((cfg)->cbb, inst);	\
	} while (0)
+
+static void
+add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
+{
+ MonoInst *arg;
+
+ MONO_INST_NEW (cfg, arg, 0);
+
+ arg->sreg1 = sreg;
+
+ switch (storage) {
+ case ArgInIReg:
+ arg->opcode = OP_MOVE;
+ arg->dreg = mono_alloc_ireg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
+ break;
+ case ArgInFloatReg:
+ arg->opcode = OP_FMOVE;
+ arg->dreg = mono_alloc_freg (cfg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ MONO_ADD_INS (cfg->cbb, arg);
+}
+
+static void
+add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
+{
+ MonoInst *arg;
+ int dreg = mono_alloc_ireg (cfg);
+
+ EMIT_NEW_LOAD_MEMBASE (cfg, arg, OP_LOAD_MEMBASE, dreg, sparc_sp, offset);
+ MONO_ADD_INS (cfg->cbb, arg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
+}
+
/*
 * emit_pass_long:
 *
 *   Pass a 64 bit integer argument. On 32 bit sparc a long occupies two
 * vregs: in->dreg + 1 holds the least significant word and in->dreg + 2
 * the most significant word (same decomposition used by
 * mono_arch_allocate_vars () for ArgInIRegPair).
 */
static void
emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIRegPair:
		/* Big endian: the lower numbered register receives the high word */
		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, in->dreg + 1);
		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
		break;
	case ArgOnStackPair:
		/* High word at the lower address, low word 4 bytes above */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, in->dreg + 2);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
		break;
	case ArgInSplitRegStack:
		/* High word in the last parameter register, low word in the first stack slot */
		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
		break;
	default:
		g_assert_not_reached ();
	}
}
+
/*
 * emit_pass_double:
 *
 *   Pass an R8 argument. Sparc has no direct fpu <-> integer register
 * moves, so when the value must go into integer registers it is first
 * spilled to its stack slot, then reloaded word by word.
 */
static void
emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;

	switch (ainfo->storage) {
	case ArgInIRegPair:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);

		/* Load into a register pair */
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
		break;
	case ArgOnStackPair:
		/* The stack slot is the final home of the argument, no reload needed */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
		break;
	case ArgInSplitRegStack:
		/* floating-point <-> integer transfer must go through memory */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
		/* Load most significant word into register */
		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
		break;
	default:
		g_assert_not_reached ();
	}
}
+
+static void
+emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
+{
+ int offset = ARGS_OFFSET + ainfo->offset;
+
+ switch (ainfo->storage) {
+ case ArgInIReg:
+ /* floating-point <-> integer transfer must go through memory */
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
+ add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
+ break;
+ case ArgOnStack:
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+}
+
+static void
+emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
+
/*
 * emit_pass_vtype:
 *
 *   Pass a valuetype argument: reserve a stack area above the fixed
 * argument slots, copy the valuetype there with OP_OUTARG_VT, then pass
 * the address of the copy as an ordinary pointer argument.
 * Updates cinfo->stack_usage with the size (plus alignment padding) of
 * the reserved area.
 */
static void
emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
{
	MonoInst *arg;
	guint32 align, offset, pad, size;

	if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
		size = sizeof (MonoTypedRef);
		align = sizeof (gpointer);
	}
	else if (pinvoke)
		size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
	else {
		/*
		 * Other backends use mono_type_stack_size (), but that
		 * aligns the size to 8, which is larger than the size of
		 * the source, leading to reads of invalid memory if the
		 * source is at the end of address space.
		 */
		size = mono_class_value_size (in->klass, &align);
	}

	/* The first 6 argument locations are reserved */
	if (cinfo->stack_usage < 6 * sizeof (gpointer))
		cinfo->stack_usage = 6 * sizeof (gpointer);

	/* Align the copy's location and remember the padding so it is accounted for */
	offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
	pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);

	cinfo->stack_usage += size;
	cinfo->stack_usage += pad;

	/*
	 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
	 * use the normal OUTARG opcodes to pass the address of the location to
	 * the callee.
	 */
	if (size > 0) {
		MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
		arg->sreg1 = in->dreg;
		arg->klass = in->klass;
		arg->backend.size = size;
		/* Stash the call and a private, offset-adjusted copy of the ArgInfo
		 * for mono_arch_emit_outarg_vt () to consume later. */
		arg->inst_p0 = call;
		arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
		memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
		((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
		MONO_ADD_INS (cfg->cbb, arg);

		/* Compute the address of the stack copy ... */
		MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
		arg->dreg = mono_alloc_preg (cfg);
		arg->sreg1 = sparc_sp;
		arg->inst_imm = STACK_BIAS + offset;
		MONO_ADD_INS (cfg->cbb, arg);

		/* ... and pass it like a plain pointer argument */
		emit_pass_other (cfg, call, ainfo, NULL, arg);
	}
}
+
/*
 * emit_pass_other:
 *
 *   Pass a pointer sized (or smaller integral) argument, either in an
 * integer register or in its assigned stack slot.
 */
static void
emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
{
	int offset = ARGS_OFFSET + ainfo->offset;
	int opcode;

	switch (ainfo->storage) {
	case ArgInIReg:
		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
		break;
	case ArgOnStack:
#ifdef SPARCV9
		NOT_IMPLEMENTED;
#else
		/* Pick a store size matching the slot's alignment, since sparc
		 * traps on misaligned accesses. */
		if (offset & 0x1)
			opcode = OP_STOREI1_MEMBASE_REG;
		else if (offset & 0x2)
			opcode = OP_STOREI2_MEMBASE_REG;
		else
			opcode = OP_STOREI4_MEMBASE_REG;
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
#endif
		break;
	default:
		g_assert_not_reached ();
	}
}
+
/*
 * emit_sig_cookie2:
 *
 *   Emit the signature cookie required for vararg calls: a pointer to a
 * trimmed signature describing only the arguments after the sentinel,
 * stored into the cookie's stack slot.
 * NOTE(review): tmp_sig is heap allocated by mono_metadata_signature_dup ()
 * and its address becomes an immediate in the emitted code, so it must stay
 * alive as long as the method — it is intentionally not freed here.
 */
static void
emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
	MonoMethodSignature *tmp_sig;

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;
	/* We allways pass the signature on the stack for simplicity */
	MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
}
+
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR which passes the arguments of CALL according to the sparc
 * calling convention computed by get_call_info (), and record the total
 * outgoing stack usage in call->stack_usage.
 */
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in;
	MonoMethodSignature *sig;
	int i, n;
	CallInfo *cinfo;
	ArgInfo *ainfo;
	guint32 extra_space = 0;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg, sig, sig->pinvoke);

	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* Set the 'struct/union return pointer' location on the stack */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
	}

	for (i = 0; i < n; ++i) {
		MonoType *arg_type;

		ainfo = cinfo->args + i;

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the first implicit argument */
			emit_sig_cookie2 (cfg, call, cinfo);
		}

		in = call->args [i];

		/* The implicit 'this' argument is typed as object */
		if (sig->hasthis && (i == 0))
			arg_type = &mono_defaults.object_class->byval_arg;
		else
			arg_type = sig->params [i - sig->hasthis];

		/* Dispatch on the argument type: vtypes, longs, doubles and floats
		 * need special handling, everything else is pointer sized. */
		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
			emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
		else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
			emit_pass_long (cfg, call, ainfo, in);
		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
			emit_pass_double (cfg, call, ainfo, in);
		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
			emit_pass_float (cfg, call, ainfo, in);
		else
			emit_pass_other (cfg, call, ainfo, arg_type, in);
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
		emit_sig_cookie2 (cfg, call, cinfo);
	}

	call->stack_usage = cinfo->stack_usage + extra_space;

	g_free (cinfo);
}
+
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower OP_OUTARG_VT: copy the valuetype starting at SRC->dreg to the
 * stack slot recorded in ins->inst_p1 (an ArgInfo whose offset was filled
 * in by emit_pass_vtype ()).
 */
void
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
{
	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
	int size = ins->backend.size;

	mini_emit_memcpy2 (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
}
+
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+ CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
+
+ switch (cinfo->ret.storage) {
+ case ArgInIReg:
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+ break;
+ case ArgInIRegPair:
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg + 2);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 1, val->dreg + 1);
+ break;
+ case ArgInFReg:
+ if (mono_method_signature (method)->ret->type == MONO_TYPE_R4)
+ MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
+ else
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ g_assert (cinfo);
+}
+
/*
 * Map from a comparison to a sparc branch condition, indexed by
 * [CompRelation][CompType] as produced by mono_opcode_to_cond () and
 * mono_opcode_to_type () in opcode_to_sparc_cond () below. The third
 * column holds the floating point (fcc) variants; a 0 entry means no
 * single fcc condition exists for that relation.
 * NOTE(review): row order must match the CompRelation enum — confirm
 * against its definition when changing either.
 */
int cond_to_sparc_cond [][3] = {
	{sparc_be, sparc_be, sparc_fbe},
	{sparc_bne, sparc_bne, 0},
	{sparc_ble, sparc_ble, sparc_fble},
	{sparc_bge, sparc_bge, sparc_fbge},
	{sparc_bl, sparc_bl, sparc_fbl},
	{sparc_bg, sparc_bg, sparc_fbg},
	{sparc_bleu, sparc_bleu, 0},
	{sparc_beu, sparc_beu, 0},
	{sparc_blu, sparc_blu, sparc_fbl},
	{sparc_bgu, sparc_bgu, sparc_fbg}
};
+
/* Map opcode to the sparc condition codes */
static inline SparcCond
opcode_to_sparc_cond (int opcode)
{
+ CompRelation rel;
+ CompType t;
+
switch (opcode) {
- case OP_FBGE:
- return sparc_fbge;
- case OP_FBLE:
- return sparc_fble;
- case OP_FBEQ:
- case OP_FCEQ:
- return sparc_fbe;
- case OP_FBLT:
- case OP_FCLT:
- case OP_FCLT_UN:
- return sparc_fbl;
- case OP_FBGT:
- case OP_FCGT:
- case OP_FCGT_UN:
- return sparc_fbg;
- case CEE_BEQ:
- case OP_IBEQ:
- case OP_CEQ:
- case OP_ICEQ:
- case OP_COND_EXC_EQ:
- return sparc_be;
- case CEE_BNE_UN:
- case OP_COND_EXC_NE_UN:
- case OP_IBNE_UN:
- return sparc_bne;
- case CEE_BLT:
- case OP_IBLT:
- case OP_CLT:
- case OP_ICLT:
- case OP_COND_EXC_LT:
- return sparc_bl;
- case CEE_BLT_UN:
- case OP_IBLT_UN:
- case OP_CLT_UN:
- case OP_ICLT_UN:
- case OP_COND_EXC_LT_UN:
- return sparc_blu;
- case CEE_BGT:
- case OP_IBGT:
- case OP_CGT:
- case OP_ICGT:
- case OP_COND_EXC_GT:
- return sparc_bg;
- case CEE_BGT_UN:
- case OP_IBGT_UN:
- case OP_CGT_UN:
- case OP_ICGT_UN:
- case OP_COND_EXC_GT_UN:
- return sparc_bgu;
- case CEE_BGE:
- case OP_IBGE:
- case OP_COND_EXC_GE:
- return sparc_bge;
- case CEE_BGE_UN:
- case OP_IBGE_UN:
- case OP_COND_EXC_GE_UN:
- return sparc_beu;
- case CEE_BLE:
- case OP_IBLE:
- case OP_COND_EXC_LE:
- return sparc_ble;
- case CEE_BLE_UN:
- case OP_IBLE_UN:
- case OP_COND_EXC_LE_UN:
- return sparc_bleu;
case OP_COND_EXC_OV:
case OP_COND_EXC_IOV:
return sparc_bvs;
case OP_COND_EXC_NC:
NOT_IMPLEMENTED;
default:
- g_assert_not_reached ();
- return sparc_be;
+ rel = mono_opcode_to_cond (opcode);
+ t = mono_opcode_to_type (opcode, -1);
+
+ return cond_to_sparc_cond [rel][t];
+ break;
}
+
+ return -1;
}
#define COMPUTE_DISP(ins) \
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
MONO_PATCH_INFO_EXC, sexc_name); \
- if (sparcv9) { \
+ if (sparcv9 && ((icc) != sparc_icc_short)) { \
sparc_branchp (code, 0, (cond), (icc), 0, 0); \
} \
else { \
return code;
}
-static void
-peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+void
+mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *last_ins = NULL;
- ins = bb->code;
+}
- while (ins) {
+void
+mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoInst *ins, *n, *last_ins = NULL;
+ ins = bb->code;
+ MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
case OP_MUL_IMM:
/* remove unnecessary multiplication with 1 */
if (ins->dreg != ins->sreg1) {
ins->opcode = OP_MOVE;
} else {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
}
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
} else {
//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->dreg) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
} else {
ins->opcode = OP_MOVE;
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
} else {
//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
} else {
//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
if (sparcv9) {
last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
last_ins->inst_offset = ins->inst_offset;
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
}
break;
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLT:
- case CEE_BGT:
- case CEE_BGE:
- case CEE_BLE:
+ case OP_IBEQ:
+ case OP_IBNE_UN:
+ case OP_IBLT:
+ case OP_IBGT:
+ case OP_IBGE:
+ case OP_IBLE:
case OP_COND_EXC_EQ:
case OP_COND_EXC_GE:
case OP_COND_EXC_GT:
if (v64 && (mono_method_get_header (cfg->method)->code_size < 10000) && last_ins &&
(last_ins->opcode == OP_COMPARE_IMM) &&
(last_ins->inst_imm == 0)) {
- MonoInst *next = ins->next;
switch (ins->opcode) {
- case CEE_BEQ:
+ case OP_IBEQ:
ins->opcode = OP_SPARC_BRZ;
break;
- case CEE_BNE_UN:
+ case OP_IBNE_UN:
ins->opcode = OP_SPARC_BRNZ;
break;
- case CEE_BLT:
+ case OP_IBLT:
ins->opcode = OP_SPARC_BRLZ;
break;
- case CEE_BGT:
+ case OP_IBGT:
ins->opcode = OP_SPARC_BRGZ;
break;
- case CEE_BGE:
+ case OP_IBGE:
ins->opcode = OP_SPARC_BRGEZ;
break;
- case CEE_BLE:
+ case OP_IBLE:
ins->opcode = OP_SPARC_BRLEZ;
break;
case OP_COND_EXC_EQ:
}
ins->sreg1 = last_ins->sreg1;
*last_ins = *ins;
- last_ins->next = next;
- ins = next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
break;
- case CEE_CONV_I4:
- case CEE_CONV_U4:
case OP_MOVE:
/*
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
- if (last_ins)
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
/*
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DELETE_INS (bb, ins);
continue;
}
break;
bb->last_ins = last_ins;
}
-static int
-mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
+void
+mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoSpillInfo **si, *info;
- int i = 0;
-
- si = &cfg->spill_info_float;
-
- while (i <= spillvar) {
-
- if (!*si) {
- *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
- info->next = NULL;
- cfg->stack_offset += sizeof (double);
- cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
- info->offset = - cfg->stack_offset;
- }
-
- if (i == spillvar)
- return MONO_SPARC_STACK_BIAS + (*si)->offset;
-
- i++;
- si = &(*si)->next;
- }
-
- g_assert_not_reached ();
- return 0;
}
/* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
-void
-mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
-{
- mono_local_regalloc (cfg, bb);
-}
-
static void
sparc_patch (guint32 *code, const gpointer target)
{
}
static guint32*
-emit_vret_token (MonoInst *ins, guint32 *code)
+emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
{
MonoCallInst *call = (MonoCallInst*)ins;
guint32 size;
*/
if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
- size = mono_type_stack_size (call->signature->ret, NULL);
+ size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
else
size = mono_class_native_size (call->signature->ret->data.klass, NULL);
sparc_unimp (code, size & 0xfff);
case OP_VOIDCALL_REG:
case OP_VOIDCALL_MEMBASE:
break;
- case CEE_CALL:
+ case OP_CALL:
case OP_CALL_REG:
case OP_CALL_MEMBASE:
g_assert (ins->dreg == sparc_o0);
case OP_VCALL:
case OP_VCALL_REG:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE:
break;
default:
NOT_IMPLEMENTED;
sig = mono_method_signature (method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* This is the opposite of the code in emit_prolog */
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- inst = cfg->varinfo [i];
+
+ inst = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
}
/*
- * mono_arch_get_vcall_slot_addr:
+ * mono_arch_get_vcall_slot:
*
* Determine the vtable slot used by a virtual call.
*/
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
+gpointer
+mono_arch_get_vcall_slot (guint8 *code8, gpointer *regs, int *displacement)
{
guint32 *code = (guint32*)(gpointer)code8;
guint32 ins = code [0];
mono_sparc_flushw ();
+ *displacement = 0;
+
if (!mono_sparc_is_virtual_call (code))
return NULL;
if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
/* ld [r1 + CONST ], r2; call r2 */
guint32 base = sparc_inst_rs1 (prev_ins);
- guint32 disp = sparc_inst_imm13 (prev_ins);
+ gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
gpointer base_val;
g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
- base_val = regs [base - sparc_o0];
+ base_val = regs [base];
- return (gpointer)((guint8*)base_val + disp);
+ *displacement = disp;
+
+ return (gpointer)base_val;
}
else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
/* set r1, ICONST; ld [r1 + r2], r2; call r2 */
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
- base_val = regs [base - sparc_o0];
+ base_val = regs [base];
+
+ *displacement = disp;
- return (gpointer)((guint8*)base_val + disp);
+ return (gpointer)base_val;
} else
g_assert_not_reached ();
}
return NULL;
}
+/*
+ * mono_arch_get_vcall_slot_addr:
+ *
+ *   Compatibility wrapper around mono_arch_get_vcall_slot (): returns the
+ * absolute address of the vtable slot (base pointer + displacement), or
+ * NULL when CODE is not a recognized virtual call site.
+ */
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+{
+	gpointer vt;
+	int displacement;
+	vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+	if (!vt)
+		return NULL;
+	return (gpointer*)((char*)vt + displacement);
+}
+
+/*
+ * Per-item size estimates for the IMT thunk, in 32-bit instruction words
+ * (the reservation below is size * 4 bytes).
+ */
+#define CMP_SIZE 3
+#define BR_SMALL_SIZE 2
+#define BR_LARGE_SIZE 2
+#define JUMP_IMM_SIZE 5
+#define ENABLE_WRONG_METHOD_CHECK 0
+
+/*
+ * mono_arch_build_imt_thunk:
+ *
+ *   Build a native trampoline which dispatches on the IMT method: each
+ * entry compares the method token against MONO_ARCH_IMT_REG and either
+ * jumps through the matching vtable slot or falls through/branches to the
+ * next check.
+ *
+ * LOCKING: called with the domain lock held
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+	int i;
+	int size = 0;
+	guint32 *code, *start;
+
+	/* First pass: estimate the size (in words) of each entry's code. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done)
+					item->chunk_size += CMP_SIZE;
+				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
+			} else {
+				item->chunk_size += JUMP_IMM_SIZE;
+#if ENABLE_WRONG_METHOD_CHECK
+				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
+#endif
+			}
+		} else {
+			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
+			imt_entries [item->check_target_idx]->compare_done = TRUE;
+		}
+		size += item->chunk_size;
+	}
+	code = mono_code_manager_reserve (domain->code_mp, size * 4);
+	start = code;
+
+	/* Second pass: emit the compare/branch/indirect-jump sequences. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		item->code_target = (guint8*)code;
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done) {
+					sparc_set (code, (guint32)item->method, sparc_g5);
+					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
+				}
+				item->jmp_code = (guint8*)code;
+				sparc_branch (code, 0, sparc_bne, 0);
+				sparc_nop (code);
+				/* Load the vtable slot contents and jump through it. */
+				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
+				sparc_ld (code, sparc_g5, 0, sparc_g5);
+				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
+				sparc_nop (code);
+			} else {
+				/* enable the commented code to assert on wrong method */
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
+				sparc_ld (code, sparc_g5, 0, sparc_g5);
+				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
+				sparc_nop (code);
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+			}
+		} else {
+			sparc_set (code, (guint32)item->method, sparc_g5);
+			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
+			item->jmp_code = (guint8*)code;
+			sparc_branch (code, 0, sparc_beu, 0);
+			sparc_nop (code);
+		}
+	}
+	/* patch the branches to get to the target items */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->jmp_code) {
+			if (item->check_target_idx) {
+				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+			}
+		}
+	}
+
+	mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
+
+	mono_stats.imt_thunks_size += (code - start) * 4;
+	g_assert (code - start <= size);
+	return start;
+}
+
+/*
+ * mono_arch_find_imt_method:
+ *
+ *   Recover the interface method from the saved register state of an IMT
+ * call site.  Presumably MONO_ARCH_IMT_REG == sparc_g1 here — confirm
+ * against mini-sparc.h.  Not supported on V9 yet (asserts).
+ */
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+#ifdef SPARCV9
+	g_assert_not_reached ();
+#endif
+
+	return (MonoMethod*)regs [sparc_g1];
+}
+
+/*
+ * mono_arch_find_this_argument:
+ *
+ *   Recover the 'this' argument from the saved register state: it lives
+ * in %o0 (the first outgoing argument register).  The register windows
+ * are flushed first so the saved register array reflects current state.
+ */
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
+{
+	mono_sparc_flushw ();
+
+	return (gpointer)regs [sparc_o0];
+}
+
/*
* Some conventions used in the following code.
* 2) The only scratch registers we have are o7 and g1. We try to
int max_len, cpos;
const char *spec;
- if (cfg->opt & MONO_OPT_PEEPHOLE)
- peephole_pass (cfg, bb);
-
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
NOT_IMPLEMENTED;
}
- ins = bb->code;
- while (ins) {
+ MONO_BB_FOR_EACH_INS (bb, ins) {
guint8* code_start;
offset = (guint8*)code - cfg->native_code;
spec = ins_get_spec (ins->opcode);
- /* I kept this, but this looks a workaround for a bug */
- if (spec == MONO_ARCH_CPU_SPEC)
- spec = ins_get_spec (CEE_ADD);
max_len = ((guint8 *)spec)[MONO_INST_LEN];
case OP_STORE_MEMBASE_REG:
EMIT_STORE_MEMBASE_REG (ins, sti);
break;
- case CEE_LDIND_I:
-#ifdef SPARCV9
- sparc_ldx (code, ins->inst_c0, sparc_g0, ins->dreg);
-#else
- sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
-#endif
- break;
- case CEE_LDIND_I4:
-#ifdef SPARCV9
- sparc_ldsw (code, ins->inst_c0, sparc_g0, ins->dreg);
-#else
- sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
-#endif
- break;
- case CEE_LDIND_U4:
- sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
- break;
case OP_LOADU4_MEM:
sparc_set (code, ins->inst_c0, ins->dreg);
sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
EMIT_LOAD_MEMBASE (ins, ldx);
break;
#endif
- case CEE_CONV_I1:
+ case OP_ICONV_TO_I1:
sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
break;
- case CEE_CONV_I2:
+ case OP_ICONV_TO_I2:
sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
break;
- case CEE_CONV_U1:
+ case OP_ICONV_TO_U1:
sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
break;
- case CEE_CONV_U2:
+ case OP_ICONV_TO_U2:
sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
break;
- case CEE_CONV_OVF_U4:
+ case OP_LCONV_TO_OVF_U4:
+ case OP_ICONV_TO_OVF_U4:
/* Only used on V9 */
sparc_cmp_imm (code, ins->sreg1, 0);
mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
sparc_nop (code);
sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
break;
- case CEE_CONV_OVF_I4_UN:
+ case OP_LCONV_TO_OVF_I4_UN:
+ case OP_ICONV_TO_OVF_I4_UN:
/* Only used on V9 */
NOT_IMPLEMENTED;
break;
- case CEE_CONV_U:
- case CEE_CONV_U8:
- /* Only used on V9 */
- sparc_srl_imm (code, ins->sreg1, 0, ins->dreg);
- break;
- case CEE_CONV_I:
- case CEE_CONV_I8:
- /* Only used on V9 */
- sparc_sra_imm (code, ins->sreg1, 0, ins->dreg);
- break;
case OP_COMPARE:
case OP_LCOMPARE:
case OP_ICOMPARE:
sparc_cmp (code, ins->sreg1, sparc_o7);
}
break;
- case CEE_BREAK:
+ case OP_BREAK:
/*
* gdb does not like encountering 'ta 1' in the debugged code. So
* instead of emitting a trap, we emit a call a C function and place a
* breakpoint there.
*/
//sparc_ta (code, 1);
- mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_arch_break);
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
EMIT_CALL();
break;
case OP_ADDCC:
case OP_IADDCC:
sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
break;
- case CEE_ADD:
case OP_IADD:
sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
case OP_ISUBCC:
sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
break;
- case CEE_SUB:
case OP_ISUB:
sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
case OP_ISBB_IMM:
EMIT_ALU_IMM (ins, subx, TRUE);
break;
- case CEE_AND:
case OP_IAND:
sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
case OP_IAND_IMM:
EMIT_ALU_IMM (ins, and, FALSE);
break;
- case CEE_DIV:
case OP_IDIV:
/* Sign extend sreg1 into %y */
sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
break;
- case CEE_DIV_UN:
case OP_IDIV_UN:
sparc_wry (code, sparc_g0, sparc_g0);
sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
- case OP_DIV_IMM: {
+ case OP_DIV_IMM:
+ case OP_IDIV_IMM: {
int i, imm;
/* Transform division into a shift */
}
break;
}
- case CEE_REM:
+ case OP_IDIV_UN_IMM:
+ sparc_wry (code, sparc_g0, sparc_g0);
+ EMIT_ALU_IMM (ins, udiv, FALSE);
+ break;
case OP_IREM:
/* Sign extend sreg1 into %y */
sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
break;
- case CEE_REM_UN:
case OP_IREM_UN:
sparc_wry (code, sparc_g0, sparc_g0);
sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
}
sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
break;
- case CEE_OR:
case OP_IOR:
sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
case OP_IOR_IMM:
EMIT_ALU_IMM (ins, or, FALSE);
break;
- case CEE_XOR:
case OP_IXOR:
sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
case OP_IXOR_IMM:
EMIT_ALU_IMM (ins, xor, FALSE);
break;
- case CEE_SHL:
case OP_ISHL:
sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
break;
sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
}
break;
- case CEE_SHR:
case OP_ISHR:
sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
break;
sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
}
break;
- case CEE_SHR_UN:
case OP_ISHR_UN:
sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
break;
sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
}
break;
- case CEE_NOT:
case OP_INOT:
/* can't use sparc_not */
sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
break;
- case CEE_NEG:
case OP_INEG:
/* can't use sparc_neg */
sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
break;
- case CEE_MUL:
case OP_IMUL:
sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
EMIT_ALU_IMM (ins, smul, FALSE);
break;
}
- case CEE_MUL_OVF:
case OP_IMUL_OVF:
sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
sparc_rdy (code, sparc_g1);
sparc_cmp (code, sparc_g1, sparc_o7);
EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
break;
- case CEE_MUL_OVF_UN:
case OP_IMUL_OVF_UN:
sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
sparc_rdy (code, sparc_o7);
EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
break;
case OP_ICONST:
- case OP_SETREGIMM:
sparc_set (code, ins->inst_c0, ins->dreg);
break;
case OP_I8CONST:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
sparc_set_template (code, ins->dreg);
break;
- case CEE_CONV_I4:
- case CEE_CONV_U4:
+ case OP_JUMP_TABLE:
+ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ sparc_set_template (code, ins->dreg);
+ break;
+ case OP_ICONV_TO_I4:
+ case OP_ICONV_TO_U4:
case OP_MOVE:
- case OP_SETREG:
if (ins->sreg1 != ins->dreg)
sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
break;
- case OP_SETFREG:
- /* Only used on V9 */
+ case OP_FMOVE:
+#ifdef SPARCV9
if (ins->sreg1 != ins->dreg)
sparc_fmovd (code, ins->sreg1, ins->dreg);
+#else
+ sparc_fmovs (code, ins->sreg1, ins->dreg);
+ sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
+#endif
break;
case OP_SPARC_SETFREG_FLOAT:
/* Only used on V9 */
sparc_fdtos (code, ins->sreg1, ins->dreg);
break;
- case CEE_JMP:
+ case OP_JMP:
if (cfg->method->save_lmf)
NOT_IMPLEMENTED;
break;
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
- sparc_ld_imm (code, ins->sreg1, 0, sparc_g0);
+ /* Might be misaligned in case of vtypes so use a byte load */
+ sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
break;
case OP_ARGLIST:
sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
- case CEE_CALL:
+ case OP_CALL:
call = (MonoCallInst*)ins;
g_assert (!call->virtual);
code = emit_save_sp_to_lmf (cfg, code);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
call = (MonoCallInst*)ins;
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_SETFRET:
break;
case OP_LOCALLOC: {
guint32 size_reg;
+ gint32 offset2;
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
/* Perform stack touching */
#endif
/* Keep alignment */
- sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
+ /* Add 4 to compensate for the rounding of localloc_offset */
+ sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
/* Keep %sp valid at all times */
sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
- g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
- sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
+ /* Round localloc_offset too so the result is at least 8 aligned */
+ offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
+ g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
+ sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
if (ins->flags & MONO_INST_INIT) {
guint32 *br [3];
}
break;
}
- case OP_SPARC_LOCALLOC_IMM: {
- gint32 offset = ins->inst_c0;
-
+ case OP_LOCALLOC_IMM: {
+ gint32 offset = ins->inst_imm;
+ gint32 offset2;
+
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
/* Perform stack touching */
NOT_IMPLEMENTED;
#endif
- offset = ALIGN_TO (offset, MONO_ARCH_LOCALLOC_ALIGNMENT);
+ /* To compensate for the rounding of localloc_offset */
+ offset += sizeof (gpointer);
+ offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
if (sparc_is_imm13 (offset))
sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
else {
sparc_set (code, offset, sparc_o7);
sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
}
- g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
- sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
+ /* Round localloc_offset too so the result is at least 8 aligned */
+ offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
+ g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
+ sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
guint32 *br [2];
int i;
}
break;
}
- case CEE_RET:
- /* The return is done in the epilog */
- g_assert_not_reached ();
- break;
- case CEE_THROW:
+ case OP_THROW:
sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
break;
}
- case CEE_ENDFINALLY: {
+ case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (!sparc_is_imm13 (spvar->inst_offset)) {
sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
case OP_LABEL:
ins->inst_c0 = (guint8*)code - cfg->native_code;
break;
- case CEE_BR:
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
+ break;
+ case OP_BR:
//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
break;
case OP_COND_EXC_NO:
case OP_COND_EXC_C:
case OP_COND_EXC_NC:
+ case OP_COND_EXC_IEQ:
+ case OP_COND_EXC_INE_UN:
+ case OP_COND_EXC_ILT:
+ case OP_COND_EXC_ILT_UN:
+ case OP_COND_EXC_IGT:
+ case OP_COND_EXC_IGT_UN:
+ case OP_COND_EXC_IGE:
+ case OP_COND_EXC_IGE_UN:
+ case OP_COND_EXC_ILE:
+ case OP_COND_EXC_ILE_UN:
+ case OP_COND_EXC_IOV:
+ case OP_COND_EXC_INO:
+ case OP_COND_EXC_IC:
+ case OP_COND_EXC_INC:
+#ifdef SPARCV9
+ NOT_IMPLEMENTED;
+#else
EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
+#endif
break;
case OP_SPARC_COND_EXC_EQZ:
EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
case OP_SPARC_COND_EXC_NEZ:
EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
break;
- case OP_COND_EXC_IOV:
- case OP_COND_EXC_IC:
- EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1, TRUE, sparc_icc_short);
- break;
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLT:
- case CEE_BLT_UN:
- case CEE_BGT:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BGE_UN:
- case CEE_BLE:
- case CEE_BLE_UN: {
- if (sparcv9)
- EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
- else
- EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
- break;
- }
case OP_IBEQ:
case OP_IBNE_UN:
case OP_IBGE_UN:
case OP_IBLE:
case OP_IBLE_UN: {
- /* Only used on V9 */
- EMIT_COND_BRANCH_ICC (ins, opcode_to_sparc_cond (ins->opcode), 1, 1, sparc_icc_short);
+ if (sparcv9)
+ EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
+ else
+ EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
break;
}
sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
break;
}
- case OP_FMOVE:
-#ifdef SPARCV9
- sparc_fmovd (code, ins->sreg1, ins->dreg);
-#else
- sparc_fmovs (code, ins->sreg1, ins->dreg);
- sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
-#endif
- break;
- case CEE_CONV_R4: {
- gint32 offset = mono_spillvar_offset_float (cfg, 0);
+ case OP_ICONV_TO_R4: {
+ gint32 offset = cfg->arch.float_spill_slot_offset;
#ifdef SPARCV9
if (!sparc_is_imm13 (offset)) {
sparc_set (code, offset, sparc_o7);
sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
break;
}
- case CEE_CONV_R8: {
- gint32 offset = mono_spillvar_offset_float (cfg, 0);
+ case OP_ICONV_TO_R8: {
+ gint32 offset = cfg->arch.float_spill_slot_offset;
#ifdef SPARCV9
if (!sparc_is_imm13 (offset)) {
sparc_set (code, offset, sparc_o7);
#endif
case OP_FCONV_TO_I4:
case OP_FCONV_TO_U4: {
- gint32 offset = mono_spillvar_offset_float (cfg, 0);
+ gint32 offset = cfg->arch.float_spill_slot_offset;
sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
if (!sparc_is_imm13 (offset)) {
sparc_set (code, offset, sparc_o7);
/* Emulated */
g_assert_not_reached ();
break;
- case CEE_CONV_R_UN:
- /* Emulated */
- g_assert_not_reached ();
+ case OP_FCONV_TO_R4:
+ /* FIXME: Change precision ? */
+#ifdef SPARCV9
+ sparc_fmovd (code, ins->sreg1, ins->dreg);
+#else
+ sparc_fmovs (code, ins->sreg1, ins->dreg);
+ sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
+#endif
break;
case OP_LCONV_TO_R_UN: {
/* Emulated */
g_assert_not_reached ();
break;
}
- case OP_LCONV_TO_OVF_I: {
+ case OP_LCONV_TO_OVF_I:
+ case OP_LCONV_TO_OVF_I4_2: {
guint32 *br [3], *label [1];
/*
EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
break;
- case CEE_CKFINITE: {
- gint32 offset = mono_spillvar_offset_float (cfg, 0);
+ case OP_CKFINITE: {
+ gint32 offset = cfg->arch.float_spill_slot_offset;
if (!sparc_is_imm13 (offset)) {
sparc_set (code, offset, sparc_o7);
sparc_stdf (code, ins->sreg1, sparc_sp, sparc_o7);
cpos += max_len;
last_ins = ins;
-
- ins = ins->next;
}
cfg->code_len = (guint8*)code - cfg->native_code;
void
mono_arch_register_lowlevel_calls (void)
{
- mono_register_jit_icall (mono_arch_break, "mono_arch_break", NULL, TRUE);
mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
}
for (i = 0; i < 6; ++i)
sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* Save float regs on V9, since they are caller saved */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
sig = mono_method_signature (method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* Keep in sync with emit_load_volatile_arguments */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- inst = cfg->varinfo [i];
+ inst = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
can_fold = 1;
+ if (cfg->new_ir) {
+ /*
+ * FIXME: The last instruction might have a branch pointing into it like in
+ * int_ceq sparc_i0 <-
+ */
+ can_fold = 0;
+ }
+
/* Try folding last instruction into the restore */
if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
/* or reg, imm, %i0 */
int reg = sparc_inst_rs1 (code [-2]);
- int imm = sparc_inst_imm13 (code [-2]);
+ int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
code [-2] = code [-1];
code --;
sparc_restore_imm (code, reg, imm, sparc_o0);
sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
- type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
g_assert (exc_class);
+ type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
throw_ip = patch_info->ip.i;
/* Find a throw sequence for the same exception class */
if (vt_reg != -1) {
#ifdef SPARCV9
MonoInst *ins;
- MONO_INST_NEW (cfg, ins, OP_SETREG);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->sreg1 = vt_reg;
ins->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, ins);
/* add the this argument */
if (this_reg != -1) {
MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_SETREG);
+ MONO_INST_NEW (cfg, this, OP_MOVE);
this->type = this_type;
this->sreg1 = this_reg;
this->dreg = mono_regstate_next_int (cfg->rs);
{
MonoInst *ins = NULL;
- if (cmethod->klass == mono_defaults.thread_class &&
- strcmp (cmethod->name, "MemoryBarrier") == 0) {
- if (sparcv9)
- MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
- }
+ return ins;
+}
+
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoInst *ins = NULL;
return ins;
}
CallInfo *cinfo;
ArgInfo *ainfo;
- cinfo = get_call_info (csig, FALSE);
+ cinfo = get_call_info (NULL, csig, FALSE);
if (csig->hasthis) {
ainfo = &cinfo->args [0];
{
return NULL;
}
+
+/*
+ * mono_arch_context_get_int_reg:
+ *
+ *   Unimplemented stub: should return the value of integer register REG
+ * from CTX.  Currently always asserts.
+ */
+gpointer
+mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
+{
+	/* FIXME: implement */
+	g_assert_not_reached ();
+}