mono_arch_cpu_optimizazions(&dummy);
}
/*
 * mono_arch_init:
 *
 *   Perform architecture specific initialization. No per-runtime setup is
 * required on this architecture, so this is a no-op.
 */
void
mono_arch_init (void)
{
	/* Nothing to do */
}

/*
 * mono_arch_cleanup:
 *
 *   Tear down architecture specific state. mono_arch_init () allocates
 * nothing, so there is nothing to release here.
 */
void
mono_arch_cleanup (void)
{
	/* Nothing to do */
}

/*
* This function returns the optimizations supported on this cpu.
*/
/* Hopefully this is optimized based on the actual CPU */
sync_instruction_memory (code, size);
#else
- guint64 *p = (guint64*)code;
- guint64 *end = (guint64*)(code + ((size + 8) /8));
-
- /*
- * FIXME: Flushing code in dword chunks in _slow_.
+ gulong start = (gulong) code;
+ gulong end = start + size;
+ gulong align;
+
+ /* Sparcv9 chips only need flushes on 32 byte
+ * cacheline boundaries.
+ *
+ * Sparcv8 needs a flush every 8 bytes.
*/
- while (p < end)
+ align = (sparcv9 ? 32 : 8);
+
+ start &= ~(align - 1);
+ end = (end + (align - 1)) & ~(align - 1);
+
+ while (start < end) {
#ifdef __GNUC__
- __asm__ __volatile__ ("iflush %0"::"r"(p++));
+ __asm__ __volatile__ ("iflush %0"::"r"(start));
#else
- flushi (p ++);
+ flushi (start);
#endif
+ start += align;
+ }
#endif
}
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
else
- size = mono_type_stack_size (inst->inst_vtype, &align);
+ size = mini_type_stack_size (m->generic_sharing_context, inst->inst_vtype, &align);
/*
* This is needed since structures containing doubles must be doubleword
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = m->varinfo [i];
+ inst = m->args [i];
if (inst->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
else {
/*
- * Can't use mono_type_stack_size (), but that
+ * Can't use mini_type_stack_size (), but that
* aligns the size to sizeof (gpointer), which is larger
* than the size of the source, leading to reads of invalid
* memory if the source is at the end of address space or
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
MONO_PATCH_INFO_EXC, sexc_name); \
- if (sparcv9) { \
+ if (sparcv9 && ((icc) != sparc_icc_short)) { \
sparc_branchp (code, 0, (cond), (icc), 0, 0); \
} \
else { \
mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
{
	MonoSpillInfo **si, *info;

	/* Only a single float spill slot is supported by this backend */
	g_assert (spillvar == 0);

	si = &cfg->spill_info_float;

	if (!*si) {
		/* Lazily reserve a doubleword aligned stack slot for the spill */
		*si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
		/*
		 * mono_mempool_alloc () does not zero the returned memory, so the
		 * link field must be cleared explicitly; otherwise any walker of
		 * the spill_info_float list would read an uninitialized pointer.
		 */
		info->next = NULL;
		cfg->stack_offset += sizeof (double);
		cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
		/* Slots grow downwards from the frame pointer */
		info->offset = - cfg->stack_offset;
	}

	/* The sparcv9 stack bias has to be added to every frame offset */
	return MONO_SPARC_STACK_BIAS + (*si)->offset;
}
/* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
}
static guint32*
-emit_vret_token (MonoInst *ins, guint32 *code)
+emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
{
MonoCallInst *call = (MonoCallInst*)ins;
guint32 size;
*/
if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
- size = mono_type_stack_size (call->signature->ret, NULL);
+ size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
else
size = mono_class_native_size (call->signature->ret->data.klass, NULL);
sparc_unimp (code, size & 0xfff);
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- inst = cfg->varinfo [i];
+ inst = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
}
/*
- * mono_arch_get_vcall_slot_addr:
+ * mono_arch_get_vcall_slot:
*
* Determine the vtable slot used by a virtual call.
*/
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
+gpointer
+mono_arch_get_vcall_slot (guint8 *code8, gpointer *regs, int *displacement)
{
guint32 *code = (guint32*)(gpointer)code8;
guint32 ins = code [0];
mono_sparc_flushw ();
+ *displacement = 0;
+
if (!mono_sparc_is_virtual_call (code))
return NULL;
if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
/* ld [r1 + CONST ], r2; call r2 */
guint32 base = sparc_inst_rs1 (prev_ins);
- guint32 disp = sparc_inst_imm13 (prev_ins);
+ gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
gpointer base_val;
g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
- base_val = regs [base - sparc_o0];
+ base_val = regs [base];
+
+ *displacement = disp;
- return (gpointer)((guint8*)base_val + disp);
+ return (gpointer)base_val;
}
else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
/* set r1, ICONST; ld [r1 + r2], r2; call r2 */
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
- base_val = regs [base - sparc_o0];
+ base_val = regs [base];
+
+ *displacement = disp;
- return (gpointer)((guint8*)base_val + disp);
+ return (gpointer)base_val;
} else
g_assert_not_reached ();
}
return NULL;
}
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+{
+ gpointer vt;
+ int displacement;
+ vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+ if (!vt)
+ return NULL;
+ return (gpointer*)((char*)vt + displacement);
+}
+
/*
 * Worst-case code sequence sizes used for the thunk size estimate below.
 * The total is multiplied by 4 before being passed to the code manager, so
 * these appear to be counted in 32 bit instruction words — TODO confirm
 * (sparc_set can expand to up to two instructions, which these bounds cover).
 */
#define CMP_SIZE 3
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 2
#define JUMP_IMM_SIZE 5
#define ENABLE_WRONG_METHOD_CHECK 0

/*
 * mono_arch_build_imt_thunk:
 *
 *   Build a native thunk which dispatches an interface call: it compares the
 * method key in MONO_ARCH_IMT_REG against the IMT_ENTRIES and jumps through
 * the matching vtable slot of VTABLE. Returns the address of the generated
 * code.
 *
 * LOCKING: called with the domain lock held
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
{
	int i;
	int size = 0;
	guint32 *code, *start;

	/* First pass: compute an upper bound on the size of each chunk */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			if (item->check_target_idx) {
				/* compare (unless a previous range check already did) + branch + jump */
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
			} else {
				/* Unconditional fallback entry: jump only */
				item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
#endif
			}
		} else {
			/* Range-check node of the binary search tree */
			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
			/* The target item can reuse this comparison result */
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}
	/* size is in words, the code manager wants bytes */
	code = mono_code_manager_reserve (domain->code_mp, size * 4);
	start = code;

	/* Second pass: emit the code for each entry */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->code_target = (guint8*)code;
		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					sparc_set (code, (guint32)item->method, sparc_g5);
					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
				}
				/* Branch to the next candidate on mismatch; patched below */
				item->jmp_code = (guint8*)code;
				sparc_branch (code, 0, sparc_bne, 0);
				sparc_nop (code);
				/* Load the vtable slot and jump through it */
				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
				sparc_ld (code, sparc_g5, 0, sparc_g5);
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
				sparc_nop (code);
			} else {
				/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
				sparc_ld (code, sparc_g5, 0, sparc_g5);
				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
				sparc_nop (code);
#if ENABLE_WRONG_METHOD_CHECK
				g_assert_not_reached ();
#endif
			}
		} else {
			/* Range check: branch to the upper half when key > item->method */
			sparc_set (code, (guint32)item->method, sparc_g5);
			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
			item->jmp_code = (guint8*)code;
			sparc_branch (code, 0, sparc_beu, 0);
			sparc_nop (code);
		}
	}
	/* patch the branches to get to the target items */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx) {
				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
			}
		}
	}

	mono_arch_flush_icache ((guint8*)start, (code - start) * 4);

	mono_stats.imt_thunks_size += (code - start) * 4;
	/* The estimate from the first pass must not have been exceeded */
	g_assert (code - start <= size);
	return start;
}

+
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+#ifdef SPARCV9
+ g_assert_not_reached ();
+#endif
+
+ return (MonoMethod*)regs [sparc_g1];
+}
+
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+{
+ mono_sparc_flushw ();
+
+ return (gpointer)regs [sparc_o0];
+}
+
/*
* Some conventions used in the following code.
* 2) The only scratch registers we have are o7 and g1. We try to
sparc_cmp (code, ins->sreg1, sparc_o7);
}
break;
- case CEE_BREAK:
+ case OP_BREAK:
/*
* gdb does not like encountering 'ta 1' in the debugged code. So
* instead of emitting a trap, we emit a call a C function and place a
EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
break;
case OP_ICONST:
- case OP_SETREGIMM:
sparc_set (code, ins->inst_c0, ins->dreg);
break;
case OP_I8CONST:
case CEE_CONV_I4:
case CEE_CONV_U4:
case OP_MOVE:
- case OP_SETREG:
if (ins->sreg1 != ins->dreg)
sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
break;
/* Only used on V9 */
sparc_fdtos (code, ins->sreg1, ins->dreg);
break;
- case CEE_JMP:
+ case OP_JMP:
if (cfg->method->save_lmf)
NOT_IMPLEMENTED;
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_REG:
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_MEMBASE:
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_SETFRET:
/* The return is done in the epilog */
g_assert_not_reached ();
break;
- case CEE_THROW:
+ case OP_THROW:
sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
break;
}
- case CEE_ENDFINALLY: {
+ case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (!sparc_is_imm13 (spvar->inst_offset)) {
sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
case OP_LABEL:
ins->inst_c0 = (guint8*)code - cfg->native_code;
break;
- case CEE_BR:
+ case OP_BR:
//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
break;
EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
break;
- case CEE_CKFINITE: {
+ case OP_CKFINITE: {
gint32 offset = mono_spillvar_offset_float (cfg, 0);
if (!sparc_is_imm13 (offset)) {
sparc_set (code, offset, sparc_o7);
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- inst = cfg->varinfo [i];
+ inst = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
- type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
g_assert (exc_class);
+ type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
throw_ip = patch_info->ip.i;
/* Find a throw sequence for the same exception class */
if (vt_reg != -1) {
#ifdef SPARCV9
MonoInst *ins;
- MONO_INST_NEW (cfg, ins, OP_SETREG);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->sreg1 = vt_reg;
ins->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, ins);
/* add the this argument */
if (this_reg != -1) {
MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_SETREG);
+ MONO_INST_NEW (cfg, this, OP_MOVE);
this->type = this_type;
this->sreg1 = this_reg;
this->dreg = mono_regstate_next_int (cfg->rs);