mono_arch_cpu_optimizazions(&dummy);
}
+/*
+ * Initialize architecture specific code.
+ * Currently a no-op on this backend: no arch-wide state needs to be
+ * set up at JIT initialization time.
+ */
+void
+mono_arch_init (void)
+{
+}
+
+/*
+ * Cleanup architecture specific code.
+ * Counterpart of mono_arch_init (); also a no-op on this backend.
+ */
+void
+mono_arch_cleanup (void)
+{
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
}
+/*
+ * mono_arch_break:
+ *
+ *   Intentionally empty: OP_BREAK emits a call to this function so a
+ * debugger breakpoint can be placed here instead of emitting a trap
+ * instruction (see the OP_BREAK case in the code emitter, which notes
+ * gdb dislikes encountering 'ta 1' in debugged code).
+ */
static void
-mono_sparc_break (void)
+mono_arch_break (void)
{
}
/* Hopefully this is optimized based on the actual CPU */
sync_instruction_memory (code, size);
#else
- guint64 *p = (guint64*)code;
- guint64 *end = (guint64*)(code + ((size + 8) /8));
-
- /*
- * FIXME: Flushing code in dword chunks in _slow_.
+ gulong start = (gulong) code;
+ gulong end = start + size;
+ gulong align;
+
+ /* Sparcv9 chips only need flushes on 32 byte
+ * cacheline boundaries.
+ *
+ * Sparcv8 needs a flush every 8 bytes.
*/
- while (p < end)
+ align = (sparcv9 ? 32 : 8);
+
+ start &= ~(align - 1);
+ end = (end + (align - 1)) & ~(align - 1);
+
+ while (start < end) {
#ifdef __GNUC__
- __asm__ __volatile__ ("iflush %0"::"r"(p++));
+ __asm__ __volatile__ ("iflush %0"::"r"(start));
#else
- flushi (p ++);
+ flushi (start);
#endif
+ start += align;
+ }
#endif
}
g_assert ((code - start) < 64);
+ mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
+
flushw = (gpointer)start;
inited = 1;
case MONO_TYPE_ARRAY:
add_general (&gr, &stack_size, ainfo, FALSE);
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ add_general (&gr, &stack_size, ainfo, FALSE);
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
#ifdef SPARCV9
if (sig->pinvoke)
cinfo->ret.storage = ArgInFReg;
cinfo->ret.reg = sparc_f0;
break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = sparc_i0;
+ if (gr < 1)
+ gr = 1;
+ break;
+ }
+ /* Fall through */
case MONO_TYPE_VALUETYPE:
if (v64) {
if (sig->pinvoke)
return cinfo;
}
-static gboolean
-is_regsize_var (MonoType *t) {
- if (t->byref)
- return TRUE;
- switch (mono_type_get_underlying_type (t)->type) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- return TRUE;
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_STRING:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- return TRUE;
- case MONO_TYPE_VALUETYPE:
- return FALSE;
-#ifdef SPARCV9
- case MONO_TYPE_I8:
- case MONO_TYPE_U8:
- return TRUE;
-#endif
- }
- return FALSE;
-}
-
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
continue;
- if (is_regsize_var (ins->inst_vtype)) {
+ if (mono_is_regsize_var (ins->inst_vtype)) {
g_assert (MONO_VARINFO (cfg, i)->reg == -1);
g_assert (i == vmv->idx);
if (inst->flags & MONO_INST_IS_DEAD)
continue;
- /* inst->unused indicates native sized value types, this is used by the
+ /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
* pinvoke wrappers when they call functions returning structure */
- if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
+ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
else
- size = mono_type_stack_size (inst->inst_vtype, &align);
+ size = mini_type_stack_size (m->generic_sharing_context, inst->inst_vtype, &align);
/*
* This is needed since structures containing doubles must be doubleword
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = m->varinfo [i];
+ inst = m->args [i];
if (inst->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
return group;
}
+/*
+ * emit_sig_cookie:
+ *
+ *   Emit the hidden signature-cookie argument required at the sentinel
+ * position of a MONO_CALL_VARARG call. The cookie is materialized as an
+ * OP_ICONST holding a trimmed copy of the call signature, stored to the
+ * stack slot at cinfo->sig_cookie.offset, and prepended to
+ * call->out_args.
+ */
+static void
+emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+	MonoInst *arg;
+	MonoMethodSignature *tmp_sig;
+	MonoInst *sig_arg;
+
+	/*
+	 * mono_ArgIterator_Setup assumes the signature cookie is
+	 * passed first and all the arguments which were before it are
+	 * passed on the stack after the signature. So compensate by
+	 * passing a different signature.
+	 */
+	tmp_sig = mono_metadata_signature_dup (call->signature);
+	tmp_sig->param_count -= call->signature->sentinelpos;
+	tmp_sig->sentinelpos = 0;
+	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+	/* FIXME: Add support for signature tokens to AOT */
+	cfg->disable_aot = TRUE;
+	/* We always pass the signature on the stack for simplicity */
+	MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
+	arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset);
+	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+	/* NOTE(review): tmp_sig is heap-allocated and only referenced by this
+	 * ICONST; confirm its lifetime is managed by the JIT and not leaked. */
+	sig_arg->inst_p0 = tmp_sig;
+	arg->inst_left = sig_arg;
+	arg->type = STACK_PTR;
+	/* prepend, so they get reversed */
+	arg->next = call->out_args;
+	call->out_args = arg;
+}
+
/*
* take the arguments and generate the arch-specific
* instructions to properly call the function in call.
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* Emit the signature cookie just before the first implicit argument */
- MonoInst *sig_arg;
- MonoMethodSignature *tmp_sig;
-
- /*
- * mono_ArgIterator_Setup assumes the signature cookie is
- * passed first and all the arguments which were before it are
- * passed on the stack after the signature. So compensate by
- * passing a different signature.
- */
- tmp_sig = mono_metadata_signature_dup (call->signature);
- tmp_sig->param_count -= call->signature->sentinelpos;
- tmp_sig->sentinelpos = 0;
- memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
-
- /* FIXME: Add support for signature tokens to AOT */
- cfg->disable_aot = TRUE;
- /* We allways pass the signature on the stack for simplicity */
- MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
- arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset);
- MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
- sig_arg->inst_p0 = tmp_sig;
- arg->inst_left = sig_arg;
- arg->type = STACK_PTR;
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
+ emit_sig_cookie (cfg, call, cinfo);
}
if (is_virtual && i == 0) {
else
if (sig->pinvoke)
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
- else
- size = mono_type_stack_size (&in->klass->byval_arg, &align);
+ else {
+ /*
+ * Can't use mini_type_stack_size (), but that
+ * aligns the size to sizeof (gpointer), which is larger
+ * than the size of the source, leading to reads of invalid
+ * memory if the source is at the end of address space or
+ * misaligned reads.
+ */
+ size = mono_class_value_size (in->klass, &align);
+ }
/*
* We use OP_OUTARG_VT to copy the valuetype to a stack location, then
pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
inst->inst_c1 = STACK_BIAS + offset;
- inst->unused = size;
+ inst->backend.size = size;
arg->inst_left = inst;
cinfo->stack_usage += size;
case ArgInIRegPair:
if (ainfo->storage == ArgInIRegPair)
arg->opcode = OP_SPARC_OUTARG_REGPAIR;
- arg->unused = sparc_o0 + ainfo->reg;
+ arg->backend.reg3 = sparc_o0 + ainfo->reg;
call->used_iregs |= 1 << ainfo->reg;
if ((i >= sig->hasthis) && !sig->params [i - sig->hasthis]->byref && ((sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) || (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4))) {
break;
case ArgInSplitRegStack:
arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
- arg->unused = sparc_o0 + ainfo->reg;
+ arg->backend.reg3 = sparc_o0 + ainfo->reg;
call->used_iregs |= 1 << ainfo->reg;
break;
case ArgInFloatReg:
arg->opcode = OP_SPARC_OUTARG_FLOAT_REG;
- arg->unused = sparc_f0 + ainfo->reg;
+ arg->backend.reg3 = sparc_f0 + ainfo->reg;
break;
case ArgInDoubleReg:
arg->opcode = OP_SPARC_OUTARG_DOUBLE_REG;
- arg->unused = sparc_f0 + ainfo->reg;
+ arg->backend.reg3 = sparc_f0 + ainfo->reg;
break;
default:
NOT_IMPLEMENTED;
}
}
+ /* Handle the case where there are no implicit arguments */
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
+ emit_sig_cookie (cfg, call, cinfo);
+ }
+
/*
* Reverse the call->out_args list.
*/
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
MONO_PATCH_INFO_EXC, sexc_name); \
- if (sparcv9) { \
+ if (sparcv9 && ((icc) != sparc_icc_short)) { \
sparc_branchp (code, 0, (cond), (icc), 0, 0); \
} \
else { \
}
break;
#endif
- case OP_LOADU1_MEMBASE:
case OP_LOADI1_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
}
}
break;
- case OP_LOADU2_MEMBASE:
case OP_LOADI2_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
bb->last_ins = last_ins;
}
-static const char*const * ins_spec = sparc_desc;
-
-static inline const char*
-get_ins_spec (int opcode)
-{
- if (ins_spec [opcode])
- return ins_spec [opcode];
- else
- return ins_spec [CEE_ADD];
-}
-
+/*
+ * mono_spillvar_offset_float:
+ *
+ *   Return the stack offset (with the sparc stack bias applied) of the
+ * float spill slot, allocating it lazily on first use. The old linked
+ * list of per-index slots was dropped; only a single slot
+ * (spillvar == 0) is supported now, enforced by the g_assert below.
+ */
static int
mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
{
	MonoSpillInfo **si, *info;
-	int i = 0;
-	si = &cfg->spill_info_float;
-
-	while (i <= spillvar) {
-
-		if (!*si) {
-			*si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
-			info->next = NULL;
-			cfg->stack_offset += sizeof (double);
-			cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
-			info->offset = - cfg->stack_offset;
-		}
+	g_assert (spillvar == 0);
-		if (i == spillvar)
-			return MONO_SPARC_STACK_BIAS + (*si)->offset;
+	si = &cfg->spill_info_float;
-		i++;
-		si = &(*si)->next;
+	if (!*si) {
+		/* Grow the frame by one doubleword-aligned double slot. */
+		*si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
+		cfg->stack_offset += sizeof (double);
+		cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
+		info->offset = - cfg->stack_offset;
	}
-	g_assert_not_reached ();
-	return 0;
+	return MONO_SPARC_STACK_BIAS + (*si)->offset;
}
/* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
}
static guint32*
-emit_vret_token (MonoInst *ins, guint32 *code)
+emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
{
MonoCallInst *call = (MonoCallInst*)ins;
guint32 size;
*/
if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
- size = mono_type_stack_size (call->signature->ret, NULL);
+ size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
else
size = mono_class_native_size (call->signature->ret->data.klass, NULL);
sparc_unimp (code, size & 0xfff);
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- inst = cfg->varinfo [i];
+ inst = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
}
/*
- * mono_arch_get_vcall_slot_addr:
+ * mono_arch_get_vcall_slot:
*
* Determine the vtable slot used by a virtual call.
*/
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
+gpointer
+mono_arch_get_vcall_slot (guint8 *code8, gpointer *regs, int *displacement)
{
guint32 *code = (guint32*)(gpointer)code8;
guint32 ins = code [0];
mono_sparc_flushw ();
+ *displacement = 0;
+
if (!mono_sparc_is_virtual_call (code))
return NULL;
if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
- if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
+ if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
/* ld [r1 + CONST ], r2; call r2 */
guint32 base = sparc_inst_rs1 (prev_ins);
- guint32 disp = sparc_inst_imm13 (prev_ins);
+ gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
gpointer base_val;
g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
- base_val = regs [base - sparc_o0];
+ base_val = regs [base];
+
+ *displacement = disp;
- return (gpointer)((guint8*)base_val + disp);
+ return (gpointer)base_val;
}
- else
+ else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
+ /* set r1, ICONST; ld [r1 + r2], r2; call r2 */
+ /* Decode a sparc_set32 */
+ guint32 base = sparc_inst_rs1 (prev_ins);
+ guint32 disp;
+ gpointer base_val;
+ guint32 s1 = code [-3];
+ guint32 s2 = code [-2];
+
+#ifdef SPARCV9
+ NOT_IMPLEMENTED;
+#endif
+
+ /* sparc_sethi */
+ g_assert (sparc_inst_op (s1) == 0);
+ g_assert (sparc_inst_op2 (s1) == 4);
+
+ /* sparc_or_imm */
+ g_assert (sparc_inst_op (s2) == 2);
+ g_assert (sparc_inst_op3 (s2) == 2);
+ g_assert (sparc_inst_i (s2) == 1);
+ g_assert (sparc_inst_rs1 (s2) == sparc_inst_rd (s2));
+ g_assert (sparc_inst_rd (s1) == sparc_inst_rs1 (s2));
+
+ disp = ((s1 & 0x3fffff) << 10) | sparc_inst_imm13 (s2);
+
+ g_assert ((base >= sparc_o0) && (base <= sparc_i7));
+
+ base_val = regs [base];
+
+ *displacement = disp;
+
+ return (gpointer)base_val;
+ } else
g_assert_not_reached ();
}
else
return NULL;
}
+/*
+ * mono_arch_get_vcall_slot_addr:
+ *
+ *   Wrapper over mono_arch_get_vcall_slot (): combine the returned base
+ * pointer and displacement into the address of the vtable slot used by
+ * the virtual call at CODE, or return NULL if CODE is not a recognized
+ * virtual call site.
+ */
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+{
+	gpointer vt;
+	int displacement;
+	vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+	if (!vt)
+		return NULL;
+	return (gpointer*)((char*)vt + displacement);
+}
+
+/*
+ * Worst-case sizes, in 32-bit instruction words, of the sequences
+ * emitted per IMT entry below (e.g. sparc_set is up to 2 words plus
+ * sparc_cmp gives CMP_SIZE == 3).
+ */
+#define CMP_SIZE 3
+#define BR_SMALL_SIZE 2
+#define BR_LARGE_SIZE 2
+#define JUMP_IMM_SIZE 5
+#define ENABLE_WRONG_METHOD_CHECK 0
+
+/*
+ * mono_arch_build_imt_thunk:
+ *
+ *   Build the dispatch thunk for an IMT (interface method table) slot:
+ * a chain of compare-and-branch sequences over IMT_ENTRIES which jumps
+ * through the vtable slot of the matching method. Returns the start of
+ * the generated code.
+ *
+ * NOTE(review): the (guint32) casts of method and slot addresses assume
+ * a 32-bit runtime; sparcv9 is not supported here (see the
+ * g_assert_not_reached in mono_arch_find_imt_method) — confirm.
+ *
+ * LOCKING: called with the domain lock held
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+	int i;
+	int size = 0;
+	guint32 *code, *start;
+
+	/* First pass: compute an upper bound on the code size, in words. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done)
+					item->chunk_size += CMP_SIZE;
+				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
+			} else {
+				item->chunk_size += JUMP_IMM_SIZE;
+#if ENABLE_WRONG_METHOD_CHECK
+				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
+#endif
+			}
+		} else {
+			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
+			/* The branch target already does a compare, so it can skip its own. */
+			imt_entries [item->check_target_idx]->compare_done = TRUE;
+		}
+		size += item->chunk_size;
+	}
+	/* size is in instruction words; reserve bytes. */
+	code = mono_code_manager_reserve (domain->code_mp, size * 4);
+	start = code;
+
+	/* Second pass: emit the code, using g5 as scratch. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		item->code_target = (guint8*)code;
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done) {
+					sparc_set (code, (guint32)item->method, sparc_g5);
+					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
+				}
+				item->jmp_code = (guint8*)code;
+				sparc_branch (code, 0, sparc_bne, 0);
+				sparc_nop (code);
+				/* Load the vtable slot contents and jump through it. */
+				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
+				sparc_ld (code, sparc_g5, 0, sparc_g5);
+				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
+				sparc_nop (code);
+			} else {
+				/* enable the commented code to assert on wrong method */
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
+				sparc_ld (code, sparc_g5, 0, sparc_g5);
+				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
+				sparc_nop (code);
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+			}
+		} else {
+			sparc_set (code, (guint32)item->method, sparc_g5);
+			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
+			item->jmp_code = (guint8*)code;
+			sparc_branch (code, 0, sparc_beu, 0);
+			sparc_nop (code);
+		}
+	}
+	/* patch the branches to get to the target items */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->jmp_code) {
+			if (item->check_target_idx) {
+				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+			}
+		}
+	}
+
+	mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
+
+	mono_stats.imt_thunks_size += (code - start) * 4;
+	/* Both sides of the comparison are in words. */
+	g_assert (code - start <= size);
+	return start;
+}
+
+/*
+ * mono_arch_find_imt_method:
+ *
+ *   Return the MonoMethod* an IMT thunk received in g1 (presumably
+ * MONO_ARCH_IMT_REG == sparc_g1 — confirm against the header).
+ * Not implemented for sparcv9.
+ */
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+#ifdef SPARCV9
+	g_assert_not_reached ();
+#endif
+
+	return (MonoMethod*)regs [sparc_g1];
+}
+
+/*
+ * mono_arch_find_this_argument:
+ *
+ *   Return the 'this' argument of the call whose saved registers are in
+ * REGS: the first integer argument register (o0). Calls
+ * mono_sparc_flushw () first, presumably so the register windows are
+ * flushed and REGS reflects the caller's state — confirm.
+ */
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+{
+	mono_sparc_flushw ();
+
+	return (gpointer)regs [sparc_o0];
+}
+
/*
* Some conventions used in the following code.
* 2) The only scratch registers we have are o7 and g1. We try to
offset = (guint8*)code - cfg->native_code;
- spec = ins_spec [ins->opcode];
- if (!spec)
- spec = ins_spec [CEE_ADD];
+ spec = ins_get_spec (ins->opcode);
+	/* Kept from the old code: falling back to the CEE_ADD spec looks like a workaround for a bug — confirm before removing. */
+ if (spec == MONO_ARCH_CPU_SPEC)
+ spec = ins_get_spec (CEE_ADD);
max_len = ((guint8 *)spec)[MONO_INST_LEN];
sparc_cmp (code, ins->sreg1, sparc_o7);
}
break;
- case OP_X86_TEST_NULL:
- sparc_cmp_imm (code, ins->sreg1, 0);
- break;
- case CEE_BREAK:
+ case OP_BREAK:
/*
* gdb does not like encountering 'ta 1' in the debugged code. So
* instead of emitting a trap, we emit a call a C function and place a
* breakpoint there.
*/
//sparc_ta (code, 1);
- mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_sparc_break);
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_arch_break);
EMIT_CALL();
break;
case OP_ADDCC:
EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
break;
case OP_ICONST:
- case OP_SETREGIMM:
sparc_set (code, ins->inst_c0, ins->dreg);
break;
case OP_I8CONST:
case CEE_CONV_I4:
case CEE_CONV_U4:
case OP_MOVE:
- case OP_SETREG:
if (ins->sreg1 != ins->dreg)
sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
break;
/* Only used on V9 */
sparc_fdtos (code, ins->sreg1, ins->dreg);
break;
- case CEE_JMP:
+ case OP_JMP:
if (cfg->method->save_lmf)
NOT_IMPLEMENTED;
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_REG:
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
- g_assert (sparc_is_imm13 (ins->inst_offset));
code = emit_save_sp_to_lmf (cfg, code);
- sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
+ if (sparc_is_imm13 (ins->inst_offset)) {
+ sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
+ } else {
+ sparc_set (code, ins->inst_offset, sparc_o7);
+ sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
+ }
sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
if (call->virtual)
sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_SETFRET:
#endif
/* Keep alignment */
- sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->dreg);
- sparc_set (code, ~(MONO_ARCH_FRAME_ALIGNMENT - 1), sparc_o7);
+ sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
+ sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
NOT_IMPLEMENTED;
#endif
- offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
+ offset = ALIGN_TO (offset, MONO_ARCH_LOCALLOC_ALIGNMENT);
if (sparc_is_imm13 (offset))
sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
else {
/* The return is done in the epilog */
g_assert_not_reached ();
break;
- case CEE_THROW:
+ case OP_THROW:
sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
break;
}
- case CEE_ENDFINALLY: {
+ case OP_ENDFINALLY: {
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (!sparc_is_imm13 (spvar->inst_offset)) {
sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
case OP_LABEL:
ins->inst_c0 = (guint8*)code - cfg->native_code;
break;
- case CEE_BR:
+ case OP_BR:
//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
break;
break;
case CEE_CONV_R4: {
gint32 offset = mono_spillvar_offset_float (cfg, 0);
- if (!sparc_is_imm13 (offset))
- NOT_IMPLEMENTED;
#ifdef SPARCV9
- sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
- sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ if (!sparc_is_imm13 (offset)) {
+ sparc_set (code, offset, sparc_o7);
+ sparc_stx (code, ins->sreg1, sparc_sp, offset);
+ sparc_lddf (code, sparc_sp, offset, FP_SCRATCH_REG);
+ } else {
+ sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
+ sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ }
sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
#else
- sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
- sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ if (!sparc_is_imm13 (offset)) {
+ sparc_set (code, offset, sparc_o7);
+ sparc_st (code, ins->sreg1, sparc_sp, sparc_o7);
+ sparc_ldf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
+ } else {
+ sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
+ sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ }
sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
#endif
sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
}
case CEE_CONV_R8: {
gint32 offset = mono_spillvar_offset_float (cfg, 0);
- if (!sparc_is_imm13 (offset))
- NOT_IMPLEMENTED;
#ifdef SPARCV9
- sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
- sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ if (!sparc_is_imm13 (offset)) {
+ sparc_set (code, offset, sparc_o7);
+ sparc_stx (code, ins->sreg1, sparc_sp, sparc_o7);
+ sparc_lddf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
+ } else {
+ sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
+ sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ }
sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
#else
- sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
- sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ if (!sparc_is_imm13 (offset)) {
+ sparc_set (code, offset, sparc_o7);
+ sparc_st (code, ins->sreg1, sparc_sp, sparc_o7);
+ sparc_ldf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
+ } else {
+ sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
+ sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
+ }
sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
#endif
break;
case OP_FCONV_TO_I4:
case OP_FCONV_TO_U4: {
gint32 offset = mono_spillvar_offset_float (cfg, 0);
- if (!sparc_is_imm13 (offset))
- NOT_IMPLEMENTED;
sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
- sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
- sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
+ if (!sparc_is_imm13 (offset)) {
+ sparc_set (code, offset, sparc_o7);
+ sparc_stdf (code, FP_SCRATCH_REG, sparc_sp, sparc_o7);
+ sparc_ld (code, sparc_sp, sparc_o7, ins->dreg);
+ } else {
+ sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
+ sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
+ }
switch (ins->opcode) {
case OP_FCONV_TO_I1:
EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
break;
- case CEE_CKFINITE: {
+ case OP_CKFINITE: {
gint32 offset = mono_spillvar_offset_float (cfg, 0);
- if (!sparc_is_imm13 (offset))
- NOT_IMPLEMENTED;
- sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
- sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
+ if (!sparc_is_imm13 (offset)) {
+ sparc_set (code, offset, sparc_o7);
+ sparc_stdf (code, ins->sreg1, sparc_sp, sparc_o7);
+ sparc_lduh (code, sparc_sp, sparc_o7, sparc_o7);
+ } else {
+ sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
+ sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
+ }
sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
sparc_cmp_imm (code, sparc_o7, 2047);
#endif
break;
}
+
+ case OP_MEMORY_BARRIER:
+ sparc_membar (code, sparc_membar_all);
+ break;
+
default:
#ifdef __GNUC__
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
void
mono_arch_register_lowlevel_calls (void)
{
- mono_register_jit_icall (mono_sparc_break, "mono_sparc_break", NULL, TRUE);
+ mono_register_jit_icall (mono_arch_break, "mono_arch_break", NULL, TRUE);
mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
}
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
- inst = cfg->varinfo [i];
+ inst = cfg->args [i];
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
- type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
g_assert (exc_class);
+ type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
throw_ip = patch_info->ip.i;
/* Find a throw sequence for the same exception class */
*/
#error "--with-sigaltstack=yes not supported on solaris"
-static void
-setup_stack (MonoJitTlsData *tls)
-{
-#ifdef __linux__
- struct sigaltstack sa;
-#else
- stack_t sigstk;
-#endif
-
- /* Setup an alternate signal stack */
- tls->signal_stack = mmap (0, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
- tls->signal_stack_size = SIGNAL_STACK_SIZE;
-
-#ifdef __linux__
- sa.ss_sp = tls->signal_stack;
- sa.ss_size = SIGNAL_STACK_SIZE;
- sa.ss_flags = 0;
- g_assert (sigaltstack (&sa, NULL) == 0);
-#else
- sigstk.ss_sp = tls->signal_stack;
- sigstk.ss_size = SIGNAL_STACK_SIZE;
- sigstk.ss_flags = 0;
- g_assert (sigaltstack (&sigstk, NULL) == 0);
-#endif
-}
-
#endif
void
#else
pthread_setspecific (lmf_addr_key, &tls->lmf);
#endif
-
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
- setup_stack (tls);
-#endif
}
void
if (vt_reg != -1) {
#ifdef SPARCV9
MonoInst *ins;
- MONO_INST_NEW (cfg, ins, OP_SETREG);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
ins->sreg1 = vt_reg;
ins->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, ins);
- mono_call_inst_add_outarg_reg (call, ins->dreg, sparc_o0, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, sparc_o0, FALSE);
this_out_reg = sparc_o1;
#else
/* add the this argument */
if (this_reg != -1) {
MonoInst *this;
- MONO_INST_NEW (cfg, this, OP_SETREG);
+ MONO_INST_NEW (cfg, this, OP_MOVE);
this->type = this_type;
this->sreg1 = this_reg;
this->dreg = mono_regstate_next_int (cfg->rs);
mono_bblock_add_inst (cfg->cbb, this);
- mono_call_inst_add_outarg_reg (call, this->dreg, this_out_reg, FALSE);
+ mono_call_inst_add_outarg_reg (cfg, call, this->dreg, this_out_reg, FALSE);
}
}
+/*
+ * mono_arch_get_inst_for_method:
+ *
+ *   Map intrinsic methods to arch-specific instructions. Only
+ * Thread.MemoryBarrier () is handled, and only on sparcv9 (which emits
+ * sparc_membar in the OP_MEMORY_BARRIER case of the code emitter).
+ * Returns NULL when no replacement applies.
+ */
MonoInst*
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
-	return NULL;
+	MonoInst *ins = NULL;
+
+	if (cmethod->klass == mono_defaults.thread_class &&
+	    strcmp (cmethod->name, "MemoryBarrier") == 0) {
+		/* v8 has no membar, so only replace the call on v9. */
+		if (sparcv9)
+			MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
+	}
+
+	return ins;
}
/*