#include "inssel.h"
#include "trace.h"
#include "cpu-sparc.h"
+#include "jit-icalls.h"
/*
* Sparc V9 means two things:
#endif
#endif
-#define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
-
#define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
#define SIGNAL_STACK_SIZE (64 * 1024)
mono_arch_cpu_optimizazions(&dummy);
}
+/*
+ * mono_arch_init:
+ *
+ * Initialize architecture specific code.  Nothing to do on SPARC.
+ */
+void
+mono_arch_init (void)
+{
+}
+
+/*
+ * mono_arch_cleanup:
+ *
+ * Cleanup architecture specific code.  Nothing to do on SPARC.
+ */
+void
+mono_arch_cleanup (void)
+{
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
return opts;
}
-static void
-mono_arch_break (void)
-{
-}
-
#ifdef __GNUC__
#define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
#else /* assume Sun's compiler */
/* Hopefully this is optimized based on the actual CPU */
sync_instruction_memory (code, size);
#else
- guint64 *p = (guint64*)code;
- guint64 *end = (guint64*)(code + ((size + 8) /8));
-
- /*
- * FIXME: Flushing code in dword chunks in _slow_.
+ gulong start = (gulong) code;
+ gulong end = start + size;
+ gulong align;
+
+ /* Sparcv9 chips only need flushes on 32 byte
+ * cacheline boundaries.
+ *
+ * Sparcv8 needs a flush every 8 bytes.
*/
- while (p < end)
+ align = (sparcv9 ? 32 : 8);
+
+ start &= ~(align - 1);
+ end = (end + (align - 1)) & ~(align - 1);
+
+ while (start < end) {
#ifdef __GNUC__
- __asm__ __volatile__ ("iflush %0"::"r"(p++));
+ __asm__ __volatile__ ("iflush %0"::"r"(start));
#else
- flushi (p ++);
+ flushi (start);
#endif
+ start += align;
+ }
#endif
}
* the 'Sparc Compliance Definition 2.4' document.
*/
static CallInfo*
-get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
{
guint32 i, gr, fr;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
CallInfo *cinfo;
+ MonoType *ret_type;
+ MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
for (i = 0; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
+ MonoType *ptype;
if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, ainfo, FALSE);
continue;
}
- switch (mono_type_get_underlying_type (sig->params [i])->type) {
+ ptype = mono_type_get_underlying_type (sig->params [i]);
+ ptype = mini_get_basic_type_from_generic (gsctx, ptype);
+ switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
}
/* return value */
- {
- switch (mono_type_get_underlying_type (sig->ret)->type) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_STRING:
- cinfo->ret.storage = ArgInIReg;
- cinfo->ret.reg = sparc_i0;
- if (gr < 1)
- gr = 1;
- break;
- case MONO_TYPE_U8:
- case MONO_TYPE_I8:
+ ret_type = mono_type_get_underlying_type (sig->ret);
+ ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
+ switch (ret_type->type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = sparc_i0;
+ if (gr < 1)
+ gr = 1;
+ break;
+ case MONO_TYPE_U8:
+ case MONO_TYPE_I8:
#ifdef SPARCV9
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = sparc_i0;
+ if (gr < 1)
+ gr = 1;
+#else
+ cinfo->ret.storage = ArgInIRegPair;
+ cinfo->ret.reg = sparc_i0;
+ if (gr < 2)
+ gr = 2;
+#endif
+ break;
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ cinfo->ret.storage = ArgInFReg;
+ cinfo->ret.reg = sparc_f0;
+ break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = sparc_i0;
if (gr < 1)
gr = 1;
-#else
- cinfo->ret.storage = ArgInIRegPair;
- cinfo->ret.reg = sparc_i0;
- if (gr < 2)
- gr = 2;
-#endif
break;
- case MONO_TYPE_R4:
- case MONO_TYPE_R8:
- cinfo->ret.storage = ArgInFReg;
- cinfo->ret.reg = sparc_f0;
- break;
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
- cinfo->ret.storage = ArgInIReg;
- cinfo->ret.reg = sparc_i0;
- if (gr < 1)
- gr = 1;
- break;
- }
- /* Fall through */
- case MONO_TYPE_VALUETYPE:
- if (v64) {
- if (sig->pinvoke)
- NOT_IMPLEMENTED;
- else
- /* Already done */
- ;
- }
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ if (v64) {
+ if (sig->pinvoke)
+ NOT_IMPLEMENTED;
else
- cinfo->ret.storage = ArgOnStack;
- break;
- case MONO_TYPE_TYPEDBYREF:
- if (v64) {
- if (sig->pinvoke)
- /* Same as a valuetype with size 24 */
- NOT_IMPLEMENTED;
- else
- /* Already done */
- ;
- }
+ /* Already done */
+ ;
+ }
+ else
+ cinfo->ret.storage = ArgOnStack;
+ break;
+ case MONO_TYPE_TYPEDBYREF:
+ if (v64) {
+ if (sig->pinvoke)
+ /* Same as a valuetype with size 24 */
+ NOT_IMPLEMENTED;
else
- cinfo->ret.storage = ArgOnStack;
- break;
- case MONO_TYPE_VOID:
- break;
- default:
- g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ /* Already done */
+ ;
}
+ else
+ cinfo->ret.storage = ArgOnStack;
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
}
cinfo->stack_usage = stack_size;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* Use unused input registers */
for (i = cinfo->reg_usage; i < 6; ++i)
sig = mono_method_signature (m->method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (m, sig, FALSE);
if (sig->ret->type != MONO_TYPE_VOID) {
switch (cinfo->ret.storage) {
if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
else
- size = mono_type_stack_size (inst->inst_vtype, &align);
+ size = mini_type_stack_size (m->generic_sharing_context, inst->inst_vtype, &align);
/*
* This is needed since structures containing doubles must be doubleword
sig_arg->inst_p0 = tmp_sig;
arg->inst_left = sig_arg;
arg->type = STACK_PTR;
- /* prepend, so they get reversed */
- arg->next = call->out_args;
- call->out_args = arg;
+ MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
}
/*
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (sig, sig->pinvoke);
+ cinfo = get_call_info (cfg, sig, sig->pinvoke);
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
arg->cil_code = in->cil_code;
arg->inst_left = in;
arg->type = in->type;
- /* prepend, we'll need to reverse them later */
- arg->next = call->out_args;
- call->out_args = arg;
+ MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
MonoInst *inst;
size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
else {
/*
- * Can't use mono_type_stack_size (), but that
+ * Can't use mini_type_stack_size (), but that
* aligns the size to sizeof (gpointer), which is larger
* than the size of the source, leading to reads of invalid
* memory if the source is at the end of address space or
emit_sig_cookie (cfg, call, cinfo);
}
- /*
- * Reverse the call->out_args list.
- */
- {
- MonoInst *prev = NULL, *list = call->out_args, *next;
- while (list) {
- next = list->next;
- list->next = prev;
- prev = list;
- list = next;
- }
- call->out_args = prev;
- }
call->stack_usage = cinfo->stack_usage + extra_space;
call->out_ireg_args = NULL;
call->out_freg_args = NULL;
#define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
MONO_PATCH_INFO_EXC, sexc_name); \
- if (sparcv9) { \
+ if (sparcv9 && ((icc) != sparc_icc_short)) { \
sparc_branchp (code, 0, (cond), (icc), 0, 0); \
} \
else { \
static void
peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *last_ins = NULL;
- ins = bb->code;
+ MonoInst *ins, *n;
- while (ins) {
+ MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (ins, n, &bb->ins_list, node) {
+ MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
switch (ins->opcode) {
case OP_MUL_IMM:
if (ins->dreg != ins->sreg1) {
ins->opcode = OP_MOVE;
} else {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
}
}
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
} else {
//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->dreg) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
} else {
ins->opcode = OP_MOVE;
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
} else {
//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
} else {
//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
if (sparcv9) {
last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
last_ins->inst_offset = ins->inst_offset;
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
}
}
if (v64 && (mono_method_get_header (cfg->method)->code_size < 10000) && last_ins &&
(last_ins->opcode == OP_COMPARE_IMM) &&
(last_ins->inst_imm == 0)) {
- MonoInst *next = ins->next;
switch (ins->opcode) {
case CEE_BEQ:
ins->opcode = OP_SPARC_BRZ;
default:
g_assert_not_reached ();
}
- ins->sreg1 = last_ins->sreg1;
- *last_ins = *ins;
- last_ins->next = next;
- ins = next;
+ last_ins->data = ins->data;
+ last_ins->opcode = ins->opcode;
+ last_ins->type = ins->type;
+ last_ins->ssa_op = ins->ssa_op;
+ last_ins->flags = ins->flags;
+ last_ins->dreg = ins->dreg;
+ last_ins->sreg2 = ins->sreg2;
+ last_ins->backend = ins->backend;
+ last_ins->klass = ins->klass;
+ last_ins->cil_code = ins->cil_code;
+ MONO_DEL_INS (ins);
continue;
}
break;
* OP_MOVE reg, reg
*/
if (ins->dreg == ins->sreg1) {
- if (last_ins)
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
}
/*
if (last_ins && last_ins->opcode == OP_MOVE &&
ins->sreg1 == last_ins->dreg &&
ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
+ MONO_DEL_INS (ins);
continue;
}
break;
}
- last_ins = ins;
- ins = ins->next;
}
- bb->last_ins = last_ins;
}
static int
}
static guint32*
-emit_vret_token (MonoInst *ins, guint32 *code)
+emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
{
MonoCallInst *call = (MonoCallInst*)ins;
guint32 size;
*/
if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
- size = mono_type_stack_size (call->signature->ret, NULL);
+ size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
else
size = mono_class_native_size (call->signature->ret->data.klass, NULL);
sparc_unimp (code, size & 0xfff);
sig = mono_method_signature (method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* This is the opposite of the code in emit_prolog */
}
/*
- * mono_arch_get_vcall_slot_addr:
+ * mono_arch_get_vcall_slot:
*
* Determine the vtable slot used by a virtual call.
*/
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
+gpointer
+mono_arch_get_vcall_slot (guint8 *code8, gpointer *regs, int *displacement)
{
guint32 *code = (guint32*)(gpointer)code8;
guint32 ins = code [0];
mono_sparc_flushw ();
+ *displacement = 0;
+
if (!mono_sparc_is_virtual_call (code))
return NULL;
if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
/* ld [r1 + CONST ], r2; call r2 */
guint32 base = sparc_inst_rs1 (prev_ins);
- guint32 disp = sparc_inst_imm13 (prev_ins);
+ gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
gpointer base_val;
g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
- base_val = regs [base - sparc_o0];
+ base_val = regs [base];
+
+ *displacement = disp;
- return (gpointer)((guint8*)base_val + disp);
+ return (gpointer)base_val;
}
else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
/* set r1, ICONST; ld [r1 + r2], r2; call r2 */
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
- base_val = regs [base - sparc_o0];
+ base_val = regs [base];
+
+ *displacement = disp;
- return (gpointer)((guint8*)base_val + disp);
+ return (gpointer)base_val;
} else
g_assert_not_reached ();
}
return NULL;
}
+/*
+ * mono_arch_get_vcall_slot_addr:
+ *
+ * Backwards compatible wrapper around mono_arch_get_vcall_slot (): returns
+ * the address of the vtable slot used by the virtual call at CODE (base
+ * pointer plus displacement), or NULL if CODE is not a recognized virtual
+ * call site.
+ */
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+{
+	gpointer vt;
+	int displacement;
+	vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+	if (!vt)
+		return NULL;
+	return (gpointer*)((char*)vt + displacement);
+}
+
+/*
+ * Per-entry code size estimates, in 4-byte instruction words (the reserve
+ * below multiplies by 4 to get bytes).  They are upper bounds, checked by
+ * the g_assert () at the end of mono_arch_build_imt_thunk (): sparc_set is
+ * at most 2 insns plus 1 for the cmp (CMP_SIZE), a branch plus its delay
+ * slot nop is 2 (BR_*_SIZE), and the set/ld/jmpl/nop indirect jump
+ * sequence is at most 5 (JUMP_IMM_SIZE).
+ */
+#define CMP_SIZE 3
+#define BR_SMALL_SIZE 2
+#define BR_LARGE_SIZE 2
+#define JUMP_IMM_SIZE 5
+#define ENABLE_WRONG_METHOD_CHECK 0
+
+/*
+ * mono_arch_build_imt_thunk:
+ *
+ * Build a native trampoline which dispatches an interface call: it
+ * compares the method token in MONO_ARCH_IMT_REG against each entry in
+ * IMT_ENTRIES and, on a match, loads the target from the corresponding
+ * vtable slot and jumps to it.
+ *
+ * LOCKING: called with the domain lock held
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+	int i;
+	int size = 0;
+	guint32 *code, *start;
+
+	/* First pass: compute an upper bound on the size of each entry's code. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done)
+					item->chunk_size += CMP_SIZE;
+				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
+			} else {
+				item->chunk_size += JUMP_IMM_SIZE;
+#if ENABLE_WRONG_METHOD_CHECK
+				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
+#endif
+			}
+		} else {
+			/* Range check node: the compare it emits also serves the
+			 * target node, so mark the latter's compare as done. */
+			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
+			imt_entries [item->check_target_idx]->compare_done = TRUE;
+		}
+		size += item->chunk_size;
+	}
+	/* size is in instruction words, the code manager wants bytes. */
+	code = mono_code_manager_reserve (domain->code_mp, size * 4);
+	start = code;
+
+	/* Second pass: emit the compare/branch/jump sequence for each entry. */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		item->code_target = (guint8*)code;
+		if (item->is_equals) {
+			if (item->check_target_idx) {
+				if (!item->compare_done) {
+					sparc_set (code, (guint32)item->method, sparc_g5);
+					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
+				}
+				/* Branch to the next case if the tokens differ;
+				 * patched to the real target below. */
+				item->jmp_code = (guint8*)code;
+				sparc_branch (code, 0, sparc_bne, 0);
+				sparc_nop (code);
+				/* Load the target from the vtable slot and jump to it. */
+				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
+				sparc_ld (code, sparc_g5, 0, sparc_g5);
+				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
+				sparc_nop (code);
+			} else {
+				/* enable the commented code to assert on wrong method */
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
+				sparc_ld (code, sparc_g5, 0, sparc_g5);
+				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
+				sparc_nop (code);
+#if ENABLE_WRONG_METHOD_CHECK
+				g_assert_not_reached ();
+#endif
+			}
+		} else {
+			/* Range check: unsigned compare against the split point.
+			 * NOTE(review): sparc_beu appears to be the unsigned
+			 * less-or-equal branch -- confirm against sparc-codegen.h. */
+			sparc_set (code, (guint32)item->method, sparc_g5);
+			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
+			item->jmp_code = (guint8*)code;
+			sparc_branch (code, 0, sparc_beu, 0);
+			sparc_nop (code);
+		}
+	}
+	/* patch the branches to get to the target items */
+	for (i = 0; i < count; ++i) {
+		MonoIMTCheckItem *item = imt_entries [i];
+		if (item->jmp_code) {
+			if (item->check_target_idx) {
+				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+			}
+		}
+	}
+
+	mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
+
+	mono_stats.imt_thunks_size += (code - start) * 4;
+	/* Verify the first-pass size estimate really was an upper bound. */
+	g_assert (code - start <= size);
+	return start;
+}
+
+/*
+ * mono_arch_find_imt_method:
+ *
+ * Return the interface MonoMethod which the caller of an IMT thunk placed
+ * in %g1 (presumably MONO_ARCH_IMT_REG -- confirm against the header).
+ * Not supported on V9.
+ */
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+#ifdef SPARCV9
+	g_assert_not_reached ();
+#endif
+
+	return (MonoMethod*)regs [sparc_g1];
+}
+
+/*
+ * mono_arch_find_this_argument:
+ *
+ * Return the 'this' argument of the call described by REGS, taken from
+ * %o0, the first integer argument register.  mono_sparc_flushw () flushes
+ * the register windows first -- NOTE(review): presumably so that REGS
+ * reflects the spilled caller state; confirm against the caller.
+ */
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+{
+	mono_sparc_flushw ();
+
+	return (gpointer)regs [sparc_o0];
+}
+
/*
* Some conventions used in the following code.
* 2) The only scratch registers we have are o7 and g1. We try to
MonoCallInst *call;
guint offset;
guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
- MonoInst *last_ins = NULL;
int max_len, cpos;
const char *spec;
NOT_IMPLEMENTED;
}
- ins = bb->code;
- while (ins) {
+ MONO_BB_FOR_EACH_INS (bb, ins) {
guint8* code_start;
offset = (guint8*)code - cfg->native_code;
spec = ins_get_spec (ins->opcode);
- /* I kept this, but this looks a workaround for a bug */
- if (spec == MONO_ARCH_CPU_SPEC)
- spec = ins_get_spec (CEE_ADD);
max_len = ((guint8 *)spec)[MONO_INST_LEN];
case OP_STORE_MEMBASE_REG:
EMIT_STORE_MEMBASE_REG (ins, sti);
break;
- case CEE_LDIND_I:
-#ifdef SPARCV9
- sparc_ldx (code, ins->inst_c0, sparc_g0, ins->dreg);
-#else
- sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
-#endif
- break;
- case CEE_LDIND_I4:
-#ifdef SPARCV9
- sparc_ldsw (code, ins->inst_c0, sparc_g0, ins->dreg);
-#else
- sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
-#endif
- break;
- case CEE_LDIND_U4:
- sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
- break;
case OP_LOADU4_MEM:
sparc_set (code, ins->inst_c0, ins->dreg);
sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
* breakpoint there.
*/
//sparc_ta (code, 1);
- mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_arch_break);
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
EMIT_CALL();
break;
case OP_ADDCC:
if (ins->sreg1 != ins->dreg)
sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
break;
- case OP_SETFREG:
+ case OP_FMOVE:
/* Only used on V9 */
if (ins->sreg1 != ins->dreg)
sparc_fmovd (code, ins->sreg1, ins->dreg);
break;
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
- sparc_ld_imm (code, ins->sreg1, 0, sparc_g0);
+ /* Might be misaligned in case of vtypes so use a byte load */
+ sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
break;
case OP_ARGLIST:
sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_REG:
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_FCALL_MEMBASE:
else
sparc_nop (code);
- code = emit_vret_token (ins, code);
+ code = emit_vret_token (cfg->generic_sharing_context, ins, code);
code = emit_move_return_value (ins, code);
break;
case OP_SETFRET:
}
break;
}
- case CEE_RET:
- /* The return is done in the epilog */
- g_assert_not_reached ();
- break;
case OP_THROW:
sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
ins->inst_c0 = (guint8*)code - cfg->native_code;
break;
case OP_BR:
- //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
- if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
+ if ((ins->inst_target_bb == bb->next_bb) &&
+ ins->node.next == &bb->ins_list)
break;
if (ins->flags & MONO_INST_BRLABEL) {
if (ins->inst_i0->inst_c0) {
}
cpos += max_len;
-
- last_ins = ins;
-
- ins = ins->next;
}
cfg->code_len = (guint8*)code - cfg->native_code;
void
mono_arch_register_lowlevel_calls (void)
{
- mono_register_jit_icall (mono_arch_break, "mono_arch_break", NULL, TRUE);
mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
}
for (i = 0; i < 6; ++i)
sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* Save float regs on V9, since they are caller saved */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
sig = mono_method_signature (method);
- cinfo = get_call_info (sig, FALSE);
+ cinfo = get_call_info (cfg, sig, FALSE);
/* Keep in sync with emit_load_volatile_arguments */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
/* or reg, imm, %i0 */
int reg = sparc_inst_rs1 (code [-2]);
- int imm = sparc_inst_imm13 (code [-2]);
+ int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
code [-2] = code [-1];
code --;
sparc_restore_imm (code, reg, imm, sparc_o0);
sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
- type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
g_assert (exc_class);
+ type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
throw_ip = patch_info->ip.i;
/* Find a throw sequence for the same exception class */
CallInfo *cinfo;
ArgInfo *ainfo;
- cinfo = get_call_info (csig, FALSE);
+ cinfo = get_call_info (NULL, csig, FALSE);
if (csig->hasthis) {
ainfo = &cinfo->args [0];