#define BREAKPOINT_SIZE (PPC_LOAD_SEQUENCE_LENGTH + 4)
/* This mutex protects architecture specific caches */
-#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
-#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
-static CRITICAL_SECTION mini_arch_mutex;
+#define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
+#define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
+static mono_mutex_t mini_arch_mutex;
int mono_exc_esp_offset = 0;
static int tls_mode = TLS_MODE_DETECT;
/* FIXME: ensure the sc call preserves all but r3 */
#define emit_darwing4_tls(code,dreg,key) do {\
int off1 = 0x48 + key * sizeof (gpointer); \
- if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r11, ppc_r3); \
+ if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r12, ppc_r3); \
ppc_li ((code), ppc_r0, 0x7FF2); \
ppc_sc ((code)); \
ppc_lwz ((code), (dreg), off1, ppc_r3); \
- if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r3, ppc_r11); \
+ if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r3, ppc_r12); \
} while (0);
#ifdef PPC_THREAD_PTR_REG
ppc_ldptr ((code), (dreg), off1, PPC_THREAD_PTR_REG); \
} else { \
int off3 = (off2 + 1) > 1; \
- ppc_addis ((code), ppc_r11, PPC_THREAD_PTR_REG, off3); \
- ppc_ldptr ((code), (dreg), off1, ppc_r11); \
+ ppc_addis ((code), ppc_r12, PPC_THREAD_PTR_REG, off3); \
+ ppc_ldptr ((code), (dreg), off1, ppc_r12); \
} \
} while (0);
#else
ppc_load (code, ppc_r0, shifted);
ppc_mtctr (code, ppc_r0);
- //g_assert (sreg == ppc_r11);
- ppc_addi (code, ppc_r12, dreg, (doffset - sizeof (gpointer)));
- ppc_addi (code, ppc_r11, sreg, (soffset - sizeof (gpointer)));
+ //g_assert (sreg == ppc_r12);
+ ppc_addi (code, ppc_r11, dreg, (doffset - sizeof (gpointer)));
+ ppc_addi (code, ppc_r12, sreg, (soffset - sizeof (gpointer)));
copy_loop_start = code;
- ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
- ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
+ ppc_ldptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
+ ppc_stptr_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
copy_loop_jump = code;
ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
ppc_patch (copy_loop_jump, copy_loop_start);
size -= shifted * sizeof (gpointer);
doffset = soffset = 0;
- dreg = ppc_r12;
+ dreg = ppc_r11;
}
#ifdef __mono_ppc64__
/* the hardware has multiple load/store units and the move is long
enough to use more then one regiester, then use load/load/store/store
to execute 2 instructions per cycle. */
- if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r12) && (sreg != ppc_r12)) {
+ if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
while (size >= 16) {
ppc_ldptr (code, ppc_r0, soffset, sreg);
- ppc_ldptr (code, ppc_r12, soffset+8, sreg);
+ ppc_ldptr (code, ppc_r11, soffset+8, sreg);
ppc_stptr (code, ppc_r0, doffset, dreg);
- ppc_stptr (code, ppc_r12, doffset+8, dreg);
+ ppc_stptr (code, ppc_r11, doffset+8, dreg);
size -= 16;
soffset += 16;
doffset += 16;
doffset += 8;
}
#else
- if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r12) && (sreg != ppc_r12)) {
+ if ((cpu_hw_caps & PPC_MULTIPLE_LS_UNITS) && (dreg != ppc_r11) && (sreg != ppc_r11)) {
while (size >= 8) {
ppc_lwz (code, ppc_r0, soffset, sreg);
- ppc_lwz (code, ppc_r12, soffset+4, sreg);
+ ppc_lwz (code, ppc_r11, soffset+4, sreg);
ppc_stw (code, ppc_r0, doffset, dreg);
- ppc_stw (code, ppc_r12, doffset+4, dreg);
+ ppc_stw (code, ppc_r11, doffset+4, dreg);
size -= 8;
soffset += 8;
doffset += 8;
return start;
}
+/*
+ * mono_arch_get_delegate_virtual_invoke_impl:
+ *
+ * No PPC-specific virtual-invoke delegate thunk is provided: this
+ * always returns NULL, which presumably makes the caller fall back to
+ * a generic code path -- NOTE(review): confirm against the callers of
+ * this arch hook in the shared mini code.
+ */
+gpointer
+mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
+{
+ return NULL;
+}
+
+
gpointer
mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
if (mono_cpu_count () > 1)
cpu_hw_caps |= PPC_SMP_CAPABLE;
- InitializeCriticalSection (&mini_arch_mutex);
+ mono_mutex_init_recursive (&mini_arch_mutex);
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
void
mono_arch_cleanup (void)
{
+ /* Destroy the arch-specific cache lock (initialized during arch
+ * init with mono_mutex_init_recursive); this patch replaces the
+ * Win32-only CriticalSection API with the portable mono_mutex one. */
- DeleteCriticalSection (&mini_arch_mutex);
+ mono_mutex_destroy (&mini_arch_mutex);
}
/*
for (i = 14; i < top; ++i) {
/*
* Reserve r29 for holding the vtable address for virtual calls in AOT mode,
- * since the trampolines can clobber r11.
+ * since the trampolines can clobber r12.
*/
if (!(cfg->compile_aot && i == 29))
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
ppc_load_ptr (code, ppc_r3, cfg->method);
ppc_li (code, ppc_r4, 0); /* NULL ebp for now */
- ppc_load_func (code, ppc_r0, func);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, func);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
return code;
}
}
ppc_load_ptr (code, ppc_r3, cfg->method);
- ppc_load_func (code, ppc_r0, func);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, func);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
switch (save_mode) {
static void
patch_ins (guint8 *code, guint32 ins)
{
+ /* Store the instruction word in host byte order and flush the
+ * icache for it.  The old code byte-swapped to big-endian, which is
+ * wrong once little-endian (ppc64le) targets are supported -- this
+ * matches the _LITTLE_ENDIAN handling added elsewhere in this patch. */
- *(guint32*)code = GUINT32_TO_BE (ins);
+ *(guint32*)code = ins;
	mono_arch_flush_icache (code, 4);
}
void
ppc_patch_full (guchar *code, const guchar *target, gboolean is_fd)
{
- guint32 ins = GUINT32_FROM_BE (*(guint32*)code);
+ guint32 ins = *(guint32*)code;
guint32 prim = ins >> 26;
guint32 ovf;
else
code -= 24;
} else {
- if (ppc_is_load_op (seq [5]) || ppc_opcode (seq [5]) == 31) /* ld || lwz || mr */
+ if (ppc_is_load_op (seq [5])
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ /* With function descs we need to do more careful
+ matches. */
+ || ppc_opcode (seq [5]) == 31 /* ld || lwz || mr */
+#endif
+ )
branch_ins = seq + 8;
else
branch_ins = seq + 6;
if (!is_fd) {
guint8 *buf = (guint8*)&seq [5];
- ppc_mr (buf, ppc_r0, ppc_r11);
+ ppc_mr (buf, PPC_CALL_REG, ppc_r12);
ppc_nop (buf);
}
} else {
}
/* FIXME: make this thread safe */
- /* FIXME: we're assuming we're using r11 here */
- ppc_load_ptr_sequence (code, ppc_r11, target);
+#ifdef PPC_USES_FUNCTION_DESCRIPTOR
+ /* FIXME: we're assuming we're using r12 here */
+ ppc_load_ptr_sequence (code, ppc_r12, target);
+#else
+ ppc_load_ptr_sequence (code, PPC_CALL_REG, target);
+#endif
mono_arch_flush_icache ((guint8*)seq, 28);
#else
guint32 *seq;
g_assert ((seq [2] >> 26) == 31);
g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
/* FIXME: make this thread safe */
- ppc_lis (code, ppc_r0, (guint32)(target) >> 16);
- ppc_ori (code, ppc_r0, ppc_r0, (guint32)(target) & 0xffff);
+ ppc_lis (code, PPC_CALL_REG, (guint32)(target) >> 16);
+ ppc_ori (code, PPC_CALL_REG, PPC_CALL_REG, (guint32)(target) & 0xffff);
mono_arch_flush_icache (code - 8, 8);
#endif
} else {
if (ppc_is_imm16 (-size)) {
ppc_stptr_update (code, ppc_r0, -size, ppc_sp);
} else {
- ppc_load (code, ppc_r11, -size);
- ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
+ ppc_load (code, ppc_r12, -size);
+ ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
}
return code;
if (ppc_is_imm16 (size)) {
ppc_stptr_update (code, ppc_r0, size, ppc_sp);
} else {
- ppc_load (code, ppc_r11, size);
- ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
+ ppc_load (code, ppc_r12, size);
+ ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
}
return code;
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
+ case OP_IL_SEQ_POINT:
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+ break;
case OP_SEQ_POINT: {
int i;
* a breakpoint is hit will step to the next IL offset.
*/
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
- ppc_load (code, ppc_r11, (gsize)ss_trigger_page);
- ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+ ppc_load (code, ppc_r12, (gsize)ss_trigger_page);
+ ppc_ldptr (code, ppc_r12, 0, ppc_r12);
}
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
ppc_stb (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stb (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stbx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_sth (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_sth (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_sthx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_stptr (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stptr (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stptr_indexed (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_break");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
*/
g_assert (!cfg->method->save_lmf);
/*
- * Note: we can use ppc_r11 here because it is dead anyway:
+ * Note: we can use ppc_r12 here because it is dead anyway:
* we're leaving the method.
*/
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
if (ppc_is_imm16 (ret_offset)) {
ppc_ldptr (code, ppc_r0, ret_offset, cfg->frame_reg);
} else {
- ppc_load (code, ppc_r11, ret_offset);
- ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
+ ppc_load (code, ppc_r12, ret_offset);
+ ppc_ldptr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
}
ppc_mtlr (code, ppc_r0);
}
if (ppc_is_imm16 (cfg->stack_usage)) {
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
+ ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
} else {
/* cfg->stack_usage is an int, so we can use
* an addis/addi sequence here even in 64-bit. */
- ppc_addis (code, ppc_r11, cfg->frame_reg, ppc_ha(cfg->stack_usage));
- ppc_addi (code, ppc_r11, ppc_r11, cfg->stack_usage);
+ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
+ ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
}
if (!cfg->method->save_lmf) {
pos = 0;
for (i = 31; i >= 13; --i) {
if (cfg->used_int_regs & (1 << i)) {
pos += sizeof (gpointer);
- ppc_ldptr (code, i, -pos, ppc_r11);
+ ppc_ldptr (code, i, -pos, ppc_r12);
}
}
} else {
/* Copy arguments on the stack to our argument area */
if (call->stack_usage) {
- code = emit_memcpy (code, call->stack_usage, ppc_r11, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
- /* r11 was clobbered */
+ code = emit_memcpy (code, call->stack_usage, ppc_r12, PPC_STACK_PARAM_OFFSET, ppc_sp, PPC_STACK_PARAM_OFFSET);
+ /* r12 was clobbered */
g_assert (cfg->frame_reg == ppc_sp);
if (ppc_is_imm16 (cfg->stack_usage)) {
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
+ ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage);
} else {
/* cfg->stack_usage is an int, so we can use
* an addis/addi sequence here even in 64-bit. */
- ppc_addis (code, ppc_r11, cfg->frame_reg, ppc_ha(cfg->stack_usage));
- ppc_addi (code, ppc_r11, ppc_r11, cfg->stack_usage);
+ ppc_addis (code, ppc_r12, cfg->frame_reg, ppc_ha(cfg->stack_usage));
+ ppc_addi (code, ppc_r12, ppc_r12, cfg->stack_usage);
}
}
- ppc_mr (code, ppc_sp, ppc_r11);
+ ppc_mr (code, ppc_sp, ppc_r12);
mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
if (cfg->compile_aot) {
/* arch_emit_got_access () patches this */
ppc_load32 (code, ppc_r0, 0);
#ifdef PPC_USES_FUNCTION_DESCRIPTOR
- ppc_ldptr_indexed (code, ppc_r11, ppc_r30, ppc_r0);
- ppc_ldptr (code, ppc_r0, 0, ppc_r11);
+ ppc_ldptr_indexed (code, ppc_r12, ppc_r30, ppc_r0);
+ ppc_ldptr (code, ppc_r0, 0, ppc_r12);
#else
ppc_ldptr_indexed (code, ppc_r0, ppc_r30, ppc_r0);
#endif
else
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
- if (cfg->compile_aot && ins->sreg1 == ppc_r11) {
+ if (cfg->compile_aot && ins->sreg1 == ppc_r12) {
/* The trampolines clobber this */
ppc_mr (code, ppc_r29, ins->sreg1);
ppc_ldptr (code, ppc_r0, ins->inst_offset, ppc_r29);
int alloca_waste = PPC_STACK_PARAM_OFFSET + cfg->param_area + 31;
int area_offset = alloca_waste;
area_offset &= ~31;
- ppc_addi (code, ppc_r11, ins->sreg1, alloca_waste + 31);
+ ppc_addi (code, ppc_r12, ins->sreg1, alloca_waste + 31);
/* FIXME: should be calculated from MONO_ARCH_FRAME_ALIGNMENT */
- ppc_clear_right_imm (code, ppc_r11, ppc_r11, 4);
+ ppc_clear_right_imm (code, ppc_r12, ppc_r12, 4);
/* use ctr to store the number of words to 0 if needed */
if (ins->flags & MONO_INST_INIT) {
/* we zero 4 bytes at a time:
ppc_mtctr (code, ppc_r0);
}
ppc_ldptr (code, ppc_r0, 0, ppc_sp);
- ppc_neg (code, ppc_r11, ppc_r11);
- ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r11);
+ ppc_neg (code, ppc_r12, ppc_r12);
+ ppc_stptr_update_indexed (code, ppc_r0, ppc_sp, ppc_r12);
/* FIXME: make this loop work in 8 byte
increments on PPC64 */
* run at least once
*/
ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8));
- ppc_li (code, ppc_r11, 0);
+ ppc_li (code, ppc_r12, 0);
zero_loop_start = code;
- ppc_stwu (code, ppc_r11, 4, ins->dreg);
+ ppc_stwu (code, ppc_r12, 4, ins->dreg);
zero_loop_jump = code;
ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
ppc_patch (zero_loop_jump, zero_loop_start);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
if (ppc_is_imm16 (spvar->inst_offset)) {
ppc_stptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
} else {
- ppc_load (code, ppc_r11, spvar->inst_offset);
- ppc_stptr_indexed (code, ppc_r0, ppc_r11, spvar->inst_basereg);
+ ppc_load (code, ppc_r12, spvar->inst_offset);
+ ppc_stptr_indexed (code, ppc_r0, ppc_r12, spvar->inst_basereg);
}
break;
}
if (ppc_is_imm16 (spvar->inst_offset)) {
ppc_ldptr (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
} else {
- ppc_load (code, ppc_r11, spvar->inst_offset);
- ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, spvar->inst_offset);
+ ppc_ldptr_indexed (code, ppc_r0, spvar->inst_basereg, ppc_r12);
}
ppc_mtlr (code, ppc_r0);
ppc_blr (code);
/* FIXME: Optimize this */
ppc_bl (code, 1);
- ppc_mflr (code, ppc_r11);
+ ppc_mflr (code, ppc_r12);
ppc_b (code, 3);
*(double*)code = *(double*)ins->inst_p0;
code += 8;
- ppc_lfd (code, ins->dreg, 8, ppc_r11);
+ ppc_lfd (code, ins->dreg, 8, ppc_r12);
break;
case OP_R4CONST:
g_assert_not_reached ();
ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stfd (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stfdx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_lfd (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_lfd (code, ins->dreg, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_lfdx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
ppc_stfs (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_stfs (code, ins->sreg1, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_stfsx (code, ins->sreg1, ins->inst_destbasereg, ppc_r0);
ppc_lfs (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
} else {
if (ppc_is_imm32 (ins->inst_offset)) {
- ppc_addis (code, ppc_r12, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
- ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r12);
+ ppc_addis (code, ppc_r11, ins->inst_destbasereg, ppc_ha(ins->inst_offset));
+ ppc_lfs (code, ins->dreg, ins->inst_offset, ppc_r11);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_lfsx (code, ins->dreg, ins->inst_destbasereg, ppc_r0);
}
#ifdef __mono_ppc64__
+#ifdef _LITTLE_ENDIAN
+#define patch_load_sequence(ip,val) do {\
+ guint16 *__load = (guint16*)(ip); \
+ g_assert (sizeof (val) == sizeof (gsize)); \
+ __load [0] = (((guint64)(gsize)(val)) >> 48) & 0xffff; \
+ __load [2] = (((guint64)(gsize)(val)) >> 32) & 0xffff; \
+ __load [6] = (((guint64)(gsize)(val)) >> 16) & 0xffff; \
+ __load [8] = ((guint64)(gsize)(val)) & 0xffff; \
+ } while (0)
+#elif defined _BIG_ENDIAN
#define patch_load_sequence(ip,val) do {\
guint16 *__load = (guint16*)(ip); \
g_assert (sizeof (val) == sizeof (gsize)); \
__load [9] = ((guint64)(gsize)(val)) & 0xffff; \
} while (0)
#else
+#error huh? No endianness defined by compiler
+#endif
+#else
#define patch_load_sequence(ip,val) do {\
guint16 *__lis_ori = (guint16*)(ip); \
__lis_ori [1] = (((gulong)(val)) >> 16) & 0xffff; \
code = save_registers (cfg, code, alloc_size - pos, ppc_sp, method->save_lmf, cfg->used_int_regs, cfa_offset);
} else {
if (pos)
- ppc_addi (code, ppc_r11, ppc_sp, -pos);
+ ppc_addi (code, ppc_r12, ppc_sp, -pos);
ppc_load (code, ppc_r0, -alloc_size);
ppc_str_update_indexed (code, ppc_sp, ppc_sp, ppc_r0);
cfa_offset = alloc_size;
mono_emit_unwind_op_def_cfa_offset (cfg, code, alloc_size);
- code = save_registers (cfg, code, 0, ppc_r11, method->save_lmf, cfg->used_int_regs, cfa_offset);
+ code = save_registers (cfg, code, 0, ppc_r12, method->save_lmf, cfg->used_int_regs, cfa_offset);
}
}
if (cfg->frame_reg != ppc_sp) {
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stptr_indexed (code, ainfo->reg, ppc_r11, inst->inst_basereg);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stptr_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
}
}
else if (ainfo->regtype == RegTypeFP)
ppc_fmr (code, inst->dreg, ainfo->reg);
else if (ainfo->regtype == RegTypeBase) {
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
- ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r11);
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
+ ppc_ldptr (code, inst->dreg, ainfo->offset, ppc_r12);
} else
g_assert_not_reached ();
ppc_stb (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stb (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stbx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_sth (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_sth (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_sthx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stw (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stwx (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_str (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_str_indexed (code, ainfo->reg, ppc_r11, inst->inst_basereg);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_str_indexed (code, ainfo->reg, ppc_r12, inst->inst_basereg);
}
break;
#else
ppc_stw (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
ppc_stw (code, ainfo->reg + 1, inst->inst_offset + 4, inst->inst_basereg);
} else {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_addi (code, ppc_r11, ppc_r11, inst->inst_offset);
- ppc_stw (code, ainfo->reg, 0, ppc_r11);
- ppc_stw (code, ainfo->reg + 1, 4, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
+ ppc_stw (code, ainfo->reg, 0, ppc_r12);
+ ppc_stw (code, ainfo->reg + 1, 4, ppc_r12);
}
break;
#endif
ppc_stptr (code, ainfo->reg, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stptr (code, ainfo->reg, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stptr_indexed (code, ainfo->reg, inst->inst_basereg, ppc_r12);
}
}
break;
}
} else if (ainfo->regtype == RegTypeBase) {
g_assert (ppc_is_imm16 (ainfo->offset));
- /* load the previous stack pointer in r11 */
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
- ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r11);
+ /* load the previous stack pointer in r12 */
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
+ ppc_ldptr (code, ppc_r0, ainfo->offset, ppc_r12);
switch (ainfo->size) {
case 1:
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_stb (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stb (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stbx (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_sth (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_sth (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_sthx (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stw (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stwx (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
if (ppc_is_imm16 (inst->inst_offset)) {
ppc_str (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_str_indexed (code, ppc_r0, ppc_r11, inst->inst_basereg);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_str_indexed (code, ppc_r0, ppc_r12, inst->inst_basereg);
}
break;
#else
g_assert (ppc_is_imm16 (ainfo->offset + 4));
if (ppc_is_imm16 (inst->inst_offset + 4)) {
ppc_stw (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
- ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r11);
+ ppc_lwz (code, ppc_r0, ainfo->offset + 4, ppc_r12);
ppc_stw (code, ppc_r0, inst->inst_offset + 4, inst->inst_basereg);
} else {
- /* use r12 to load the 2nd half of the long before we clobber r11. */
- ppc_lwz (code, ppc_r12, ainfo->offset + 4, ppc_r11);
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_addi (code, ppc_r11, ppc_r11, inst->inst_offset);
- ppc_stw (code, ppc_r0, 0, ppc_r11);
- ppc_stw (code, ppc_r12, 4, ppc_r11);
+ /* use r11 to load the 2nd half of the long before we clobber r12. */
+ ppc_lwz (code, ppc_r11, ainfo->offset + 4, ppc_r12);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_addi (code, ppc_r12, ppc_r12, inst->inst_offset);
+ ppc_stw (code, ppc_r0, 0, ppc_r12);
+ ppc_stw (code, ppc_r11, 4, ppc_r12);
}
break;
#endif
ppc_stptr (code, ppc_r0, inst->inst_offset, inst->inst_basereg);
} else {
if (ppc_is_imm32 (inst->inst_offset)) {
- ppc_addis (code, ppc_r11, inst->inst_basereg, ppc_ha(inst->inst_offset));
- ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r11);
+ ppc_addis (code, ppc_r12, inst->inst_basereg, ppc_ha(inst->inst_offset));
+ ppc_stptr (code, ppc_r0, inst->inst_offset, ppc_r12);
} else {
- ppc_load (code, ppc_r11, inst->inst_offset);
- ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r12, inst->inst_offset);
+ ppc_stptr_indexed (code, ppc_r0, inst->inst_basereg, ppc_r12);
}
}
break;
/* FIXME: we need to do the shifting here, too */
if (ainfo->bytes)
NOT_IMPLEMENTED;
- /* load the previous stack pointer in r11 (r0 gets overwritten by the memcpy) */
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
+ /* load the previous stack pointer in r12 (r0 gets overwritten by the memcpy) */
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
if ((size & MONO_PPC_32_64_CASE (3, 7)) != 0) {
code = emit_memcpy (code, size - soffset,
inst->inst_basereg, doffset,
- ppc_r11, ainfo->offset + soffset);
+ ppc_r12, ainfo->offset + soffset);
} else {
code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer),
inst->inst_basereg, doffset,
- ppc_r11, ainfo->offset + soffset);
+ ppc_r12, ainfo->offset + soffset);
}
}
} else if (ainfo->regtype == RegTypeStructByAddr) {
/* if it was originally a RegTypeBase */
if (ainfo->offset) {
- /* load the previous stack pointer in r11 */
- ppc_ldr (code, ppc_r11, 0, ppc_sp);
- ppc_ldptr (code, ppc_r11, ainfo->offset, ppc_r11);
+ /* load the previous stack pointer in r12 */
+ ppc_ldr (code, ppc_r12, 0, ppc_sp);
+ ppc_ldptr (code, ppc_r12, ainfo->offset, ppc_r12);
} else {
- ppc_mr (code, ppc_r11, ainfo->reg);
+ ppc_mr (code, ppc_r12, ainfo->reg);
}
if (cfg->tailcall_valuetype_addrs) {
MonoInst *addr = cfg->tailcall_valuetype_addrs [tailcall_struct_index];
g_assert (ppc_is_imm16 (addr->inst_offset));
- ppc_stptr (code, ppc_r11, addr->inst_offset, addr->inst_basereg);
+ ppc_stptr (code, ppc_r12, addr->inst_offset, addr->inst_basereg);
tailcall_struct_index++;
}
g_assert (ppc_is_imm16 (inst->inst_offset));
- code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r11, 0);
+ code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r12, 0);
/*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
} else
g_assert_not_reached ();
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_get_lmf_addr");
if ((FORCE_INDIR_CALL || cfg->method->dynamic) && !cfg->compile_aot) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtlr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtlr (code, PPC_CALL_REG);
ppc_blrl (code);
} else {
ppc_bl (code, 0);
/* lmf_offset is the offset from the previous stack pointer,
* alloc_size is the total stack space allocated, so the offset
* of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
- * The pointer to the struct is put in ppc_r11 (new_lmf).
+ * The pointer to the struct is put in ppc_r12 (new_lmf).
* The callee-saved registers are already in the MonoLMF structure
*/
- ppc_addi (code, ppc_r11, ppc_sp, alloc_size - lmf_offset);
+ ppc_addi (code, ppc_r12, ppc_sp, alloc_size - lmf_offset);
/* ppc_r3 is the result from mono_get_lmf_addr () */
- ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
+ ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
/* new_lmf->previous_lmf = *lmf_addr */
ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
- ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
- /* *(lmf_addr) = r11 */
- ppc_stptr (code, ppc_r11, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
+ /* *(lmf_addr) = r12 */
+ ppc_stptr (code, ppc_r12, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3);
/* save method info */
if (cfg->compile_aot)
// FIXME:
ppc_load (code, ppc_r0, 0);
else
ppc_load_ptr (code, ppc_r0, method);
- ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11);
- ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r12);
+ ppc_stptr (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r12);
/* save the current IP */
if (cfg->compile_aot) {
ppc_bl (code, 1);
ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
#endif
}
- ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11);
+ ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r12);
}
if (tracing)
lmf_offset = pos;
/* save the frame reg in r8 */
ppc_mr (code, ppc_r8, cfg->frame_reg);
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage - lmf_offset);
+ ppc_addi (code, ppc_r12, cfg->frame_reg, cfg->stack_usage - lmf_offset);
/* r5 = previous_lmf */
- ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11);
+ ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r12);
/* r6 = lmf_addr */
- ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11);
+ ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r12);
/* *(lmf_addr) = previous_lmf */
ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6);
/* FIXME: speedup: there is no actual need to restore the registers if
* we didn't actually change them (idea from Zoltan).
*/
/* restore iregs */
- ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r11);
+ ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r12);
/* restore fregs */
/*for (i = 14; i < 32; i++) {
- ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r11);
+ ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r12);
}*/
g_assert (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET));
/* use the saved copy of the frame reg in r8 */
if (ppc_is_imm16 (return_offset)) {
ppc_ldr (code, ppc_r0, return_offset, cfg->frame_reg);
} else {
- ppc_load (code, ppc_r11, return_offset);
- ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
+ ppc_load (code, ppc_r12, return_offset);
+ ppc_ldr_indexed (code, ppc_r0, cfg->frame_reg, ppc_r12);
}
ppc_mtlr (code, ppc_r0);
}
offset -= sizeof (mgreg_t);
}
if (cfg->frame_reg != ppc_sp)
- ppc_mr (code, ppc_r11, cfg->frame_reg);
+ ppc_mr (code, ppc_r12, cfg->frame_reg);
/* note r31 (possibly the frame register) is restored last */
for (i = 13; i <= 31; i++) {
if (cfg->used_int_regs & (1 << i)) {
}
}
if (cfg->frame_reg != ppc_sp)
- ppc_addi (code, ppc_sp, ppc_r11, cfg->stack_usage);
+ ppc_addi (code, ppc_sp, ppc_r12, cfg->stack_usage);
else
ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage);
} else {
- ppc_load32 (code, ppc_r11, cfg->stack_usage);
+ ppc_load32 (code, ppc_r12, cfg->stack_usage);
if (cfg->used_int_regs) {
- ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
+ ppc_add (code, ppc_r12, cfg->frame_reg, ppc_r12);
for (i = 31; i >= 13; --i) {
if (cfg->used_int_regs & (1 << i)) {
pos += sizeof (mgreg_t);
- ppc_ldr (code, i, -pos, ppc_r11);
+ ppc_ldr (code, i, -pos, ppc_r12);
}
}
- ppc_mr (code, ppc_sp, ppc_r11);
+ ppc_mr (code, ppc_sp, ppc_r12);
} else {
- ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r11);
+ ppc_add (code, ppc_sp, cfg->frame_reg, ppc_r12);
}
}
patch_info->data.name = "mono_arch_throw_corlib_exception";
patch_info->ip.i = code - cfg->native_code;
if (FORCE_INDIR_CALL || cfg->method->dynamic) {
- ppc_load_func (code, ppc_r0, 0);
- ppc_mtctr (code, ppc_r0);
+ ppc_load_func (code, PPC_CALL_REG, 0);
+ ppc_mtctr (code, PPC_CALL_REG);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
} else {
ppc_bl (code, 0);
start = code;
/*
- * We need to save and restore r11 because it might be
+ * We need to save and restore r12 because it might be
* used by the caller as the vtable register, so
* clobbering it will trip up the magic trampoline.
*
- * FIXME: Get rid of this by making sure that r11 is
+ * FIXME: Get rid of this by making sure that r12 is
* not used as the vtable register in interface calls.
*/
- ppc_stptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
- ppc_load (code, ppc_r11, (gsize)(& (vtable->vtable [0])));
+ ppc_stptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
+ ppc_load (code, ppc_r12, (gsize)(& (vtable->vtable [0])));
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->has_target_code) {
ppc_load_ptr (code, ppc_r0, item->value.target_code);
} else {
- ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r11);
- ppc_ldptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
+ ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
+ ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
}
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
item->jmp_code = code;
ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
#endif
- ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r11);
- ppc_ldptr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_sp);
+ ppc_ldptr (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r12);
+ ppc_ldptr (code, ppc_r12, PPC_RET_ADDR_OFFSET, ppc_sp);
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
#if ENABLE_WRONG_METHOD_CHECK
* Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
* TARGET from the mscorlib GOT in full-aot code.
* On PPC, the GOT address is assumed to be in r30, and the result is placed into
- * r11.
+ * r12.
*/
guint8*
mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
{
/* Load the mscorlib got address */
- ppc_ldptr (code, ppc_r11, sizeof (gpointer), ppc_r30);
+ ppc_ldptr (code, ppc_r12, sizeof (gpointer), ppc_r30);
*ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
/* arch_emit_got_access () patches this */
ppc_load32 (code, ppc_r0, 0);
- ppc_ldptr_indexed (code, ppc_r11, ppc_r11, ppc_r0);
+ ppc_ldptr_indexed (code, ppc_r12, ppc_r12, ppc_r0);
return code;
}
guint8 *code = ip;
guint8 *orig_code = code;
- ppc_load_sequence (code, ppc_r11, (gsize)bp_trigger_page);
- ppc_ldptr (code, ppc_r11, 0, ppc_r11);
+ ppc_load_sequence (code, ppc_r12, (gsize)bp_trigger_page);
+ ppc_ldptr (code, ppc_r12, 0, ppc_r12);
g_assert (code - orig_code == BREAKPOINT_SIZE);