#ifdef __APPLE__
#include <sys/sysctl.h>
#endif
+#ifdef __linux__
+#include <unistd.h>
+#endif
#define FORCE_INDIR_CALL 1
if ((dreg) != ppc_r3) ppc_mr ((code), ppc_r3, ppc_r11); \
} while (0);
+#ifdef PPC_THREAD_PTR_REG
+/*
+ * Fast NPTL TLS access: `key' is a signed offset from the thread
+ * pointer register.  If it fits in a signed 16-bit displacement we
+ * load directly; otherwise we first add the high-adjusted upper half
+ * with addis and load through ppc_r11 using the (sign-extended)
+ * low 16 bits of `key' as the displacement.
+ */
+#define emit_nptl_tls(code,dreg,key) do { \
+	int off1 = key; \
+	int off2 = key >> 15; \
+	if ((off2 == 0) || (off2 == -1)) { \
+		ppc_load_reg ((code), (dreg), off1, PPC_THREAD_PTR_REG); \
+	} else { \
+		/* high-adjusted upper half: ((key >> 15) + 1) >> 1 == (key + 0x8000) >> 16, \
+		   which compensates for the sign-extension of the low 16 bits */ \
+		int off3 = (off2 + 1) >> 1; \
+		ppc_addis ((code), ppc_r11, PPC_THREAD_PTR_REG, off3); \
+		ppc_load_reg ((code), (dreg), off1, ppc_r11); \
+	} \
+	} while (0);
+#else
+/* No known thread pointer register for this configuration. */
+#define emit_nptl_tls(code,dreg,key) do { \
+		g_assert_not_reached (); \
+	} while (0)
+#endif
+
#define emit_tls_access(code,dreg,key) do { \
switch (tls_mode) { \
case TLS_MODE_LTHREADS: emit_linuxthreads_tls(code,dreg,key); break; \
+ case TLS_MODE_NPTL: emit_nptl_tls(code,dreg,key); break; \
case TLS_MODE_DARWIN_G5: emit_darwing5_tls(code,dreg,key); break; \
case TLS_MODE_DARWIN_G4: emit_darwing4_tls(code,dreg,key); break; \
default: g_assert_not_reached (); \
{
/* unrolled, use the counter in big */
if (size > sizeof (gpointer) * 5) {
- int shifted = size >> MONO_PPC_32_64_CASE (2, 3);
+ long shifted = size >> MONO_PPC_32_64_CASE (2, 3);
guint8 *copy_loop_start, *copy_loop_jump;
ppc_load (code, ppc_r0, shifted);
ppc_addi (code, ppc_r12, dreg, (doffset - sizeof (gpointer)));
ppc_addi (code, ppc_r11, sreg, (soffset - sizeof (gpointer)));
copy_loop_start = code;
- ppc_load_reg_update (code, ppc_r0, sizeof (gpointer), ppc_r11);
- ppc_store_reg_update (code, ppc_r0, sizeof (gpointer), ppc_r12);
+ ppc_load_reg_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r11);
+ ppc_store_reg_update (code, ppc_r0, (unsigned int)sizeof (gpointer), ppc_r12);
copy_loop_jump = code;
ppc_bc (code, PPC_BR_DEC_CTR_NONZERO, 0, 0);
ppc_patch (copy_loop_jump, copy_loop_start);
return o;
}
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
-{
- gpointer vt;
- int displacement;
- vt = mono_arch_get_vcall_slot (code, regs, &displacement);
- if (!vt)
- return NULL;
- return (gpointer*)((char*)vt + displacement);
-}
-
#define MAX_ARCH_DELEGATE_PARAMS 7
gpointer
int i, soffset, dreg;
if (ainfo->regtype == RegTypeStructByVal) {
+#ifdef __APPLE__
guint32 size = 0;
+#endif
soffset = 0;
#ifdef __APPLE__
/*
}
}
+/*
+ * mono_arch_decompose_long_opts:
+ *
+ *   Arch-specific decomposition of 64-bit (long) opcodes into pairs of
+ * 32-bit operations on the low word (vreg + 1) and high word (vreg + 2).
+ * Only the overflow-checked add/sub and LNEG need special handling here;
+ * every other opcode falls through to the generic decomposition.
+ */
+void
+mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
+{
+ switch (ins->opcode) {
+ case OP_LADD_OVF:
+ /* ADC sets the condition code */
+ MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
+ MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
+ NULLIFY_INS (ins);
+ break;
+ case OP_LADD_OVF_UN:
+ /* ADC sets the condition code */
+ MONO_EMIT_NEW_BIALU (cfg, OP_ADDCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
+ MONO_EMIT_NEW_BIALU (cfg, OP_ADD_OVF_UN_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
+ NULLIFY_INS (ins);
+ break;
+ case OP_LSUB_OVF:
+ /* SBB sets the condition code */
+ MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
+ MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
+ NULLIFY_INS (ins);
+ break;
+ case OP_LSUB_OVF_UN:
+ /* SBB sets the condition code */
+ MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, ins->sreg1 + 1, ins->sreg2 + 1);
+ MONO_EMIT_NEW_BIALU (cfg, OP_SUB_OVF_UN_CARRY, ins->dreg + 2, ins->sreg1 + 2, ins->sreg2 + 2);
+ NULLIFY_INS (ins);
+ break;
+ case OP_LNEG:
+ /* This is the old version from inssel-long32.brg */
+ MONO_EMIT_NEW_UNALU (cfg, OP_INOT, ins->dreg + 1, ins->sreg1 + 1);
+ MONO_EMIT_NEW_UNALU (cfg, OP_INOT, ins->dreg + 2, ins->sreg1 + 2);
+ /* ADC sets the condition codes */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADC_IMM, ins->dreg + 1, ins->dreg + 1, 1);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADC_IMM, ins->dreg + 2, ins->dreg + 2, 0);
+ NULLIFY_INS (ins);
+ break;
+ default:
+ break;
+ }
+}
+
/*
* the branch_b0_table should maintain the order of these
* opcodes.
static guchar*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
- int offset = cfg->arch.fp_conv_var_offset;
- int sub_offset;
+ long offset = cfg->arch.fp_conv_var_offset;
+ long sub_offset;
/* sreg is a float, dreg is an integer reg. ppc_f0 is used a scratch */
#ifdef __mono_ppc64__
if (size == 8) {
pdata.found = 0;
mono_domain_lock (domain);
- mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
+ mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
if (!pdata.found) {
/* this uses the first available slot */
pdata.found = 2;
- mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
+ mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
}
mono_domain_unlock (domain);
}
case RegTypeStructByVal: {
+#ifdef __APPLE__
guint32 size = 0;
+#endif
int j;
/* FIXME: */
static guint8*
emit_reserve_param_area (MonoCompile *cfg, guint8 *code)
{
- int size = cfg->param_area;
+ long size = cfg->param_area;
size += MONO_ARCH_FRAME_ALIGNMENT - 1;
size &= -MONO_ARCH_FRAME_ALIGNMENT;
static guint8*
emit_unreserve_param_area (MonoCompile *cfg, guint8 *code)
{
- int size = cfg->param_area;
+ long size = cfg->param_area;
size += MONO_ARCH_FRAME_ALIGNMENT - 1;
size &= -MONO_ARCH_FRAME_ALIGNMENT;
}
break;
case OP_LOADI4_MEMBASE:
+#ifdef __mono_ppc64__
+ if (ppc_is_imm16 (ins->inst_offset)) {
+ ppc_lwa (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ppc_load (code, ppc_r0, ins->inst_offset);
+ ppc_lwax (code, ins->dreg, ins->inst_basereg, ppc_r0);
+ }
+ break;
+#endif
case OP_LOADU4_MEMBASE:
if (ppc_is_imm16 (ins->inst_offset)) {
ppc_lwz (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_lwzx (code, ins->dreg, ins->inst_basereg, ppc_r0);
}
-#ifdef __mono_ppc64__
- if (ins->opcode == OP_LOADI4_MEMBASE)
- ppc_extsw (code, ins->dreg, ins->dreg);
-#endif
break;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
break;
case OP_LOADI2_MEMBASE:
if (ppc_is_imm16 (ins->inst_offset)) {
- ppc_lha (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
+ ppc_lha (code, ins->dreg, ins->inst_offset, ins->inst_basereg);
} else {
ppc_load (code, ppc_r0, ins->inst_offset);
ppc_lhax (code, ins->dreg, ins->inst_basereg, ppc_r0);
ppc_load_reg_indexed (code, ins->dreg, ins->sreg2, ins->inst_basereg);
break;
case OP_LOADI4_MEMINDEX:
- case OP_LOADU4_MEMINDEX:
- ppc_lwzx (code, ins->dreg, ins->sreg2, ins->inst_basereg);
#ifdef __mono_ppc64__
- if (ins->opcode == OP_LOADI4_MEMINDEX)
- ppc_extsb (code, ins->dreg, ins->dreg);
+ ppc_lwax (code, ins->dreg, ins->sreg2, ins->inst_basereg);
+ break;
#endif
+ case OP_LOADU4_MEMINDEX:
+ ppc_lwzx (code, ins->dreg, ins->sreg2, ins->inst_basereg);
break;
case OP_LOADU2_MEMINDEX:
ppc_lhzx (code, ins->dreg, ins->sreg2, ins->inst_basereg);
* we're leaving the method.
*/
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
- if (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET)) {
- ppc_load_reg (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, cfg->frame_reg);
+ long ret_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
+ if (ppc_is_imm16 (ret_offset)) {
+ ppc_load_reg (code, ppc_r0, ret_offset, cfg->frame_reg);
} else {
- ppc_load (code, ppc_r11, cfg->stack_usage + PPC_RET_ADDR_OFFSET);
+ ppc_load (code, ppc_r11, ret_offset);
ppc_load_reg_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
}
ppc_mtlr (code, ppc_r0);
code = emit_load_volatile_arguments (cfg, code);
if (ppc_is_imm16 (cfg->stack_usage)) {
- ppc_addic (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
+ ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->stack_usage);
} else {
ppc_load (code, ppc_r11, cfg->stack_usage);
ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
ppc_load_reg (code, ppc_r0, 0, ins->sreg1);
break;
case OP_ARGLIST: {
- if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
- ppc_addi (code, ppc_r0, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
+ long cookie_offset = cfg->sig_cookie + cfg->stack_usage;
+ if (ppc_is_imm16 (cookie_offset)) {
+ ppc_addi (code, ppc_r0, cfg->frame_reg, cookie_offset);
} else {
- ppc_load (code, ppc_r0, cfg->sig_cookie + cfg->stack_usage);
+ ppc_load (code, ppc_r0, cookie_offset);
ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0);
}
ppc_store_reg (code, ppc_r0, 0, ins->sreg1);
break;
case OP_JUMP_TABLE:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+#ifdef __mono_ppc64__
ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0f0f0f0f0fL);
+#else
+ ppc_load_sequence (code, ins->dreg, (gulong)0x0f0f0f0fL);
+#endif
break;
}
break;
}
#endif
+ case OP_ATOMIC_CAS_I4:
+ CASE_PPC64 (OP_ATOMIC_CAS_I8) {
+ int location = ins->sreg1;
+ int value = ins->sreg2;
+ int comparand = ins->sreg3;
+ guint8 *start, *not_equal, *lost_reservation;
+
+ start = code;
+ if (ins->opcode == OP_ATOMIC_CAS_I4)
+ ppc_lwarx (code, ppc_r0, 0, location);
+#ifdef __mono_ppc64__
+ else
+ ppc_ldarx (code, ppc_r0, 0, location);
+#endif
+ ppc_cmp (code, 0, ins->opcode == OP_ATOMIC_CAS_I4 ? 0 : 1, ppc_r0, comparand);
+
+ not_equal = code;
+ ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
+ if (ins->opcode == OP_ATOMIC_CAS_I4)
+ ppc_stwcxd (code, value, 0, location);
+#ifdef __mono_ppc64__
+ else
+ ppc_stdcxd (code, value, 0, location);
+#endif
+
+ lost_reservation = code;
+ ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
+ ppc_patch (lost_reservation, start);
+
+ ppc_patch (not_equal, code);
+ ppc_mr (code, ins->dreg, ppc_r0);
+ break;
+ }
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
- int alloc_size, pos, max_offset, i;
+ long alloc_size, pos, max_offset;
+ int i;
guint8 *code;
CallInfo *cinfo;
int tracing = 0;
g_assert_not_reached ();
if (cfg->verbose_level > 2)
- g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
+ g_print ("Argument %ld assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
} else {
/* the argument should be put on the stack: FIXME handle size != word */
if (ainfo->regtype == RegTypeGeneral) {
if (method->save_lmf) {
if (lmf_pthread_key != -1) {
emit_tls_access (code, ppc_r3, lmf_pthread_key);
- if (G_STRUCT_OFFSET (MonoJitTlsData, lmf))
+ if (tls_mode != TLS_MODE_NPTL && G_STRUCT_OFFSET (MonoJitTlsData, lmf))
ppc_addi (code, ppc_r3, ppc_r3, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
} else {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
ppc_store_reg (code, ppc_sp, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11);
/* save the current IP */
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
+#ifdef __mono_ppc64__
ppc_load_sequence (code, ppc_r0, (gulong)0x0101010101010101L);
+#else
+ ppc_load_sequence (code, ppc_r0, (gulong)0x01010101L);
+#endif
ppc_store_reg (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11);
}
* we didn't actually change them (idea from Zoltan).
*/
/* restore iregs */
- ppc_load_multiple_regs (code, ppc_r13, ppc_r11, G_STRUCT_OFFSET(MonoLMF, iregs));
+ ppc_load_multiple_regs (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r11);
/* restore fregs */
/*for (i = 14; i < 32; i++) {
ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r11);
ppc_addic (code, ppc_sp, ppc_r8, cfg->stack_usage);
} else {
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
- if (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET)) {
- ppc_load_reg (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, cfg->frame_reg);
+ long return_offset = cfg->stack_usage + PPC_RET_ADDR_OFFSET;
+ if (ppc_is_imm16 (return_offset)) {
+ ppc_load_reg (code, ppc_r0, return_offset, cfg->frame_reg);
} else {
- ppc_load (code, ppc_r11, cfg->stack_usage + PPC_RET_ADDR_OFFSET);
+ ppc_load (code, ppc_r11, return_offset);
ppc_load_reg_indexed (code, ppc_r0, cfg->frame_reg, ppc_r11);
}
ppc_mtlr (code, ppc_r0);
}
}
if (cfg->frame_reg != ppc_sp)
- ppc_addic (code, ppc_sp, ppc_r11, cfg->stack_usage);
+ ppc_addi (code, ppc_sp, ppc_r11, cfg->stack_usage);
else
- ppc_addic (code, ppc_sp, ppc_sp, cfg->stack_usage);
+ ppc_addi (code, ppc_sp, ppc_sp, cfg->stack_usage);
} else {
ppc_load (code, ppc_r11, cfg->stack_usage);
if (cfg->used_int_regs) {
break;
}
case MONO_PATCH_INFO_EXC: {
+ MonoClass *exc_class;
+
unsigned char *ip = patch_info->ip.i + cfg->native_code;
i = exception_id_by_name (patch_info->data.target);
if (exc_throw_pos [i]) {
} else {
exc_throw_pos [i] = code;
}
+
+ exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
+ g_assert (exc_class);
+
ppc_patch (ip, code);
/*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
- ppc_load (code, ppc_r3, patch_info->data.target);
- /* we got here from a conditional call, so the calling ip is set in lr already */
+ ppc_load (code, ppc_r3, exc_class->type_token);
+ /* we got here from a conditional call, so the calling ip is set in lr */
+ ppc_mflr (code, ppc_r4);
patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
- patch_info->data.name = "mono_arch_throw_exception_by_name";
+ patch_info->data.name = "mono_arch_throw_corlib_exception";
patch_info->ip.i = code - cfg->native_code;
if (FORCE_INDIR_CALL || cfg->method->dynamic) {
ppc_load_func (code, ppc_r0, 0);
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
} else {
- ppc_b (code, 0);
+ ppc_bl (code, 0);
}
break;
}
}
+#if DEAD_CODE
static int
try_offset_access (void *value, guint32 idx)
{
return 0;
return 1;
}
+#endif
static void
setup_tls_access (void)
{
-#ifdef __mono_ppc64__
- /* FIXME: implement */
- tls_mode = TLS_MODE_FAILED;
- return;
-#else
guint32 ptk;
+
+#if defined(__linux__) && defined(_CS_GNU_LIBPTHREAD_VERSION)
+ size_t conf_size = 0;
+ char confbuf[128];
+#else
+ /* FIXME for darwin */
guint32 *ins, *code;
guint32 cmplwi_1023, li_0x48, blr_ins;
+#endif
+
if (tls_mode == TLS_MODE_FAILED)
return;
-
if (g_getenv ("MONO_NO_TLS")) {
tls_mode = TLS_MODE_FAILED;
return;
}
- if (tls_mode == TLS_MODE_DETECT) {
+ if (tls_mode == TLS_MODE_DETECT) {
+#if defined(__linux__) && defined(_CS_GNU_LIBPTHREAD_VERSION)
+ conf_size = confstr ( _CS_GNU_LIBPTHREAD_VERSION, confbuf, sizeof(confbuf));
+ if ((conf_size > 4) && (strncmp (confbuf, "NPTL", 4) == 0))
+ tls_mode = TLS_MODE_NPTL;
+ else
+ tls_mode = TLS_MODE_LTHREADS;
+#else
ins = (guint32*)pthread_getspecific;
/* uncond branch to the real method */
if ((*ins >> 26) == 18) {
val >>= 6;
if (*ins & 2) {
/* absolute */
- ins = (guint32*)val;
+ ins = (guint32*)(long)val;
} else {
ins = (guint32*) ((char*)ins + val);
}
val >>= 6;
if (*ins & 2) {
/* absolute */
- ins = (guint32*)val;
+ ins = (guint32*)(long)val;
} else {
ins = (guint32*) ((char*)ins + val);
}
tls_mode = TLS_MODE_FAILED;
return;
}
+#endif
}
+ if ((monodomain_key == -1) && (tls_mode == TLS_MODE_NPTL)) {
+ monodomain_key = mono_domain_get_tls_offset();
+ }
+ /* if not TLS_MODE_NPTL or local dynamic (as indicated by
+ mono_domain_get_tls_offset returning -1) then use keyed access. */
if (monodomain_key == -1) {
ptk = mono_domain_get_tls_key ();
if (ptk < 1024) {
}
}
}
+
+ if ((lmf_pthread_key == -1) && (tls_mode == TLS_MODE_NPTL)) {
+ lmf_pthread_key = mono_get_lmf_addr_tls_offset();
+ }
+ /* if not TLS_MODE_NPTL or local dynamic (as indicated by
+ mono_get_lmf_addr_tls_offset returning -1) then use keyed access. */
if (lmf_pthread_key == -1) {
ptk = mono_pthread_key_for_tls (mono_jit_tls_id);
if (ptk < 1024) {
lmf_pthread_key = ptk;
}
}
+
+ if ((monothread_key == -1) && (tls_mode == TLS_MODE_NPTL)) {
+ monothread_key = mono_thread_get_tls_offset();
+ }
+ /* if not TLS_MODE_NPTL or local dynamic (as indicated by
+ mono_thread_get_tls_offset returning -1) then use keyed access. */
if (monothread_key == -1) {
ptk = mono_thread_get_tls_key ();
if (ptk < 1024) {
/*g_print ("thread not inited yet %d\n", ptk);*/
}
}
-#endif
}
void
if (item->check_target_idx) {
if (!item->compare_done)
item->chunk_size += CMP_SIZE;
- if (fail_tramp)
+ if (item->has_target_code)
item->chunk_size += BR_SIZE + JUMP_IMM32_SIZE;
else
item->chunk_size += LOADSTORE_SIZE + BR_SIZE + JUMP_IMM_SIZE;
} else {
if (fail_tramp) {
item->chunk_size += CMP_SIZE + BR_SIZE + JUMP_IMM32_SIZE * 2;
+ if (!item->has_target_code)
+ item->chunk_size += LOADSTORE_SIZE;
} else {
item->chunk_size += LOADSTORE_SIZE + JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
} else {
/* the initial load of the vtable address */
size += PPC_LOAD_SEQUENCE_LENGTH + LOADSTORE_SIZE;
- code = mono_code_manager_reserve (domain->code_mp, size);
+ code = mono_domain_code_reserve (domain, size);
}
start = code;
if (!fail_tramp) {
}
item->jmp_code = code;
ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
- if (fail_tramp) {
+ if (item->has_target_code) {
ppc_load (code, ppc_r0, item->value.target_code);
} else {
ppc_load_reg (code, ppc_r0, (sizeof (gpointer) * item->value.vtable_slot), ppc_r11);
ppc_compare_log (code, 0, MONO_ARCH_IMT_REG, ppc_r0);
item->jmp_code = code;
ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
- ppc_load (code, ppc_r0, item->value.target_code);
+ if (item->has_target_code) {
+ ppc_load (code, ppc_r0, item->value.target_code);
+ } else {
+ g_assert (vtable);
+ ppc_load (code, ppc_r0, & (vtable->vtable [item->value.vtable_slot]));
+ ppc_load_reg_indexed (code, ppc_r0, 0, ppc_r0);
+ }
ppc_mtctr (code, ppc_r0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
ppc_patch (item->jmp_code, code);
gpointer
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
+ if (reg == ppc_r1)
+ return MONO_CONTEXT_GET_SP (ctx);
+
g_assert (reg >= ppc_r13);
return (gpointer)ctx->regs [reg - ppc_r13];