#include <sys/sysctl.h>
#endif
+#define FORCE_INDIR_CALL 1
+
enum {
TLS_MODE_DETECT,
TLS_MODE_FAILED,
TLS_MODE_DARWIN_G5
};
+/* This mutex protects architecture specific caches */
+#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
+#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
+static CRITICAL_SECTION mini_arch_mutex;
+
int mono_exc_esp_offset = 0;
static int tls_mode = TLS_MODE_DETECT;
static int lmf_pthread_key = -1;
if (csig->pinvoke)
size = mono_type_native_stack_size (csig->params [k], &align);
else
- size = mono_type_stack_size (csig->params [k], &align);
+ size = mini_type_stack_size (NULL, csig->params [k], &align);
/* ignore alignment for now */
align = 1;
return frame_size;
}
+/*
+ * mono_arch_get_vcall_slot:
+ *
+ * Decode the PPC call sequence ending just before CODE_PTR to recover
+ * the memory slot the indirect call target was loaded from (the vtable
+ * slot).  On success returns the contents of the base register (taken
+ * from REGS, or from the saved MonoLMF for non-volatile registers) and
+ * stores the slot offset in *DISPLACEMENT.  Returns NULL when the site
+ * is a thunk-less direct call (lis/ori/mtlr/blrl) or does not end in a
+ * 'blrl' at all.
+ */
+gpointer
+mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
+{
+ char *o = NULL;
+ int reg, offset = 0;
+ guint32* code = (guint32*)code_ptr;
+
+ *displacement = 0;
+
+ /* This is the 'blrl' instruction */
+ --code;
+
+ /* Sanity check: instruction must be 'blrl' */
+ if (*code != 0x4e800021)
+ return NULL;
+
+ /* the thunk-less direct call sequence: lis/ori/mtlr/blrl */
+ if ((code [-1] >> 26) == 31 && (code [-2] >> 26) == 24 && (code [-3] >> 26) == 15) {
+ return NULL;
+ }
+
+ /* OK, we're now at the 'blrl' instruction. Now walk backwards
+ till we get to a 'mtlr rA' */
+ /* NOTE(review): the superset test below ((ins & mask) == mask) can in
+ * principle match instructions other than mtlr whose bits happen to
+ * cover the pattern -- confirm all emitted call sequences keep this
+ * unambiguous.  The loop also has no explicit lower bound. */
+ for (; --code;) {
+ if((*code & 0x7c0803a6) == 0x7c0803a6) {
+ gint16 soff;
+ /* Here we are: we reached the 'mtlr rA'.
+ Extract the register from the instruction */
+ reg = (*code & 0x03e00000) >> 21;
+ --code;
+ /* ok, this is a lwz reg, offset (vtreg)
+ * it is emitted with:
+ * ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((a) << 16) | (guint16)(d))
+ */
+ soff = (*code & 0xffff);
+ offset = soff;
+ reg = (*code >> 16) & 0x1f;
+ g_assert (reg != ppc_r1);
+ /*g_print ("patching reg is %d\n", reg);*/
+ if (reg >= 13) {
+ /* NOTE(review): assumes REGS points into a MonoLMF laid out as
+ * 14 doubles followed by 13 saved gulong iregs -- verify against
+ * the trampoline code that saves this state. */
+ MonoLMF *lmf = (MonoLMF*)((char*)regs + (14 * sizeof (double)) + (13 * sizeof (gulong)));
+ /* saved in the MonoLMF structure */
+ o = (gpointer)lmf->iregs [reg - 13];
+ } else {
+ o = regs [reg];
+ }
+ break;
+ }
+ }
+ *displacement = offset;
+ return o;
+}
+
+/*
+ * mono_arch_get_vcall_slot_addr:
+ *
+ * Convenience wrapper around mono_arch_get_vcall_slot: returns the
+ * address of the vtable slot itself (base + displacement) for the call
+ * site at CODE, or NULL when the site is not an indirect call.
+ */
+gpointer*
+mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
+{
+ gpointer vt;
+ int displacement;
+ vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+ if (!vt)
+ return NULL;
+ return (gpointer*)((char*)vt + displacement);
+}
+
+/* Maximum number of (register-sized) parameters the no-target delegate
+ * trampoline can handle: args are slid down one integer register each. */
+#define MAX_ARCH_DELEGATE_PARAMS 7
+
+/*
+ * mono_arch_get_delegate_invoke_impl:
+ *
+ * Return a tiny cached trampoline implementing Delegate.Invoke for SIG.
+ * With HAS_TARGET, the trampoline loads delegate->method_ptr into ctr,
+ * replaces the 'this' argument (ppc_r3) with delegate->target and jumps.
+ * Without a target it slides every argument down one register (dropping
+ * the delegate itself) before jumping to method_ptr.
+ * Returns NULL for cases not handled: struct returns, more than
+ * MAX_ARCH_DELEGATE_PARAMS parameters, or non register-sized parameters.
+ * Generated code is cached (one slot per arity); mini_arch_lock guards
+ * the caches.
+ */
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+ guint8 *code, *start;
+
+ /* FIXME: Support more cases */
+ if (MONO_TYPE_ISSTRUCT (sig->ret))
+ return NULL;
+
+ if (has_target) {
+ static guint8* cached = NULL;
+ mono_mini_arch_lock ();
+ if (cached) {
+ mono_mini_arch_unlock ();
+ return cached;
+ }
+
+ start = code = mono_global_codeman_reserve (16);
+
+ /* Replace the this argument with the target */
+ ppc_lwz (code, ppc_r0, G_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
+ ppc_mtctr (code, ppc_r0);
+ ppc_lwz (code, ppc_r3, G_STRUCT_OFFSET (MonoDelegate, target), ppc_r3);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+
+ g_assert ((code - start) <= 16);
+
+ mono_arch_flush_icache (start, 16);
+ cached = start;
+ mono_mini_arch_unlock ();
+ return cached;
+ } else {
+ static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
+ int size, i;
+
+ if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
+ return NULL;
+ for (i = 0; i < sig->param_count; ++i)
+ if (!mono_is_regsize_var (sig->params [i]))
+ return NULL;
+
+ mono_mini_arch_lock ();
+ code = cache [sig->param_count];
+ if (code) {
+ mono_mini_arch_unlock ();
+ return code;
+ }
+
+ /* 3 fixed instructions (lwz/mtctr/bcctr) + one mr per argument */
+ size = 12 + sig->param_count * 4;
+ start = code = mono_global_codeman_reserve (size);
+
+ ppc_lwz (code, ppc_r0, G_STRUCT_OFFSET (MonoDelegate, method_ptr), ppc_r3);
+ ppc_mtctr (code, ppc_r0);
+ /* slide down the arguments */
+ for (i = 0; i < sig->param_count; ++i) {
+ ppc_mr (code, (ppc_r3 + i), (ppc_r3 + i + 1));
+ }
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+
+ g_assert ((code - start) <= size);
+
+ mono_arch_flush_icache (start, size);
+ cache [sig->param_count] = start;
+ mono_mini_arch_unlock ();
+ return start;
+ }
+ /* not reached: both branches above return */
+ return NULL;
+}
+
+/*
+ * mono_arch_get_this_arg_from_call:
+ *
+ * Extract the 'this' pointer from the saved argument registers of a
+ * call site.  When SIG returns a struct the hidden return-buffer
+ * pointer occupies ppc_r3, so 'this' is found in ppc_r4 instead.
+ */
+gpointer
+mono_arch_get_this_arg_from_call (MonoMethodSignature *sig, gssize *regs, guint8 *code)
+{
+ /* FIXME: handle returning a struct */
+ if (MONO_TYPE_ISSTRUCT (sig->ret))
+ return (gpointer)regs [ppc_r4];
+ return (gpointer)regs [ppc_r3];
+}
+
/*
* Initialize the cpu to execute managed code.
*/
{
}
+/*
+ * Initialize architecture specific code: sets up the mutex that guards
+ * the arch-specific caches (delegate trampolines).  Must be called once
+ * before any mono_mini_arch_lock () user runs.
+ */
+void
+mono_arch_init (void)
+{
+ InitializeCriticalSection (&mini_arch_mutex);
+}
+
+/*
+ * Cleanup architecture specific code: releases the mutex created in
+ * mono_arch_init ().
+ */
+void
+mono_arch_cleanup (void)
+{
+ DeleteCriticalSection (&mini_arch_mutex);
+}
+
/*
* This function returns the optimizations supported on this cpu.
*/
int i, top = 32;
if (cfg->frame_reg != ppc_sp)
top = 31;
- for (i = 13; i < top; ++i)
+ /* ppc_r13 is used by the system on PPC EABI */
+ for (i = 14; i < top; ++i)
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
return regs;
typedef struct {
gint32 offset;
- guint16 vtsize; /* in param area */
+ guint32 vtsize; /* in param area */
guint8 reg;
guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
#else
add_general (&gr, &stack_size, cinfo->args + n, TRUE);
cinfo->args [n].regtype = RegTypeStructByAddr;
+ cinfo->args [n].vtsize = size;
#endif
n++;
break;
#else
add_general (&gr, &stack_size, cinfo->args + n, TRUE);
cinfo->args [n].regtype = RegTypeStructByAddr;
+ cinfo->args [n].vtsize = size;
#endif
n++;
break;
curinst = 0;
if (sig->hasthis) {
- inst = m->varinfo [curinst];
+ inst = m->args [curinst];
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
}
for (i = 0; i < sig->param_count; ++i) {
- inst = m->varinfo [curinst];
+ inst = m->args [curinst];
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
if (arg->type == STACK_I8)
call->used_iregs |= 1 << (ainfo->reg + 1);
} else if (ainfo->regtype == RegTypeStructByAddr) {
- /* FIXME: where si the data allocated? */
- arg->backend.reg3 = ainfo->reg;
- call->used_iregs |= 1 << ainfo->reg;
+ if (ainfo->offset) {
+ MonoPPCArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoPPCArgInfo));
+ arg->opcode = OP_OUTARG_MEMBASE;
+ ai->reg = ainfo->reg;
+ ai->size = sizeof (gpointer);
+ ai->offset = ainfo->offset;
+ arg->backend.data = ai;
+ } else {
+ arg->backend.reg3 = ainfo->reg;
+ call->used_iregs |= 1 << ainfo->reg;
+ }
} else if (ainfo->regtype == RegTypeStructByVal) {
int cur_reg;
MonoPPCArgInfo *ai = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoPPCArgInfo));
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- } else {
- //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
- ins->opcode = OP_MOVE;
- ins->sreg1 = last_ins->sreg1;
- }
+ ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
+ ins->sreg1 = last_ins->sreg1;
}
break;
case OP_LOADU2_MEMBASE:
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- if (ins->dreg == last_ins->sreg1) {
- last_ins->next = ins->next;
- ins = ins->next;
- continue;
- } else {
- //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
- ins->opcode = OP_MOVE;
- ins->sreg1 = last_ins->sreg1;
- }
+ ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
+ ins->sreg1 = last_ins->sreg1;
}
break;
case CEE_CONV_I4:
PPC_BR_LT
};
-static const char*const * ins_spec = ppcg4;
-
static void
insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
{
int imm;
/* setup the virtual reg allocator */
- if (bb->max_ireg > cfg->rs->next_vireg)
- cfg->rs->next_vireg = bb->max_ireg;
+ if (bb->max_vreg > cfg->rs->next_vreg)
+ cfg->rs->next_vreg = bb->max_vreg;
ins = bb->code;
while (ins) {
ins->opcode = map_to_reg_reg_op (ins->opcode);
last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
+ case OP_R8CONST:
+ case OP_R4CONST:
+ NEW_INS (cfg, temp, OP_ICONST);
+ temp->inst_c0 = ins->inst_p0;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ ins->inst_basereg = temp->dreg;
+ ins->inst_offset = 0;
+ ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE;
+ last_ins = temp;
+ /* make it handle the possibly big ins->inst_offset
+ * later optimize to use lis + load_membase
+ */
+ goto loop_start;
}
last_ins = ins;
ins = ins->next;
}
bb->last_ins = last_ins;
- bb->max_ireg = cfg->rs->next_vireg;
+ bb->max_vreg = cfg->rs->next_vreg;
}
return;
}
- if (prim == 15 || ins == 0x4e800021) {
+ if (prim == 15 || ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
guint32 *seq;
- /* the trampoline code will try to patch the blrl */
- if (ins == 0x4e800021) {
+ /* the trampoline code will try to patch the blrl, blr, bcctr */
+ if (ins == 0x4e800021 || ins == 0x4e800020 || ins == 0x4e800420) {
code -= 12;
}
/* this is the lis/ori/mtlr/blrl sequence */
g_assert ((seq [0] >> 26) == 15);
g_assert ((seq [1] >> 26) == 24);
g_assert ((seq [2] >> 26) == 31);
- g_assert (seq [3] == 0x4e800021);
+ g_assert (seq [3] == 0x4e800021 || seq [3] == 0x4e800020 || seq [3] == 0x4e800420);
/* FIXME: make this thread safe */
ppc_lis (code, ppc_r0, (guint32)(target) >> 16);
ppc_ori (code, ppc_r0, ppc_r0, (guint32)(target) & 0xffff);
while (ins) {
offset = code - cfg->native_code;
- max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
+ max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
if (offset > (cfg->code_size - max_len - 16)) {
cfg->code_size *= 2;
}
}
break;
- case CEE_BREAK:
+ case OP_BREAK:
ppc_break (code);
break;
case OP_ADDCC:
ppc_cmpi (code, 0, 0, ins->sreg2, -1);
divisor_is_m1 = code;
ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0);
- ppc_lis (code, ppc_r11, 0x8000);
- ppc_cmp (code, 0, 0, ins->sreg1, ppc_r11);
+ ppc_lis (code, ppc_r0, 0x8000);
+ ppc_cmp (code, 0, 0, ins->sreg1, ppc_r0);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "ArithmeticException");
ppc_patch (divisor_is_m1, code);
/* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
break;
case OP_DIV_IMM:
- g_assert_not_reached ();
-#if 0
- ppc_load (code, ppc_r11, ins->inst_imm);
- ppc_divwod (code, ins->dreg, ins->sreg1, ppc_r11);
- ppc_mfspr (code, ppc_r0, ppc_xer);
- ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
- /* FIXME: use OverflowException for 0x80000000/-1 */
- EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
- break;
-#endif
- case CEE_REM: {
- guint32 *divisor_is_m1;
- ppc_cmpi (code, 0, 0, ins->sreg2, -1);
- divisor_is_m1 = code;
- ppc_bc (code, PPC_BR_FALSE | PPC_BR_LIKELY, PPC_BR_EQ, 0);
- ppc_lis (code, ppc_r11, 0x8000);
- ppc_cmp (code, 0, 0, ins->sreg1, ppc_r11);
- EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "ArithmeticException");
- ppc_patch (divisor_is_m1, code);
- ppc_divwod (code, ppc_r11, ins->sreg1, ins->sreg2);
- ppc_mfspr (code, ppc_r0, ppc_xer);
- ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
- /* FIXME: use OverflowException for 0x80000000/-1 */
- EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
- ppc_mullw (code, ppc_r11, ppc_r11, ins->sreg2);
- ppc_subf (code, ins->dreg, ppc_r11, ins->sreg1);
- break;
- }
+ case CEE_REM:
case CEE_REM_UN:
- ppc_divwuod (code, ppc_r11, ins->sreg1, ins->sreg2);
- ppc_mfspr (code, ppc_r0, ppc_xer);
- ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
- EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
- ppc_mullw (code, ppc_r11, ppc_r11, ins->sreg2);
- ppc_subf (code, ins->dreg, ppc_r11, ins->sreg1);
- break;
case OP_REM_IMM:
g_assert_not_reached ();
case CEE_OR:
if (!(ins->inst_imm & 0xffff0000)) {
ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm);
} else if (!(ins->inst_imm & 0xffff)) {
- ppc_oris (code, ins->sreg1, ins->dreg, ((guint32)(ins->inst_imm) >> 16));
+ ppc_oris (code, ins->dreg, ins->sreg1, ((guint32)(ins->inst_imm) >> 16));
} else {
g_assert_not_reached ();
}
ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
break;
case OP_SHR_UN_IMM:
- ppc_rlwinm (code, ins->dreg, ins->sreg1, (32 - (ins->inst_imm & 0x1f)), (ins->inst_imm & 0x1f), 31);
+ if (ins->inst_imm)
+ ppc_rlwinm (code, ins->dreg, ins->sreg1, (32 - (ins->inst_imm & 0x1f)), (ins->inst_imm & 0x1f), 31);
+ else
+ ppc_mr (code, ins->dreg, ins->sreg1);
break;
case CEE_SHR_UN:
ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2);
case OP_FCONV_TO_R4:
ppc_frsp (code, ins->dreg, ins->sreg1);
break;
- case CEE_JMP: {
+ case OP_JMP: {
int i, pos = 0;
/*
* Keep in sync with mono_arch_emit_epilog
*/
g_assert (!cfg->method->save_lmf);
+ /*
+ * Note: we can use ppc_r11 here because it is dead anyway:
+ * we're leaving the method.
+ */
if (1 || cfg->flags & MONO_CFG_HAS_CALLS) {
if (ppc_is_imm16 (cfg->stack_usage + PPC_RET_ADDR_OFFSET)) {
ppc_lwz (code, ppc_r0, cfg->stack_usage + PPC_RET_ADDR_OFFSET, cfg->frame_reg);
break;
case OP_ARGLIST: {
if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
- ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
+ ppc_addi (code, ppc_r0, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
} else {
- ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
- ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
+ ppc_load (code, ppc_r0, cfg->sig_cookie + cfg->stack_usage);
+ ppc_add (code, ppc_r0, cfg->frame_reg, ppc_r0);
}
- ppc_stw (code, ppc_r11, 0, ins->sreg1);
+ ppc_stw (code, ppc_r0, 0, ins->sreg1);
break;
}
case OP_FCALL:
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
else
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
- if (cfg->method->dynamic) {
+ if (FORCE_INDIR_CALL || cfg->method->dynamic) {
ppc_lis (code, ppc_r0, 0);
ppc_ori (code, ppc_r0, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_rlwinm (code, ppc_r11, ppc_r11, 0, 0, 27);
/* use ctr to store the number of words to 0 if needed */
if (ins->flags & MONO_INST_INIT) {
- /* we zero 4 bytes at a time */
- ppc_addi (code, ppc_r0, ins->sreg1, 3);
+ /* we zero 4 bytes at a time:
+ * we add 7 instead of 3 so that we set the counter to
+ * at least 1, otherwise the bdnz instruction will make
+ * it negative and iterate billions of times.
+ */
+ ppc_addi (code, ppc_r0, ins->sreg1, 7);
ppc_srawi (code, ppc_r0, ppc_r0, 2);
ppc_mtctr (code, ppc_r0);
}
if (ins->flags & MONO_INST_INIT) {
/* adjust the dest reg by -4 so we can use stwu */
- ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 4));
+ /* we actually adjust -8 because we let the loop
+ * run at least once
+ */
+ ppc_addi (code, ins->dreg, ppc_sp, (area_offset - 8));
ppc_li (code, ppc_r11, 0);
zero_loop_start = code;
ppc_stwu (code, ppc_r11, 4, ins->dreg);
case CEE_RET:
ppc_blr (code);
break;
- case CEE_THROW: {
+ case OP_THROW: {
//ppc_break (code);
ppc_mr (code, ppc_r3, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
- if (cfg->method->dynamic) {
+ if (FORCE_INDIR_CALL || cfg->method->dynamic) {
ppc_lis (code, ppc_r0, 0);
ppc_ori (code, ppc_r0, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_mr (code, ppc_r3, ins->sreg1);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
- if (cfg->method->dynamic) {
+ if (FORCE_INDIR_CALL || cfg->method->dynamic) {
ppc_lis (code, ppc_r0, 0);
ppc_ori (code, ppc_r0, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
ppc_mtlr (code, ppc_r0);
ppc_blr (code);
break;
- case CEE_ENDFINALLY:
+ case OP_ENDFINALLY:
ppc_lwz (code, ppc_r0, ins->inst_left->inst_offset, ins->inst_left->inst_basereg);
ppc_mtlr (code, ppc_r0);
ppc_blr (code);
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
- case CEE_BR:
+ case OP_BR:
//g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
//if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
//break;
/* floating point opcodes */
case OP_R8CONST:
- ppc_load (code, ppc_r11, ins->inst_p0);
- ppc_lfd (code, ins->dreg, 0, ppc_r11);
- break;
case OP_R4CONST:
- ppc_load (code, ppc_r11, ins->inst_p0);
- ppc_lfs (code, ins->dreg, 0, ppc_r11);
- break;
+ g_assert_not_reached ();
case OP_STORER8_MEMBASE_REG:
if (ppc_is_imm16 (ins->inst_offset)) {
ppc_stfd (code, ins->sreg1, ins->inst_offset, ins->inst_destbasereg);
case OP_STORER8_MEMINDEX:
ppc_stfdx (code, ins->sreg1, ins->sreg2, ins->inst_destbasereg);
break;
- case CEE_CONV_R_UN: {
- static const guint64 adjust_val = 0x4330000000000000ULL;
- ppc_addis (code, ppc_r0, ppc_r0, 0x4330);
- ppc_stw (code, ppc_r0, -8, ppc_sp);
- ppc_stw (code, ins->sreg1, -4, ppc_sp);
- ppc_load (code, ppc_r11, &adjust_val);
- ppc_lfd (code, ins->dreg, -8, ppc_sp);
- ppc_lfd (code, ppc_f0, 0, ppc_r11);
- ppc_fsub (code, ins->dreg, ins->dreg, ppc_f0);
- break;
- }
+ case CEE_CONV_R_UN:
case CEE_CONV_R4: /* FIXME: change precision */
- case CEE_CONV_R8: {
- static const guint64 adjust_val = 0x4330000080000000ULL;
- // addis is special for ppc_r0
- ppc_addis (code, ppc_r0, ppc_r0, 0x4330);
- ppc_stw (code, ppc_r0, -8, ppc_sp);
- ppc_xoris (code, ins->sreg1, ppc_r11, 0x8000);
- ppc_stw (code, ppc_r11, -4, ppc_sp);
- ppc_lfd (code, ins->dreg, -8, ppc_sp);
- ppc_load (code, ppc_r11, &adjust_val);
- ppc_lfd (code, ppc_f0, 0, ppc_r11);
- ppc_fsub (code, ins->dreg, ins->dreg, ppc_f0);
- break;
- }
+ case CEE_CONV_R8:
+ g_assert_not_reached ();
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
break;
g_assert_not_reached ();
break;
case OP_FCOMPARE:
- ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
+ ppc_fcmpu (code, 0, ins->sreg1, ins->sreg2);
break;
case OP_FCEQ:
ppc_fcmpo (code, 0, ins->sreg1, ins->sreg2);
EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
break;
case OP_FBLT:
+ ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BLT - CEE_BEQ);
break;
case OP_FBLT_UN:
EMIT_COND_BRANCH (ins, CEE_BLT_UN - CEE_BEQ);
break;
case OP_FBGT:
+ ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BGT - CEE_BEQ);
break;
case OP_FBGT_UN:
EMIT_COND_BRANCH (ins, CEE_BGT_UN - CEE_BEQ);
break;
case OP_FBGE:
+ ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BGE - CEE_BEQ);
break;
case OP_FBGE_UN:
EMIT_COND_BRANCH (ins, CEE_BGE_UN - CEE_BEQ);
break;
case OP_FBLE:
+ ppc_bc (code, PPC_BR_TRUE, PPC_BR_SO, 2);
EMIT_COND_BRANCH (ins, CEE_BLE - CEE_BEQ);
break;
case OP_FBLE_UN:
EMIT_COND_BRANCH (ins, CEE_BLE_UN - CEE_BEQ);
break;
- case CEE_CKFINITE: {
- ppc_stfd (code, ins->sreg1, -8, ppc_sp);
- ppc_lwz (code, ppc_r11, -8, ppc_sp);
- ppc_rlwinm (code, ppc_r11, ppc_r11, 0, 1, 31);
- ppc_addis (code, ppc_r11, ppc_r11, -32752);
- ppc_rlwinmd (code, ppc_r11, ppc_r11, 1, 31, 31);
+ case OP_CKFINITE:
+ g_assert_not_reached ();
+ case OP_CHECK_FINITE: {
+ ppc_rlwinm (code, ins->sreg1, ins->sreg1, 0, 1, 31);
+ ppc_addis (code, ins->sreg1, ins->sreg1, -32752);
+ ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31);
EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");
break;
}
max_offset += 6;
while (ins) {
- max_offset += ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
+ max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
ins = ins->next;
}
}
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
- inst = cfg->varinfo [pos];
+ inst = cfg->args [pos];
if (cfg->verbose_level > 2)
g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ppc_r11, ainfo->offset + soffset);
}
} else if (ainfo->regtype == RegTypeStructByAddr) {
+ /* if it was originally a RegTypeBase */
+ if (ainfo->offset) {
+ /* load the previous stack pointer in r11 */
+ ppc_lwz (code, ppc_r11, 0, ppc_sp);
+ ppc_lwz (code, ppc_r11, ainfo->offset, ppc_r11);
+ } else {
+ ppc_mr (code, ppc_r11, ainfo->reg);
+ }
g_assert (ppc_is_imm16 (inst->inst_offset));
- /* FIXME: handle overrun! with struct sizes not multiple of 4 */
- code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
+ code = emit_memcpy (code, ainfo->vtsize, inst->inst_basereg, inst->inst_offset, ppc_r11, 0);
+ /*g_print ("copy in %s: %d bytes from %d to offset: %d\n", method->name, ainfo->vtsize, ainfo->reg, inst->inst_offset);*/
} else
g_assert_not_reached ();
}
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
ppc_load (code, ppc_r3, cfg->domain);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
- ppc_bl (code, 0);
+ if (FORCE_INDIR_CALL || cfg->method->dynamic) {
+ ppc_lis (code, ppc_r0, 0);
+ ppc_ori (code, ppc_r0, ppc_r0, 0);
+ ppc_mtlr (code, ppc_r0);
+ ppc_blrl (code);
+ } else {
+ ppc_bl (code, 0);
+ }
}
if (method->save_lmf) {
} else {
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_get_lmf_addr");
- if (cfg->method->dynamic) {
+ if (FORCE_INDIR_CALL || cfg->method->dynamic) {
ppc_lis (code, ppc_r0, 0);
ppc_ori (code, ppc_r0, ppc_r0, 0);
ppc_mtlr (code, ppc_r0);
}
/*
- * Keep in sync with CEE_JMP
+ * Keep in sync with OP_JMP
*/
code = cfg->native_code + cfg->code_len;
if (patch_info->type == MONO_PATCH_INFO_EXC) {
i = exception_id_by_name (patch_info->data.target);
if (!exc_throw_found [i]) {
- max_epilog_size += 12;
+ max_epilog_size += 24;
exc_throw_found [i] = TRUE;
}
} else if (patch_info->type == MONO_PATCH_INFO_BB_OVF)
MonoOvfJump *ovfj = patch_info->data.target;
i = exception_id_by_name (ovfj->data.exception);
if (!exc_throw_found [i]) {
- max_epilog_size += 12;
+ max_epilog_size += 24;
exc_throw_found [i] = TRUE;
}
max_epilog_size += 8;
patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
patch_info->data.name = "mono_arch_throw_exception_by_name";
patch_info->ip.i = code - cfg->native_code;
- ppc_b (code, 0);
+ if (FORCE_INDIR_CALL || cfg->method->dynamic) {
+ ppc_lis (code, ppc_r0, 0);
+ ppc_ori (code, ppc_r0, ppc_r0, 0);
+ ppc_mtctr (code, ppc_r0);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+ } else {
+ ppc_b (code, 0);
+ }
break;
}
default:
}
}
+#ifdef MONO_ARCH_HAVE_IMT
+
+/* Byte sizes of the code sequences emitted below, used to pre-compute
+ * per-entry chunk sizes before reserving code memory. */
+#define CMP_SIZE 12
+#define BR_SIZE 4
+#define JUMP_IMM_SIZE 12
+#define ENABLE_WRONG_METHOD_CHECK 0
+
+/*
+ * LOCKING: called with the domain lock held
+ */
+/*
+ * mono_arch_build_imt_thunk:
+ *
+ * Emit the IMT dispatch thunk for VTABLE: a compare/branch tree over
+ * IMT_ENTRIES (count entries) that compares MONO_ARCH_IMT_REG against
+ * each candidate method and jumps through the matching vtable slot.
+ * Two passes: first size every entry, then emit and back-patch the
+ * forward branches.  Returns the start of the generated code.
+ */
+gpointer
+mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
+{
+ int i;
+ int size = 0;
+ guint8 *code, *start;
+
+ /* sizing pass: accumulate the worst-case chunk size of each entry */
+ for (i = 0; i < count; ++i) {
+ MonoIMTCheckItem *item = imt_entries [i];
+ if (item->is_equals) {
+ if (item->check_target_idx) {
+ if (!item->compare_done)
+ item->chunk_size += CMP_SIZE;
+ item->chunk_size += BR_SIZE + JUMP_IMM_SIZE;
+ } else {
+ item->chunk_size += JUMP_IMM_SIZE;
+#if ENABLE_WRONG_METHOD_CHECK
+ item->chunk_size += CMP_SIZE + BR_SIZE + 4;
+#endif
+ }
+ } else {
+ item->chunk_size += CMP_SIZE + BR_SIZE;
+ imt_entries [item->check_target_idx]->compare_done = TRUE;
+ }
+ size += item->chunk_size;
+ }
+ /* the initial load of the vtable address */
+ size += 8;
+ code = mono_code_manager_reserve (domain->code_mp, size);
+ start = code;
+ ppc_load (code, ppc_r11, (guint32)(& (vtable->vtable [0])));
+ /* emission pass */
+ for (i = 0; i < count; ++i) {
+ MonoIMTCheckItem *item = imt_entries [i];
+ item->code_target = code;
+ if (item->is_equals) {
+ if (item->check_target_idx) {
+ if (!item->compare_done) {
+ ppc_load (code, ppc_r0, (guint32)item->method);
+ ppc_cmpl (code, 0, 0, MONO_ARCH_IMT_REG, ppc_r0);
+ }
+ item->jmp_code = code;
+ ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
+ /* match: jump through the vtable slot */
+ ppc_lwz (code, ppc_r0, (sizeof (gpointer) * item->vtable_slot), ppc_r11);
+ ppc_mtctr (code, ppc_r0);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+ } else {
+ /* enable the commented code to assert on wrong method */
+#if ENABLE_WRONG_METHOD_CHECK
+ ppc_load (code, ppc_r0, (guint32)item->method);
+ ppc_cmpl (code, 0, 0, MONO_ARCH_IMT_REG, ppc_r0);
+ item->jmp_code = code;
+ ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
+#endif
+ ppc_lwz (code, ppc_r0, (sizeof (gpointer) * item->vtable_slot), ppc_r11);
+ ppc_mtctr (code, ppc_r0);
+ ppc_bcctr (code, PPC_BR_ALWAYS, 0);
+#if ENABLE_WRONG_METHOD_CHECK
+ ppc_patch (item->jmp_code, code);
+ ppc_break (code);
+ item->jmp_code = NULL;
+#endif
+ }
+ } else {
+ /* interior node of the search tree: branch on unsigned less-than */
+ ppc_load (code, ppc_r0, (guint32)item->method);
+ ppc_cmpl (code, 0, 0, MONO_ARCH_IMT_REG, ppc_r0);
+ item->jmp_code = code;
+ ppc_bc (code, PPC_BR_FALSE, PPC_BR_LT, 0);
+ }
+ }
+ /* patch the branches to get to the target items */
+ for (i = 0; i < count; ++i) {
+ MonoIMTCheckItem *item = imt_entries [i];
+ if (item->jmp_code) {
+ if (item->check_target_idx) {
+ ppc_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
+ }
+ }
+ }
+
+ mono_stats.imt_thunks_size += code - start;
+ g_assert (code - start <= size);
+ mono_arch_flush_icache (start, size);
+ return start;
+}
+
+/*
+ * mono_arch_find_imt_method:
+ *
+ * The interface method is passed to IMT thunks in MONO_ARCH_IMT_REG;
+ * read it back from the saved register state.
+ */
+MonoMethod*
+mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+{
+ return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
+}
+
+/*
+ * mono_arch_find_this_argument:
+ *
+ * Recover the 'this' object for METHOD from the saved argument
+ * registers, delegating the struct-return register shift to
+ * mono_arch_get_this_arg_from_call ().
+ */
+MonoObject*
+mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
+{
+ return mono_arch_get_this_arg_from_call (mono_method_signature (method), (gssize*)regs, NULL);
+}
+#endif
+
MonoInst*
mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{