#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/utils/mono-math.h>
+#include <mono/utils/mono-hwcap-ia64.h>
#include "trace.h"
#include "mini-ia64.h"
return "unknown";
}
-G_GNUC_UNUSED static void
-break_count (void)
-{
-}
-
-G_GNUC_UNUSED static gboolean
-debug_count (void)
-{
- static int count = 0;
- count ++;
-
- if (count == atoi (getenv ("COUNT"))) {
- break_count ();
- }
-
- if (count > atoi (getenv ("COUNT"))) {
- return FALSE;
- }
-
- return TRUE;
-}
-
static gboolean
debug_ins_sched (void)
{
#if 0
- return debug_count ();
+ return mono_debug_count ();
#else
return TRUE;
#endif
debug_omit_fp (void)
{
#if 0
- return debug_count ();
+ return mono_debug_count ();
#else
return TRUE;
#endif
guint32 reg_usage;
guint32 freg_usage;
gboolean need_stack_align;
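+ /* Whether the return value is a vtype returned through a hidden address argument */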
+ gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
static CallInfo*
get_call_info (MonoCompile *cfg, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
- guint32 i, gr, fr;
+ guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
cinfo->ret.storage = ArgInIReg;
} else {
add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack)
+ if (cinfo->ret.storage == ArgOnStack) {
/* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
- if (cinfo->ret.storage == ArgInIReg)
- cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ cinfo->vtype_retaddr = TRUE;
+ }
}
break;
}
}
}
- /* this */
- if (sig->hasthis)
- add_general (&gr, &stack_size, cinfo->args + 0);
+ pstart = 0;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, so that 'this' is always passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers.
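+ * E.g. for an instance method returning a vtype, 'this' is passed in the first
+ * arg reg, the vret address in the second, and the remaining args follow.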
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
+ pstart = 1;
+ }
+ add_general (&gr, &stack_size, &cinfo->ret);
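+ /* If add_general () assigned a register, retag it as holding the vtype return address */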
+ if (cinfo->ret.storage == ArgInIReg)
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis)
+ add_general (&gr, &stack_size, cinfo->args + 0);
+
+ if (cinfo->vtype_retaddr) {
+ add_general (&gr, &stack_size, &cinfo->ret);
+ if (cinfo->ret.storage == ArgInIReg)
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ }
+ }
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
* Returns the size of the argument area on the stack.
*/
int
-mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
CallInfo *cinfo = get_call_info (NULL, NULL, csig, FALSE);
* This function returns the optimizations supported on this cpu.
*/
guint32
-mono_arch_cpu_optimizazions (guint32 *exclude_mask)
+mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
*exclude_mask = 0;
return 0;
}
+/*
+ * This function tests which SIMD versions are supported.
+ *
+ * Returns a bitmask corresponding to all supported versions.
+ */
+guint32
+mono_arch_cpu_enumerate_simd_versions (void)
+{
+ /* SIMD is currently unimplemented */
+ return 0;
+}
+
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
}
/* Allocate locals */
- offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
+ offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE : TRUE, &locals_stack_size, &locals_stack_align);
if (locals_stack_align) {
offset = ALIGN_TO (offset, locals_stack_align);
}
case OP_LOADR8_MEMBASE:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8:
- case OP_ATOMIC_ADD_IMM_NEW_I4:
- case OP_ATOMIC_ADD_IMM_NEW_I8:
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_ADD_IMM_I4:
+ case OP_ATOMIC_ADD_IMM_I8:
/* There are no membase instructions on ia64 */
if (ins->inst_offset == 0) {
break;
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
code_start = cfg->native_code + offset;
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
ia64_codegen_init (code, code_start);
}
/* Calls */
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL */
- ia64_ld8 (code, GP_SCRATCH_REG, ins->sreg1);
+ /* Can't use ld8 as this could be a vtype address */
+ ia64_ld1 (code, GP_SCRATCH_REG, ins->sreg1);
break;
case OP_ARGLIST:
ia64_adds_imm (code, GP_SCRATCH_REG, cfg->sig_cookie, cfg->frame_reg);
int out_reg;
/*
- * mono_arch_find_this_arg () needs to find the this argument in a global
+ * mono_arch_get_this_arg_from_call () needs to find the this argument in a global
* register.
*/
cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
out_reg = cfg->arch.reg_out0;
- if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
- out_reg ++;
ia64_mov (code, IA64_R10, out_reg);
/* Indirect call */
CallInfo *cinfo;
int out_reg;
- /*
- * There are no membase instructions on ia64, but we can't
- * lower this since get_vcall_slot_addr () needs to decode it.
- */
-
- /* Keep this in synch with get_vcall_slot_addr */
ia64_mov (code, IA64_R11, ins->sreg1);
if (ia64_is_imm14 (ins->inst_offset))
ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
*/
cinfo = get_call_info (cfg, cfg->mempool, call->signature, FALSE);
out_reg = cfg->arch.reg_out0;
- if (cinfo->ret.storage == ArgValuetypeAddrInIReg)
- out_reg ++;
ia64_mov (code, IA64_R10, out_reg);
- ia64_begin_bundle (code);
- ia64_codegen_set_one_ins_per_bundle (code, TRUE);
-
ia64_ld8 (code, GP_SCRATCH_REG, IA64_R8);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
- /*
- * This nop will tell get_vcall_slot_addr that this is a virtual
- * call.
- */
- ia64_nop_i (code, 0x12345);
-
ia64_br_call_reg (code, IA64_B0, IA64_B6);
- ia64_codegen_set_one_ins_per_bundle (code, FALSE);
-
code = emit_move_return_value (cfg, ins, code);
break;
}
case OP_MEMORY_BARRIER:
ia64_mf (code);
break;
- case OP_ATOMIC_ADD_IMM_NEW_I4:
+ case OP_ATOMIC_ADD_IMM_I4:
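+ /* fetchadd returns the old value, so the immediate is added again below to
+ produce the new value these opcodes must return. The I8 case is identical. */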
g_assert (ins->inst_offset == 0);
ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
break;
- case OP_ATOMIC_ADD_IMM_NEW_I8:
+ case OP_ATOMIC_ADD_IMM_I8:
g_assert (ins->inst_offset == 0);
ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
case OP_ATOMIC_EXCHANGE_I8:
ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
break;
- case OP_ATOMIC_ADD_NEW_I4: {
+ case OP_ATOMIC_ADD_I4: {
guint8 *label, *buf;
/* From libatomic_ops */
ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
break;
}
- case OP_ATOMIC_ADD_NEW_I8: {
+ case OP_ATOMIC_ADD_I8: {
guint8 *label, *buf;
/* From libatomic_ops */
ia64_movl (code, GP_SCRATCH_REG2, 0);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
ia64_br_cond_reg (code, IA64_B6);
- mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
+ // FIXME:
+ //mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
ia64_codegen_set_one_ins_per_bundle (code, FALSE);
break;
case OP_START_HANDLER: {
}
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
+mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
while (cfg->code_len + max_epilog_size > cfg->code_size) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
/* FIXME: Emit unwind info */
while (cfg->code_len + code_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
- mono_jit_stats.code_reallocs++;
+ cfg->stat_code_reallocs++;
}
ia64_codegen_init (code, cfg->native_code + cfg->code_len);
return 0;
}
-gpointer
-mono_arch_get_vcall_slot (guint8* code, mgreg_t *regs, int *displacement)
-{
- guint8 *bundle2 = code - 48;
- guint8 *bundle3 = code - 32;
- guint8 *bundle4 = code - 16;
- guint64 ins21 = ia64_bundle_ins1 (bundle2);
- guint64 ins22 = ia64_bundle_ins2 (bundle2);
- guint64 ins23 = ia64_bundle_ins3 (bundle2);
- guint64 ins31 = ia64_bundle_ins1 (bundle3);
- guint64 ins32 = ia64_bundle_ins2 (bundle3);
- guint64 ins33 = ia64_bundle_ins3 (bundle3);
- guint64 ins41 = ia64_bundle_ins1 (bundle4);
- guint64 ins42 = ia64_bundle_ins2 (bundle4);
- guint64 ins43 = ia64_bundle_ins3 (bundle4);
-
- /*
- * Virtual calls are made with:
- *
- * [MII] ld8 r31=[r8]
- * nop.i 0x0
- * nop.i 0x0;;
- * [MII] nop.m 0x0
- * mov.sptk b6=r31,0x2000000000f32a80
- * nop.i 0x0
- * [MII] nop.m 0x0
- * nop.i 0x123456
- * nop.i 0x0
- * [MIB] nop.m 0x0
- * nop.i 0x0
- * br.call.sptk.few b0=b6;;
- */
-
- if (((ia64_bundle_template (bundle3) == IA64_TEMPLATE_MII) ||
- (ia64_bundle_template (bundle3) == IA64_TEMPLATE_MIIS)) &&
- (ia64_bundle_template (bundle4) == IA64_TEMPLATE_MIBS) &&
- (ins31 == IA64_NOP_M) &&
- (ia64_ins_opcode (ins32) == 0) && (ia64_ins_x3 (ins32) == 0) && (ia64_ins_x6 (ins32) == 0x1) && (ia64_ins_y (ins32) == 0) &&
- (ins33 == IA64_NOP_I) &&
- (ins41 == IA64_NOP_M) &&
- (ins42 == IA64_NOP_I) &&
- (ia64_ins_opcode (ins43) == 1) && (ia64_ins_b1 (ins43) == 0) && (ia64_ins_b2 (ins43) == 6) &&
- ((ins32 >> 6) & 0xfffff) == 0x12345) {
- g_assert (ins21 == IA64_NOP_M);
- g_assert (ins23 == IA64_NOP_I);
- g_assert (ia64_ins_opcode (ins22) == 0);
- g_assert (ia64_ins_x3 (ins22) == 7);
- g_assert (ia64_ins_x (ins22) == 0);
- g_assert (ia64_ins_b1 (ins22) == IA64_B6);
-
- *displacement = (gssize)regs [IA64_R8] - (gssize)regs [IA64_R11];
-
- return (gpointer)regs [IA64_R11];
- }
-
- return NULL;
-}
-
gpointer*
mono_arch_get_delegate_method_ptr_addr (guint8* code, mgreg_t *regs)
{
}
void
-mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
+mono_arch_finish_init (void)
{
}
{
}
-#ifdef MONO_ARCH_HAVE_IMT
-
/*
* LOCKING: called with the domain lock held
*/
item->jmp_code = (guint8*)code.buf + code.nins;
ia64_br_cond_pred (code, 7, 0);
- ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
- ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
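+ /* Branch directly to the supplied target code instead of loading the method address from the vtable slot */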
+ if (item->has_target_code) {
+ ia64_movl (code, GP_SCRATCH_REG, item->value.target_code);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, &(vtable->vtable [item->value.vtable_slot]));
+ ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
+ }
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
if (fail_case) {
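+ /* Close the current bundle so the patch target below falls on a bundle boundary */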
+ ia64_begin_bundle (code);
ia64_patch (item->jmp_code, (guint8*)code.buf + code.nins);
ia64_movl (code, GP_SCRATCH_REG, fail_tramp);
- ia64_ld8 (code, GP_SCRATCH_REG, GP_SCRATCH_REG);
ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
ia64_br_cond_reg (code, IA64_B6);
item->jmp_code = NULL;
{
/* Done by the implementation of the CALL_MEMBASE opcodes */
}
-#endif
gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
return (gpointer)regs [IA64_R10];
}
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
if (is_imm) {
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->inst_imm = imm;
- ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_IMM_I4) ? STACK_I4 : STACK_I8;
} else {
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
+ opcode = OP_ATOMIC_ADD_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_I8;
else
g_assert_not_reached ();
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
}
MONO_ADD_INS (cfg->cbb, ins);
}
return 0;
}
-MonoInst*
-mono_arch_get_domain_intrinsic (MonoCompile* cfg)
-{
- return mono_get_domain_intrinsic (cfg);
-}
-
-gpointer
+mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
/* FIXME: implement */
g_assert_not_reached ();
}
+
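+/* Returns whether the given opcode (currently only the atomic ops) is supported by this backend */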
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ switch (opcode) {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}