case OP_LOADR8_MEMBASE:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8:
- case OP_ATOMIC_ADD_IMM_NEW_I4:
- case OP_ATOMIC_ADD_IMM_NEW_I8:
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_ADD_IMM_I4:
+ case OP_ATOMIC_ADD_IMM_I8:
/* There are no membase instructions on ia64 */
if (ins->inst_offset == 0) {
break;
case OP_MEMORY_BARRIER:
ia64_mf (code);
break;
- case OP_ATOMIC_ADD_IMM_NEW_I4:
+ case OP_ATOMIC_ADD_IMM_I4:
g_assert (ins->inst_offset == 0);
ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
break;
- case OP_ATOMIC_ADD_IMM_NEW_I8:
+ case OP_ATOMIC_ADD_IMM_I8:
g_assert (ins->inst_offset == 0);
ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
case OP_ATOMIC_EXCHANGE_I8:
ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
break;
- case OP_ATOMIC_ADD_NEW_I4: {
+ case OP_ATOMIC_ADD_I4: {
guint8 *label, *buf;
/* From libatomic_ops */
ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
break;
}
- case OP_ATOMIC_ADD_NEW_I8: {
+ case OP_ATOMIC_ADD_I8: {
guint8 *label, *buf;
/* From libatomic_ops */
{
}
-#ifdef MONO_ARCH_HAVE_IMT
-
/*
* LOCKING: called with the domain lock held
*/
{
/* Done by the implementation of the CALL_MEMBASE opcodes */
}
-#endif
gpointer
mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
return NULL;
}
+/*
+ * mono_arch_get_delegate_virtual_invoke_impl:
+ *
+ *   Stub: no specialized virtual delegate invoke trampoline is provided by
+ * this backend. Returning NULL presumably makes the runtime fall back to a
+ * generic invoke path — NOTE(review): confirm fallback behavior in the caller.
+ * All parameters are ignored.
+ */
+gpointer
+mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
+{
+	return NULL;
+}
+
+
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
if (is_imm) {
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->inst_imm = imm;
- ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_IMM_I4) ? STACK_I4 : STACK_I8;
} else {
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
+ opcode = OP_ATOMIC_ADD_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_I8;
else
g_assert_not_reached ();
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
}
MONO_ADD_INS (cfg->cbb, ins);
}
/* FIXME: implement */
g_assert_not_reached ();
}
+
+
+/*
+ * mono_arch_opcode_supported:
+ *
+ *   Report whether this backend can emit native code for OPCODE.
+ * Returns TRUE only for the atomic add/exchange opcodes that the opcode
+ * emitter above implements (via ia64 fetchadd4/fetchadd8 and xchg4/xchg8);
+ * every other opcode returns FALSE.
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+	switch (opcode) {
+	case OP_ATOMIC_ADD_I4:
+	case OP_ATOMIC_ADD_I8:
+	case OP_ATOMIC_EXCHANGE_I4:
+	case OP_ATOMIC_EXCHANGE_I8:
+		return TRUE;
+	default:
+		return FALSE;
+	}
+}