* MONO_ARCH_HAVE_ATOMIC_* macros have been removed.
* Backends now report whether they support atomic opcodes via
the mono_arch_opcode_supported () function.
* The default implementation of mono_arch_opcode_supported ()
has been removed as almost all backends need to define one
anyway.
* All old atomic_add_* opcodes have been removed. These were no
longer being used but some backends still had code for them.
* atomic_add_new_* opcodes have been renamed to atomic_add_* as
they are now the only atomic add opcodes.
* Itanium-specific atomic_add_imm_* opcodes have been moved to
the __ia64__ section of mini-ops.h.
* Comments have been added to mini-ops.h explaining the precise
semantics of the atomic opcodes.
The net result of this change is a simpler backend interface at
the cost of a slightly bigger binary.
tls_set: src1:i len:16
tls_set_reg: src1:i src2:i len:32
atomic_add_i4: src1:b src2:i dest:i len:32
-atomic_add_new_i4: src1:b src2:i dest:i len:32
-atomic_exchange_i4: src1:b src2:i dest:a len:32
atomic_add_i8: src1:b src2:i dest:i len:32
-atomic_add_new_i8: src1:b src2:i dest:i len:32
+atomic_exchange_i4: src1:b src2:i dest:a len:32
atomic_exchange_i8: src1:b src2:i dest:a len:32
atomic_cas_i4: src1:b src2:i src3:a dest:a len:24
atomic_cas_i8: src1:b src2:i src3:a dest:a len:24
gc_spill_slot_liveness_def: len:0
gc_param_slot_liveness_def: len:0
+atomic_add_i4: dest:i src1:i src2:i len:64
atomic_exchange_i4: dest:i src1:i src2:i len:64
atomic_cas_i4: dest:i src1:i src2:i src3:i len:64
-atomic_add_new_i4: dest:i src1:i src2:i len:64
arm64_cbnzw: src1:i len:16
arm64_cbnzx: src1:i len:16
-atomic_add_new_i4: dest:i src1:i src2:i len:32
-atomic_add_new_i8: dest:i src1:i src2:i len:32
+atomic_add_i4: dest:i src1:i src2:i len:32
+atomic_add_i8: dest:i src1:i src2:i len:32
atomic_exchange_i4: dest:i src1:i src2:i len:32
atomic_exchange_i8: dest:i src1:i src2:i len:32
atomic_cas_i4: dest:i src1:i src2:i src3:i len:32
aot_const: dest:i len:48
tls_get: dest:i len:48
atomic_add_i4: src1:b src2:i dest:i len:48
-atomic_add_new_i4: src1:b src2:i dest:i len:48
-atomic_exchange_i4: src1:b src2:i dest:i len:48
atomic_add_i8: src1:b src2:i dest:i len:48
-atomic_add_new_i8: src1:b src2:i dest:i len:48
-atomic_add_imm_new_i4: src1:b dest:i len:48
-atomic_add_imm_new_i8: src1:b dest:i len:48
+atomic_exchange_i4: src1:b src2:i dest:i len:48
atomic_exchange_i8: src1:b src2:i dest:i len:48
memory_barrier: len:48
+atomic_add_imm_i4: src1:b dest:i len:48
+atomic_add_imm_i8: src1:b dest:i len:48
adc: dest:i src1:i src2:i len:48
addcc: dest:i src1:i src2:i len:48
subcc: dest:i src1:i src2:i len:48
jump_table: dest:i len:20
-atomic_add_new_i4: src1:b src2:i dest:i len:20
-atomic_add_new_i8: src1:b src2:i dest:i len:20
-
+atomic_add_i4: src1:b src2:i dest:i len:20
+atomic_add_i8: src1:b src2:i dest:i len:20
atomic_cas_i4: src1:b src2:i src3:i dest:i len:38
atomic_cas_i8: src1:b src2:i src3:i dest:i len:38
aot_const: dest:i len:8
atomic_add_i4: src1:b src2:i dest:i len:28
atomic_add_i8: src1:b src2:i dest:i len:30
-atomic_add_new_i4: src1:b src2:i dest:i len:28
-atomic_add_new_i8: src1:b src2:i dest:i len:30
atomic_exchange_i4: src1:b src2:i dest:i len:18
atomic_exchange_i8: src1:b src2:i dest:i len:24
br: len:6
tls_set: src1:i len:20
tls_set_reg: src1:i src2:i len:20
atomic_add_i4: src1:b src2:i dest:i len:16
-atomic_add_new_i4: src1:b src2:i dest:i len:16
atomic_exchange_i4: src1:b src2:i dest:a len:24
atomic_cas_i4: src1:b src2:i src3:a dest:a len:24
memory_barrier: len:16
}
#endif
-#ifdef MONO_ARCH_HAVE_ATOMIC_ADD
if (strcmp (cmethod->name, "Increment") == 0) {
MonoInst *ins_iconst;
guint32 opcode = 0;
if (fsig->params [0]->type == MONO_TYPE_I4) {
- opcode = OP_ATOMIC_ADD_NEW_I4;
- cfg->has_atomic_add_new_i4 = TRUE;
+ opcode = OP_ATOMIC_ADD_I4;
+ cfg->has_atomic_add_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_I8;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
} else if (strcmp (cmethod->name, "Decrement") == 0) {
guint32 opcode = 0;
if (fsig->params [0]->type == MONO_TYPE_I4) {
- opcode = OP_ATOMIC_ADD_NEW_I4;
- cfg->has_atomic_add_new_i4 = TRUE;
+ opcode = OP_ATOMIC_ADD_I4;
+ cfg->has_atomic_add_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_I8;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = ins_iconst->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
} else if (strcmp (cmethod->name, "Add") == 0) {
guint32 opcode = 0;
if (fsig->params [0]->type == MONO_TYPE_I4) {
- opcode = OP_ATOMIC_ADD_NEW_I4;
- cfg->has_atomic_add_new_i4 = TRUE;
+ opcode = OP_ATOMIC_ADD_I4;
+ cfg->has_atomic_add_i4 = TRUE;
}
#if SIZEOF_REGISTER == 8
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_I8;
#endif
if (opcode) {
if (!mono_arch_opcode_supported (opcode))
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
MONO_ADD_INS (cfg->cbb, ins);
}
}
-#endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
-#ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
if (strcmp (cmethod->name, "Exchange") == 0) {
guint32 opcode;
gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
if (cfg->gen_write_barriers && is_ref)
emit_write_barrier (cfg, args [0], args [1]);
}
-#endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
-
-#ifdef MONO_ARCH_HAVE_ATOMIC_CAS
+
if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
int size = 0;
gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
if (cfg->gen_write_barriers && is_ref)
emit_write_barrier (cfg, args [0], args [1]);
}
-#endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
if (strcmp (cmethod->name, "MemoryBarrier") == 0)
ins = emit_memory_barrier (cfg, FullBarrier);
int dreg = ins->dreg;
guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
- if (dreg == ins->inst_basereg)
- dreg = AMD64_R11;
-
- if (dreg != ins->sreg2)
- amd64_mov_reg_reg (code, ins->dreg, ins->sreg2, size);
-
- x86_prefix (code, X86_LOCK_PREFIX);
- amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
-
- if (dreg != ins->dreg)
- amd64_mov_reg_reg (code, ins->dreg, dreg, size);
-
- break;
- }
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8: {
- int dreg = ins->dreg;
- guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
-
if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
dreg = AMD64_R11;
}
#endif
+
+/*
+ * Backend capability query replacing the old MONO_ARCH_HAVE_ATOMIC_*
+ * macros: returns TRUE if this backend emits native code for OPCODE.
+ * This backend supports all six atomic opcodes (add/exchange/cas, I4/I8).
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ switch (opcode) {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8:
+ case OP_ATOMIC_CAS_I4:
+ case OP_ATOMIC_CAS_I8:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
#define MONO_ARCH_ENABLE_MONO_LMF_VAR 1
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE 1
-#define MONO_ARCH_HAVE_ATOMIC_ADD 1
-#define MONO_ARCH_HAVE_ATOMIC_EXCHANGE 1
-#define MONO_ARCH_HAVE_ATOMIC_CAS 1
#define MONO_ARCH_HAVE_FULL_AOT_TRAMPOLINES 1
#define MONO_ARCH_HAVE_IMT 1
#define MONO_ARCH_HAVE_TLS_GET (mono_amd64_have_tls_get ())
offset += size;
}
- if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_new_i4) {
+ if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
/* Allocate a temporary used by the atomic ops */
size = 4;
align = 4;
break;
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
- case OP_ATOMIC_ADD_NEW_I4: {
+ case OP_ATOMIC_ADD_I4: {
int tmpreg;
guint8 *buf [16];
arm_patch (buf [2], buf [0]);
arm_patch (buf [1], code);
break;
- case OP_ATOMIC_ADD_NEW_I4:
+ case OP_ATOMIC_ADD_I4:
buf [0] = code;
ARM_DMB (code, ARM_DMB_SY);
ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
mono_arch_opcode_supported (int opcode)
{
switch (opcode) {
+ case OP_ATOMIC_ADD_I4:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_CAS_I4:
- case OP_ATOMIC_ADD_NEW_I4:
return v7_supported;
default:
return FALSE;
#define MONO_ARCH_HAVE_OP_TAIL_CALL 1
#endif
#define MONO_ARCH_HAVE_DUMMY_INIT 1
-#define MONO_ARCH_HAVE_OPCODE_SUPPORTED 1
-#define MONO_ARCH_HAVE_ATOMIC_EXCHANGE 1
-#define MONO_ARCH_HAVE_ATOMIC_CAS 1
-#define MONO_ARCH_HAVE_ATOMIC_ADD 1
#if defined(__native_client__)
#undef MONO_ARCH_SOFT_DEBUG_SUPPORTED
case OP_LOADR8_MEMBASE:
case OP_ATOMIC_EXCHANGE_I4:
case OP_ATOMIC_EXCHANGE_I8:
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8:
- case OP_ATOMIC_ADD_IMM_NEW_I4:
- case OP_ATOMIC_ADD_IMM_NEW_I8:
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_ADD_IMM_I4:
+ case OP_ATOMIC_ADD_IMM_I8:
/* There are no membase instructions on ia64 */
if (ins->inst_offset == 0) {
break;
case OP_MEMORY_BARRIER:
ia64_mf (code);
break;
- case OP_ATOMIC_ADD_IMM_NEW_I4:
+ case OP_ATOMIC_ADD_IMM_I4:
g_assert (ins->inst_offset == 0);
ia64_fetchadd4_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
break;
- case OP_ATOMIC_ADD_IMM_NEW_I8:
+ case OP_ATOMIC_ADD_IMM_I8:
g_assert (ins->inst_offset == 0);
ia64_fetchadd8_acq_hint (code, ins->dreg, ins->inst_basereg, ins->inst_imm, 0);
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->dreg);
case OP_ATOMIC_EXCHANGE_I8:
ia64_xchg8_hint (code, ins->dreg, ins->inst_basereg, ins->sreg2, 0);
break;
- case OP_ATOMIC_ADD_NEW_I4: {
+ case OP_ATOMIC_ADD_I4: {
guint8 *label, *buf;
/* From libatomic_ops */
ia64_add (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
break;
}
- case OP_ATOMIC_ADD_NEW_I8: {
+ case OP_ATOMIC_ADD_I8: {
guint8 *label, *buf;
/* From libatomic_ops */
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
guint32 opcode;
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
MONO_INST_NEW (cfg, ins, opcode);
if (is_imm) {
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+ opcode = OP_ATOMIC_ADD_IMM_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+ opcode = OP_ATOMIC_ADD_IMM_I8;
else
g_assert_not_reached ();
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->inst_imm = imm;
- ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_IMM_I4) ? STACK_I4 : STACK_I8;
} else {
if (fsig->params [0]->type == MONO_TYPE_I4)
- opcode = OP_ATOMIC_ADD_NEW_I4;
+ opcode = OP_ATOMIC_ADD_I4;
else if (fsig->params [0]->type == MONO_TYPE_I8)
- opcode = OP_ATOMIC_ADD_NEW_I8;
+ opcode = OP_ATOMIC_ADD_I8;
else
g_assert_not_reached ();
ins->inst_basereg = args [0]->dreg;
ins->inst_offset = 0;
ins->sreg2 = args [1]->dreg;
- ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+ ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
}
MONO_ADD_INS (cfg->cbb, ins);
}
/* FIXME: implement */
g_assert_not_reached ();
}
+
+/*
+ * Backend capability query (see mono_arch_opcode_supported () contract):
+ * this backend implements atomic add and exchange natively, but not
+ * compare-and-swap, so only those opcodes report TRUE.
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ switch (opcode) {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
#define MONO_ARCH_HAVE_INVALIDATE_METHOD 1
#define MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE 1
#define MONO_ARCH_HAVE_SAVE_UNWIND_INFO 1
-#define MONO_ARCH_HAVE_ATOMIC_EXCHANGE 1
#define MONO_ARCH_HAVE_IMT 1
#define MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK 1
#define MONO_ARCH_THIS_AS_FIRST_ARG 1
values [ins->dreg] = mono_llvm_build_atomic_rmw (builder, LLVM_ATOMICRMW_OP_XCHG, args [0], args [1]);
break;
}
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8: {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8: {
LLVMValueRef args [2];
LLVMTypeRef t;
- if (ins->opcode == OP_ATOMIC_ADD_NEW_I4)
+ if (ins->opcode == OP_ATOMIC_ADD_I4)
t = LLVMInt32Type ();
else
t = LLVMInt64Type ();
}
#endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
+
+/*
+ * No atomic opcodes are implemented natively by this backend;
+ * all Interlocked operations use the generic runtime path.
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ return FALSE;
+}
MINI_OP(OP_XZERO, "xzero", XREG, NONE, NONE)
MINI_OP(OP_XPHI, "xphi", XREG, NONE, NONE)
-/* Atomic specific
-
- Note, OP_ATOMIC_ADD_IMM_NEW_I4 and
- OP_ATOMIC_ADD_NEW_I4 returns the new
- value compared to OP_ATOMIC_ADD_I4 that
- returns the old value.
-
- OP_ATOMIC_ADD_NEW_I4 is used by
- Interlocked::Increment and Interlocked:Decrement
- and atomic_add_i4 by Interlocked::Add
+/*
+ * These are used for efficient implementation of the
+ * methods on the System.Threading.Interlocked class
+ * on architectures that support it. This is checked
+ * via mono_arch_opcode_supported ().
+ *
+ * Note that while the 32-bit variants are used on
+ * both 32-bit and 64-bit systems, the 64-bit variants
+ * are only used if the system is 64-bit. If that is
+ * not the case, the fallback code in the runtime is
+ * used instead. This is done because decomposing the
+ * 64-bit variants to instructions operating on 32-bit
+ * registers is very complicated on some architectures.
+ *
+ * The memory_barrier instruction translates to a full
+ * acquire/release barrier. Such a memory barrier is
+ * implied at the beginning and end of all other atomic
+ * operations, such that they ensure sequential
+ * consistency.
+ *
+ * All of these return the new value at the given
+ * memory location after performing the operation.
*/
MINI_OP(OP_ATOMIC_ADD_I4, "atomic_add_i4", IREG, IREG, IREG)
-MINI_OP(OP_ATOMIC_ADD_NEW_I4, "atomic_add_new_i4", IREG, IREG, IREG)
-MINI_OP(OP_ATOMIC_ADD_IMM_I4, "atomic_add_imm_i4", IREG, IREG, NONE)
-MINI_OP(OP_ATOMIC_ADD_IMM_NEW_I4, "atomic_add_imm_new_i4", IREG, IREG, NONE)
-MINI_OP(OP_ATOMIC_EXCHANGE_I4, "atomic_exchange_i4", IREG, IREG, IREG)
-
MINI_OP(OP_ATOMIC_ADD_I8, "atomic_add_i8", IREG, IREG, IREG)
-MINI_OP(OP_ATOMIC_ADD_NEW_I8, "atomic_add_new_i8", IREG, IREG, IREG)
-MINI_OP(OP_ATOMIC_ADD_IMM_I8, "atomic_add_imm_i8", IREG, IREG, NONE)
-MINI_OP(OP_ATOMIC_ADD_IMM_NEW_I8, "atomic_add_imm_new_i8", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_EXCHANGE_I4, "atomic_exchange_i4", IREG, IREG, IREG)
MINI_OP(OP_ATOMIC_EXCHANGE_I8, "atomic_exchange_i8", IREG, IREG, IREG)
-MINI_OP(OP_MEMORY_BARRIER, "memory_barrier", NONE, NONE, NONE)
-
MINI_OP3(OP_ATOMIC_CAS_I4, "atomic_cas_i4", IREG, IREG, IREG, IREG)
MINI_OP3(OP_ATOMIC_CAS_I8, "atomic_cas_i8", IREG, IREG, IREG, IREG)
+MINI_OP(OP_MEMORY_BARRIER, "memory_barrier", NONE, NONE, NONE)
/* Conditional move opcodes.
* Must be in the same order as the matching CEE_B... opcodes
#endif
#if defined(__ia64__)
+MINI_OP(OP_ATOMIC_ADD_IMM_I4, "atomic_add_imm_i4", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_ADD_IMM_I8, "atomic_add_imm_i8", IREG, IREG, NONE)
+
MINI_OP(OP_IA64_LOAD, "ia64_load", NONE, NONE, NONE)
MINI_OP(OP_IA64_LOADI1, "ia64_loadi1", NONE, NONE, NONE)
MINI_OP(OP_IA64_LOADU1, "ia64_loadu1", NONE, NONE, NONE)
else
ppc_mr (code, ins->dreg, ins->sreg1);
break;
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8: {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8: {
guint8 *loop = code, *branch;
g_assert (ins->inst_offset == 0);
ppc_sync (code);
- if (ins->opcode == OP_ATOMIC_ADD_NEW_I4)
+ if (ins->opcode == OP_ATOMIC_ADD_I4)
ppc_lwarx (code, ppc_r0, 0, ins->inst_basereg);
else
ppc_ldarx (code, ppc_r0, 0, ins->inst_basereg);
ppc_add (code, ppc_r0, ppc_r0, ins->sreg2);
- if (ins->opcode == OP_ATOMIC_ADD_NEW_I4)
+ if (ins->opcode == OP_ATOMIC_ADD_I4)
ppc_stwcxd (code, ppc_r0, 0, ins->inst_basereg);
else
ppc_stdcxd (code, ppc_r0, 0, ins->inst_basereg);
}
#endif
+
+/*
+ * Backend capability query: CAS on 32-bit values is always available;
+ * the 64-bit opcodes — and, notably, atomic add — are only emitted on
+ * 64-bit PowerPC (the ldarx/stdcx sequences require ppc64).
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ switch (opcode) {
+ case OP_ATOMIC_CAS_I4:
+#ifdef TARGET_POWERPC64
+ case OP_ATOMIC_ADD_I4: /* Yes, really - see cpu-ppc(64).md. */
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_CAS_I8:
+#endif
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
#ifdef __mono_ppc64__
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS
#define MONO_ARCH_NO_EMULATE_LONG_MUL_OPTS
-#define MONO_ARCH_HAVE_ATOMIC_ADD 1
#define PPC_USES_FUNCTION_DESCRIPTOR
#ifndef __mono_ilp32__
#define MONO_ARCH_EMULATE_LCONV_TO_R8_UN 1
#define MONO_ARCH_EMULATE_FREM 1
#define MONO_ARCH_BIGMUL_INTRINS 1
-#define MONO_ARCH_HAVE_ATOMIC_CAS 1
/* Parameters used by the register allocator */
#define MONO_ARCH_CALLEE_REGS ((0xff << ppc_r3) | (1 << ppc_r11) | (1 << ppc_r12))
s390_lgr (code, ins->dreg, s390_r1);
}
break;
- case OP_ATOMIC_ADD_NEW_I8: {
- s390_lgr (code, s390_r1, ins->sreg2);
- s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
- s390_agr (code, s390_r1, s390_r0);
- s390_csg (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
- s390_jnz (code, -10);
- s390_lgr (code, ins->dreg, s390_r1);
- }
- break;
case OP_ATOMIC_EXCHANGE_I8: {
s390_lg (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_csg (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
s390_lgfr(code, ins->dreg, s390_r1);
}
break;
- case OP_ATOMIC_ADD_NEW_I4: {
- s390_lgfr(code, s390_r1, ins->sreg2);
- s390_lgf (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
- s390_agr (code, s390_r1, s390_r0);
- s390_cs (code, s390_r0, s390_r1, ins->inst_basereg, ins->inst_offset);
- s390_jnz (code, -9);
- s390_lgfr(code, ins->dreg, s390_r1);
- }
- break;
case OP_ATOMIC_EXCHANGE_I4: {
s390_l (code, s390_r0, 0, ins->inst_basereg, ins->inst_offset);
s390_cs (code, s390_r0, ins->sreg2, ins->inst_basereg, ins->inst_offset);
/*========================= End of Function ========================*/
#endif
+
+/*
+ * Backend capability query: s390 implements atomic add and exchange
+ * (via compare-and-swap loops in the code emitter) for both I4 and I8,
+ * but does not report the CAS opcodes themselves.
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ switch (opcode) {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
#define MONO_ARCH_NEED_DIV_CHECK 1
-#define MONO_ARCH_HAVE_ATOMIC_ADD 1
-#define MONO_ARCH_HAVE_ATOMIC_EXCHANGE 1
#define MONO_ARCH_SIGNAL_STACK_SIZE 256*1024
#define MONO_ARCH_HAVE_DECOMPOSE_OPTS 1
#define MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE 1
/* FIXME: implement */
g_assert_not_reached ();
}
+
+/*
+ * No atomic opcodes are implemented natively by this backend;
+ * all Interlocked operations use the generic runtime path.
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ return FALSE;
+}
cfg->frame_reg = X86_EBP;
offset = 0;
- if (cfg->has_atomic_add_new_i4 || cfg->has_atomic_exchange_i4) {
+ if (cfg->has_atomic_add_i4 || cfg->has_atomic_exchange_i4) {
/* The opcode implementations use callee-saved regs as scratch regs by pushing and pop-ing them, but that is not async safe */
cfg->used_int_regs |= (1 << X86_EBX) | (1 << X86_EDI) | (1 << X86_ESI);
}
case OP_ATOMIC_ADD_I4: {
int dreg = ins->dreg;
- if (dreg == ins->inst_basereg) {
- x86_push_reg (code, ins->sreg2);
- dreg = ins->sreg2;
- }
-
- if (dreg != ins->sreg2)
- x86_mov_reg_reg (code, ins->dreg, ins->sreg2, 4);
-
- x86_prefix (code, X86_LOCK_PREFIX);
- x86_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, 4);
-
- if (dreg != ins->dreg) {
- x86_mov_reg_reg (code, ins->dreg, dreg, 4);
- x86_pop_reg (code, dreg);
- }
-
- break;
- }
- case OP_ATOMIC_ADD_NEW_I4: {
- int dreg = ins->dreg;
-
- g_assert (cfg->has_atomic_add_new_i4);
+ g_assert (cfg->has_atomic_add_i4);
/* hack: limit in regalloc, dreg != sreg1 && dreg != sreg2 */
if (ins->sreg2 == dreg) {
#endif
+/*
+ * Backend capability query: x86 is 32-bit, so only the I4 atomic
+ * opcodes are implemented; I8 variants fall back to the runtime
+ * (decomposing them onto 32-bit registers is impractical — see the
+ * mini-ops.h notes on 64-bit atomic variants).
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+ switch (opcode) {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_CAS_I4:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
#if defined(ENABLE_GSHAREDVT)
#include "../../../mono-extensions/mono/mini/mini-x86-gsharedvt.c"
#define MONO_ARCH_NEED_GOT_VAR 1
#define MONO_ARCH_ENABLE_MONO_LMF_VAR 1
#define MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE 1
-#define MONO_ARCH_HAVE_ATOMIC_ADD 1
-#define MONO_ARCH_HAVE_ATOMIC_EXCHANGE 1
-#define MONO_ARCH_HAVE_ATOMIC_CAS 1
#define MONO_ARCH_HAVE_IMT 1
#define MONO_ARCH_HAVE_TLS_GET (mono_x86_have_tls_get ())
#define MONO_ARCH_IMT_REG X86_EDX
#endif
-#ifndef MONO_ARCH_HAVE_OPCODE_SUPPORTED
-
-gboolean
-mono_arch_opcode_supported (int opcode)
-{
- return TRUE;
-}
-
-#endif
-
#if defined(MONO_ARCH_GSHAREDVT_SUPPORTED) && !defined(ENABLE_GSHAREDVT)
gboolean
guint soft_breakpoints : 1;
guint arch_eh_jit_info : 1;
guint has_indirection : 1;
- guint has_atomic_add_new_i4 : 1;
+ guint has_atomic_add_i4 : 1;
guint has_atomic_exchange_i4 : 1;
guint has_atomic_cas_i4 : 1;
guint check_pinvoke_callconv : 1;