#if SIZEOF_REGISTER == 8
if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
- MonoInst *load_ins;
+ if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
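+ /* The backend has a native atomic 64-bit load; attaching MONO_MEMORY_BARRIER_SEQ gives it sequentially consistent semantics. */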
+ MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
+ ins->dreg = mono_alloc_preg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
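+ /* Fallback: bracket a plain 64-bit load with full barriers to preserve sequential consistency. */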
+ MonoInst *load_ins;
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
- /* 64 bit reads are already atomic */
- MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
- load_ins->dreg = mono_alloc_preg (cfg);
- load_ins->inst_basereg = args [0]->dreg;
- load_ins->inst_offset = 0;
- MONO_ADD_INS (cfg->cbb, load_ins);
+ /* 64 bit reads are already atomic */
+ MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
+ load_ins->dreg = mono_alloc_preg (cfg);
+ load_ins->inst_basereg = args [0]->dreg;
+ load_ins->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, load_ins);
- emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
- ins = load_ins;
+ ins = load_ins;
+ }
}
#endif
MONO_ADD_INS (cfg->cbb, ins);
}
}
-
- if (strcmp (cmethod->name, "Exchange") == 0) {
+ else if (strcmp (cmethod->name, "Exchange") == 0) {
guint32 opcode;
gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
if (cfg->gen_write_barriers && is_ref)
emit_write_barrier (cfg, args [0], args [1]);
}
-
- if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
+ else if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
int size = 0;
gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
if (fsig->params [1]->type == MONO_TYPE_I4)
if (cfg->gen_write_barriers && is_ref)
emit_write_barrier (cfg, args [0], args [1]);
}
-
- if (strcmp (cmethod->name, "MemoryBarrier") == 0)
+ else if (strcmp (cmethod->name, "MemoryBarrier") == 0)
ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
+ if (ins)
+ return ins;
+ } else if (cmethod->klass->image == mono_defaults.corlib &&
+ (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
+ (strcmp (cmethod->klass->name, "Volatile") == 0)) {
+ ins = NULL;
+
+ if (!strcmp (cmethod->name, "Read")) {
+ guint32 opcode = 0;
+ gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
+
+ if (fsig->params [0]->type == MONO_TYPE_I1)
+ opcode = OP_ATOMIC_LOAD_I1;
+ else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
+ opcode = OP_ATOMIC_LOAD_U1;
+ else if (fsig->params [0]->type == MONO_TYPE_I2)
+ opcode = OP_ATOMIC_LOAD_I2;
+ else if (fsig->params [0]->type == MONO_TYPE_U2)
+ opcode = OP_ATOMIC_LOAD_U2;
+ else if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_LOAD_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_ATOMIC_LOAD_U4;
+#if SIZEOF_REGISTER == 8
+ else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
+ opcode = OP_ATOMIC_LOAD_I8;
+ else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
+ opcode = OP_ATOMIC_LOAD_U8;
+#else
+ else if (fsig->params [0]->type == MONO_TYPE_I)
+ opcode = OP_ATOMIC_LOAD_I4;
+ else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
+ opcode = OP_ATOMIC_LOAD_U4;
+#endif
+
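+ /* Volatile.Read needs acquire semantics, hence MONO_MEMORY_BARRIER_ACQ below; if the backend lacks the opcode, returning NULL makes the JIT emit an ordinary call to the method instead. */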
+ if (opcode) {
+ if (!mono_arch_opcode_supported (opcode))
+ return NULL;
+
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+ }
+
+ if (!strcmp (cmethod->name, "Write")) {
+ guint32 opcode = 0;
+ gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
+
+ if (fsig->params [0]->type == MONO_TYPE_I1)
+ opcode = OP_ATOMIC_STORE_I1;
+ else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
+ opcode = OP_ATOMIC_STORE_U1;
+ else if (fsig->params [0]->type == MONO_TYPE_I2)
+ opcode = OP_ATOMIC_STORE_I2;
+ else if (fsig->params [0]->type == MONO_TYPE_U2)
+ opcode = OP_ATOMIC_STORE_U2;
+ else if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_ATOMIC_STORE_I4;
+ else if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_ATOMIC_STORE_U4;
+#if SIZEOF_REGISTER == 8
+ else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
+ opcode = OP_ATOMIC_STORE_I8;
+ else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
+ opcode = OP_ATOMIC_STORE_U8;
+#else
+ else if (fsig->params [0]->type == MONO_TYPE_I)
+ opcode = OP_ATOMIC_STORE_I4;
+ else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
+ opcode = OP_ATOMIC_STORE_U4;
+#endif
+
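+ /* Volatile.Write needs release semantics (MONO_MEMORY_BARRIER_REL); storing a reference additionally requires a GC write barrier when write barriers are enabled. */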
+ if (opcode) {
+ if (!mono_arch_opcode_supported (opcode))
+ return NULL;
+
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->dreg = args [0]->dreg;
+ ins->sreg1 = args [1]->dreg;
+ ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ if (cfg->gen_write_barriers && is_ref)
+ emit_write_barrier (cfg, args [0], args [1]);
+ }
+ }
+
if (ins)
return ins;
} else if (cmethod->klass->image == mono_defaults.corlib) {
/*
* These are used for efficient implementation of the
- * methods on the System.Threading.Interlocked class
- * on architectures that support it. This is checked
- * via mono_arch_opcode_supported ().
+ * atomic methods on Interlocked, Volatile, and Thread.
+ * This is done only on architectures that support it,
+ * as per mono_arch_opcode_supported ().
*
* Note that while the 32-bit variants are used on
* both 32-bit and 64-bit systems, the 64-bit variants
 * are only used on 64-bit systems, since mapping the
 * 64-bit variants to instructions operating on 32-bit
* registers is very complicated on some architectures.
*
- * The memory_barrier instruction translates to a full
- * acquire/release barrier. Such a memory barrier is
- * implied at the beginning and end of all other atomic
- * operations, such that they ensure sequential
- * consistency.
+ * For memory_barrier and load/store instructions, the
+ * inst.backend.memory_barrier_kind field indicates
+ * which semantics to use.
*
- * All of these return the new value at the given
- * memory location after performing the operation.
-*/
+ * Where relevant, all of these return the new value at
+ * the given memory location after performing the
+ * operation.
+ */
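+
+/*
+ * For illustration only (not part of this patch): a backend
+ * typically lowers an atomic load in its code generation
+ * switch along these lines, with hypothetical emit_* helpers
+ * standing in for the architecture-specific encoders:
+ *
+ *     case OP_ATOMIC_LOAD_I4:
+ *         // plain 32-bit load from [sreg1] into dreg ...
+ *         emit_load_membase (code, 4, ins->dreg, ins->sreg1, 0);
+ *         // ... followed by a fence at least as strong as the
+ *         // requested kind (ACQ or SEQ), so that later accesses
+ *         // cannot be reordered before the load
+ *         emit_barrier (code, ins->backend.memory_barrier_kind);
+ *         break;
+ */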
+
+MINI_OP(OP_MEMORY_BARRIER, "memory_barrier", NONE, NONE, NONE)
+
+MINI_OP(OP_ATOMIC_LOAD_I1, "atomic_load_i1", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_LOAD_I2, "atomic_load_i2", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_LOAD_I4, "atomic_load_i4", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_LOAD_I8, "atomic_load_i8", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_LOAD_U1, "atomic_load_u1", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_LOAD_U2, "atomic_load_u2", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_LOAD_U4, "atomic_load_u4", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_LOAD_U8, "atomic_load_u8", IREG, IREG, NONE)
+
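+/* Note that for the store opcodes, dreg is the destination address register and sreg1 the value being stored. */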
+MINI_OP(OP_ATOMIC_STORE_I1, "atomic_store_i1", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_STORE_I2, "atomic_store_i2", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_STORE_I4, "atomic_store_i4", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_STORE_I8, "atomic_store_i8", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_STORE_U1, "atomic_store_u1", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_STORE_U2, "atomic_store_u2", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_STORE_U4, "atomic_store_u4", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_STORE_U8, "atomic_store_u8", IREG, IREG, NONE)
+
MINI_OP(OP_ATOMIC_ADD_I4, "atomic_add_i4", IREG, IREG, IREG)
MINI_OP(OP_ATOMIC_ADD_I8, "atomic_add_i8", IREG, IREG, IREG)
+
MINI_OP(OP_ATOMIC_EXCHANGE_I4, "atomic_exchange_i4", IREG, IREG, IREG)
MINI_OP(OP_ATOMIC_EXCHANGE_I8, "atomic_exchange_i8", IREG, IREG, IREG)
+
MINI_OP3(OP_ATOMIC_CAS_I4, "atomic_cas_i4", IREG, IREG, IREG, IREG)
MINI_OP3(OP_ATOMIC_CAS_I8, "atomic_cas_i8", IREG, IREG, IREG, IREG)
-MINI_OP(OP_MEMORY_BARRIER, "memory_barrier", NONE, NONE, NONE)
/* Conditional move opcodes.
* Must be in the same order as the matching CEE_B... opcodes