sext_i2: dest:i src1:y len:3
tls_get: dest:i len:20
tls_get_reg: dest:i src1:i len:20
+tls_set: src1:i len:20
+tls_set_reg: src1:i src2:i len:20
atomic_add_i4: src1:b src2:i dest:i len:16
atomic_add_new_i4: src1:b src2:i dest:i len:16
atomic_exchange_i4: src1:b src2:i dest:a len:24
return code;
}
+/*
+ * emit_tls_set_reg:
+ *
+ *   Emit native code which stores SREG into the thread-local-storage slot
+ * whose index is held at runtime in OFFSET_REG; counterpart of the
+ * emit_tls_get_reg helper used by OP_TLS_GET_REG. Returns the advanced
+ * code pointer. Only OSX and Linux are implemented; all other targets
+ * assert.
+ */
+static guint8*
+emit_tls_set_reg (guint8* code, int sreg, int offset_reg)
+{
+#ifdef HOST_WIN32
+ g_assert_not_reached ();
+#elif defined(__APPLE__)
+ // FIXME: tls_gs_offset can change too, do these when calculating the tls offset
+ /*
+  * Scale the slot index to a byte offset (SHL 2 => *4, i.e. 4-byte
+  * pointer slots), bias it by tls_gs_offset, then store GS-relative.
+  * NOTE(review): the emitted code modifies OFFSET_REG in place, so its
+  * runtime value is clobbered here — presumably the register allocator
+  * treats it as trashed; confirm against the OP_TLS_SET_REG spec entry.
+  */
+ x86_shift_reg_imm (code, X86_SHL, offset_reg, 2);
+ if (tls_gs_offset)
+ x86_alu_reg_imm (code, X86_ADD, offset_reg, tls_gs_offset);
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_membase_reg (code, offset_reg, 0, sreg, sizeof (mgreg_t));
+#elif defined(__linux__)
+ /* On Linux OFFSET_REG already holds a byte offset; store GS-relative. */
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_membase_reg (code, offset_reg, 0, sreg, sizeof (mgreg_t));
+#else
+ g_assert_not_reached ();
+#endif
+ return code;
+}
+
/*
* emit_setup_lmf:
*
code = emit_tls_get_reg (code, ins->dreg, ins->sreg1);
break;
}
+ case OP_TLS_SET: {
+ code = mono_x86_emit_tls_set (code, ins->sreg1, ins->inst_offset);
+ break;
+ }
+ case OP_TLS_SET_REG: {
+ code = emit_tls_set_reg (code, ins->sreg1, ins->sreg2);
+ break;
+ }
case OP_MEMORY_BARRIER: {
/* x86 only needs barrier for StoreLoad and FullBarrier */
switch (ins->backend.memory_barrier_kind) {