return atomic_add_32_nv((uint32_t*)val, add) - add;
}
-#elif defined(__i386__) || defined(__x86_64__)
-
/*
 * NB: The *Pointer() functions here assume that
 * sizeof(pointer)==sizeof(gint32)
 *
 * NB2: These asm functions assume 486+ (some of the opcodes dont
 * exist on 386). If this becomes an issue, we can get configure to
 * fall back to the non-atomic C versions of these calls.
 */

/*
 * Atomic compare-and-swap on a 32-bit value.
 *
 * If *dest equals comp, exch is stored into *dest; otherwise *dest is
 * left unchanged.  Either way the value *dest held before the operation
 * is returned (cmpxchg leaves it in EAX, captured by the "=a" output).
 *
 * NOTE(review): there is no "memory" clobber on the asm, so the
 * compiler may reorder unrelated memory accesses around this
 * instruction -- confirm callers do not rely on full-fence semantics.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;

	/* "a" (comp) preloads the comparand into EAX; lock cmpxchgl
	 * compares EAX with *dest and, on match, writes exch into *dest. */
	__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));
	return(old);
}
-
/*
 * Atomic compare-and-swap on a pointer-sized value.
 *
 * Same contract as InterlockedCompareExchange: if *dest == comp, store
 * exch into *dest; always return the previous value of *dest.  On
 * x86-64 (outside Native Client) the 64-bit cmpxchgq form is used so
 * the full pointer is swapped; all other targets use 32-bit cmpxchgl.
 */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("lock; "
#if defined(__x86_64__) && !defined(__native_client__)
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));

	return(old);
}
-
/*
 * Atomically increment *val by 1 and return the NEW (post-increment)
 * value: lock xadd leaves the old value of *val in tmp, so tmp+1 is
 * what *val now holds.
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;

	/* "0" (1) seeds the xadd addend register with 1. */
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (1), "m" (*val));

	return(tmp+1);
}
-
/*
 * Atomically decrement *val by 1 and return the NEW (post-decrement)
 * value: lock xadd with an addend of -1 leaves the old value in tmp,
 * so tmp-1 is what *val now holds.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;

	/* "0" (-1) seeds the xadd addend register with -1. */
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (-1), "m" (*val));

	return(tmp-1);
}
-
/*
 * See
 * http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
 * for the reasons for using cmpxchg and a loop here.
 */

/*
 * Atomically store new_val into *val and return the value *val held
 * before the store.
 *
 * Implemented as a cmpxchg retry loop: "a" (*val) seeds EAX with a
 * plain (non-atomic) read of the current value; if another thread
 * changed *val in the meantime, cmpxchg fails, reloads EAX with the
 * current *val, and jne retries until the swap succeeds.
 */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;

	__asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));
	return(ret);
}
-
/*
 * Atomically store new_val into *val and return the previous pointer
 * value.  Same cmpxchg retry-loop scheme as InterlockedExchange, but
 * pointer-sized: cmpxchgq on x86-64 (outside Native Client), cmpxchgl
 * elsewhere.
 */
static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer ret;

	__asm__ __volatile__ ("1:; lock; "
#if defined(__x86_64__) && !defined(__native_client__)
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
-
/*
 * Atomically add `add` to *val and return the OLD (pre-add) value,
 * which lock xadd leaves in the register operand.
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	/* "0" (add) seeds the xadd addend register with `add`. */
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (ret), "=m" (*val)
			      : "0" (add), "m" (*val));

	return(ret);
}
-
#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
G_GNUC_UNUSED
mono_memory_barrier ();
}
/*
 * Write (store-store) barrier: this architecture branch provides no
 * cheaper write-only fence, so the full barrier is used.
 * NOTE(review): the enclosing #elif arch branch is not fully visible
 * here -- confirm which target this variant belongs to.
 */
static inline void mono_memory_write_barrier (void)
{
	mono_memory_barrier ();
}
-#elif defined(__x86_64__) || defined(TARGET_AMD64)
/* Full memory barrier (amd64): mfence orders all prior loads and
 * stores before all subsequent ones; the "memory" clobber also stops
 * compiler-level reordering across this point. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("mfence" : : : "memory");
}
-
/* Read (load-load) barrier (amd64): lfence orders prior loads before
 * subsequent loads; "memory" clobber blocks compiler reordering. */
static inline void mono_memory_read_barrier (void)
{
	__asm__ __volatile__ ("lfence" : : : "memory");
}
-
/* Write (store-store) barrier (amd64): sfence orders prior stores
 * before subsequent stores; "memory" clobber blocks compiler
 * reordering. */
static inline void mono_memory_write_barrier (void)
{
	__asm__ __volatile__ ("sfence" : : : "memory");
}
-#elif defined(__i386__) || defined(TARGET_X86)
/* Full memory barrier (x86): a lock-prefixed no-op add to the top of
 * the stack acts as a serializing full fence (usable on CPUs without
 * mfence); the "memory" clobber also stops compiler reordering. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
}
-
/* Read barrier (x86): no separate load-load fence is used here, so
 * fall back to the full barrier. */
static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}
-
static inline void mono_memory_write_barrier (void)
{
mono_memory_barrier ();