/*
 * atomic.h: Atomic operations
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2012 Xamarin Inc
 */
11 #ifndef _WAPI_ATOMIC_H_
12 #define _WAPI_ATOMIC_H_
17 #ifdef ENABLE_EXTENSION_MODULE
18 #include "../../../mono-extensions/mono/utils/atomic.h"
21 /* On Windows, we always use the functions provided by the Windows API. */
22 #if defined(__WIN32__) || defined(_WIN32)
24 #ifndef WIN32_LEAN_AND_MEAN
25 #define WIN32_LEAN_AND_MEAN
28 #include <mono/utils/mono-membar.h>
30 /* mingw is missing InterlockedCompareExchange64 () from winbase.h */
31 #if HAVE_DECL_INTERLOCKEDCOMPAREEXCHANGE64==0
32 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
34 return __sync_val_compare_and_swap (dest, comp, exch);
38 /* mingw is missing InterlockedExchange64 () from winbase.h */
39 #if HAVE_DECL_INTERLOCKEDEXCHANGE64==0
40 static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
45 } while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
50 /* mingw is missing InterlockedIncrement64 () from winbase.h */
51 #if HAVE_DECL_INTERLOCKEDINCREMENT64==0
52 static inline gint64 InterlockedIncrement64(volatile gint64 *val)
54 return __sync_add_and_fetch (val, 1);
58 /* mingw is missing InterlockedDecrement64 () from winbase.h */
59 #if HAVE_DECL_INTERLOCKEDDECREMENT64==0
60 static inline gint64 InterlockedDecrement64(volatile gint64 *val)
62 return __sync_sub_and_fetch (val, 1);
66 /* mingw is missing InterlockedAdd () from winbase.h */
67 #if HAVE_DECL_INTERLOCKEDADD==0
68 static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
70 return __sync_add_and_fetch (dest, add);
74 /* mingw is missing InterlockedAdd64 () from winbase.h */
75 #if HAVE_DECL_INTERLOCKEDADD64==0
76 static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
78 return __sync_add_and_fetch (dest, add);
82 /* And now for some dirty hacks... The Windows API doesn't
83 * provide any useful primitives for this (other than getting
84 * into architecture-specific madness), so use CAS. */
86 static inline gint32 InterlockedRead(volatile gint32 *src)
88 return InterlockedCompareExchange (src, 0, 0);
91 static inline gint64 InterlockedRead64(volatile gint64 *src)
93 return InterlockedCompareExchange64 (src, 0, 0);
96 static inline gpointer InterlockedReadPointer(volatile gpointer *src)
98 return InterlockedCompareExchangePointer (src, NULL, NULL);
101 static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
103 InterlockedExchange (dst, val);
106 static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
108 InterlockedExchange64 (dst, val);
111 static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
113 InterlockedExchangePointer (dst, val);
116 /* We can't even use CAS for these, so write them out
117 * explicitly according to x86(_64) semantics... */
119 static inline gint8 InterlockedRead8(volatile gint8 *src)
124 static inline gint16 InterlockedRead16(volatile gint16 *src)
129 static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
132 mono_memory_barrier ();
135 static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
138 mono_memory_barrier ();
141 /* Prefer GCC atomic ops if the target supports it (see configure.in). */
142 #elif defined(USE_GCC_ATOMIC_OPS)
144 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
145 gint32 exch, gint32 comp)
147 return __sync_val_compare_and_swap (dest, comp, exch);
150 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
152 return __sync_val_compare_and_swap (dest, comp, exch);
155 static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
157 return __sync_add_and_fetch (dest, add);
160 static inline gint32 InterlockedIncrement(volatile gint32 *val)
162 return __sync_add_and_fetch (val, 1);
165 static inline gint32 InterlockedDecrement(volatile gint32 *val)
167 return __sync_sub_and_fetch (val, 1);
170 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
175 } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
179 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
185 } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
189 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
191 return __sync_fetch_and_add (val, add);
194 static inline gint8 InterlockedRead8(volatile gint8 *src)
196 /* Kind of a hack, but GCC doesn't give us anything better, and it's
197 * certainly not as bad as using a CAS loop. */
198 return __sync_fetch_and_add (src, 0);
201 static inline gint16 InterlockedRead16(volatile gint16 *src)
203 return __sync_fetch_and_add (src, 0);
206 static inline gint32 InterlockedRead(volatile gint32 *src)
208 return __sync_fetch_and_add (src, 0);
211 static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
213 /* Nothing useful from GCC at all, so fall back to CAS. */
217 } while (__sync_val_compare_and_swap (dst, old_val, val) != old_val);
220 static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
225 } while (__sync_val_compare_and_swap (dst, old_val, val) != old_val);
228 static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
230 /* Nothing useful from GCC at all, so fall back to CAS. */
234 } while (__sync_val_compare_and_swap (dst, old_val, val) != old_val);
237 #if defined (TARGET_OSX) || defined (__arm__) || (defined (__mips__) && !defined (__mips64)) || (defined (__powerpc__) && !defined (__powerpc64__)) || (defined (__sparc__) && !defined (__arch64__))
238 #define BROKEN_64BIT_ATOMICS_INTRINSIC 1
241 #if !defined (BROKEN_64BIT_ATOMICS_INTRINSIC)
243 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
245 return __sync_val_compare_and_swap (dest, comp, exch);
248 static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
250 return __sync_add_and_fetch (dest, add);
253 static inline gint64 InterlockedIncrement64(volatile gint64 *val)
255 return __sync_add_and_fetch (val, 1);
258 static inline gint64 InterlockedDecrement64(volatile gint64 *val)
260 return __sync_sub_and_fetch (val, 1);
263 static inline gint64 InterlockedExchangeAdd64(volatile gint64 *val, gint64 add)
265 return __sync_fetch_and_add (val, add);
268 static inline gint64 InterlockedRead64(volatile gint64 *src)
270 /* Kind of a hack, but GCC doesn't give us anything better. */
271 return __sync_fetch_and_add (src, 0);
276 /* Implement 64-bit cmpxchg by hand or emulate it. */
277 extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
279 /* Implement all other 64-bit atomics in terms of a specialized CAS
280 * in this case, since chances are that the other 64-bit atomic
281 * intrinsics are broken too.
284 static inline gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add)
289 } while (InterlockedCompareExchange64 (dest, old_val + add, old_val) != old_val);
293 static inline gint64 InterlockedIncrement64(volatile gint64 *val)
299 } while (InterlockedCompareExchange64 (val, set, get) != get);
303 static inline gint64 InterlockedDecrement64(volatile gint64 *val)
309 } while (InterlockedCompareExchange64 (val, set, get) != get);
313 static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
319 } while (InterlockedCompareExchange64 (dest, set, get) != get);
323 static inline gint64 InterlockedRead64(volatile gint64 *src)
325 return InterlockedCompareExchange64 (src, 0, 0);
330 static inline gpointer InterlockedReadPointer(volatile gpointer *src)
332 return InterlockedCompareExchangePointer (src, NULL, NULL);
335 static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
337 InterlockedExchangePointer (dst, val);
340 /* We always implement this in terms of a 64-bit cmpxchg since
341 * GCC doesn't have an intrisic to model it anyway. */
342 static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
347 } while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
351 static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
353 /* Nothing useful from GCC at all, so fall back to CAS. */
354 InterlockedExchange64 (dst, val);
357 #elif defined(__ia64__)
359 #ifdef __INTEL_COMPILER
360 #include <ia64intrin.h>
363 static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
364 gint32 exch, gint32 comp)
369 #ifdef __INTEL_COMPILER
370 old = _InterlockedCompareExchange (dest, exch, comp);
372 /* cmpxchg4 zero extends the value read from memory */
373 real_comp = (guint64)(guint32)comp;
374 asm volatile ("mov ar.ccv = %2 ;;\n\t"
375 "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
376 : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
382 static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
383 gpointer exch, gpointer comp)
387 #ifdef __INTEL_COMPILER
388 old = _InterlockedCompareExchangePointer (dest, exch, comp);
390 asm volatile ("mov ar.ccv = %2 ;;\n\t"
391 "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
392 : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
398 static inline gint32 InterlockedIncrement(gint32 volatile *val)
400 #ifdef __INTEL_COMPILER
401 return _InterlockedIncrement (val);
407 } while (InterlockedCompareExchange (val, old + 1, old) != old);
413 static inline gint32 InterlockedDecrement(gint32 volatile *val)
415 #ifdef __INTEL_COMPILER
416 return _InterlockedDecrement (val);
422 } while (InterlockedCompareExchange (val, old - 1, old) != old);
428 static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
430 #ifdef __INTEL_COMPILER
431 return _InterlockedExchange (dest, new_val);
437 } while (InterlockedCompareExchange (dest, new_val, res) != res);
443 static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
445 #ifdef __INTEL_COMPILER
446 return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
452 } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
458 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
462 #ifdef __INTEL_COMPILER
463 old = _InterlockedExchangeAdd (val, add);
467 } while (InterlockedCompareExchange (val, old + add, old) != old);
475 #define WAPI_NO_ATOMIC_ASM
477 extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
478 extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
479 extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
480 extern gint32 InterlockedAdd(volatile gint32 *dest, gint32 add);
481 extern gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add);
482 extern gint32 InterlockedIncrement(volatile gint32 *dest);
483 extern gint64 InterlockedIncrement64(volatile gint64 *dest);
484 extern gint32 InterlockedDecrement(volatile gint32 *dest);
485 extern gint64 InterlockedDecrement64(volatile gint64 *dest);
486 extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
487 extern gint64 InterlockedExchange64(volatile gint64 *dest, gint64 exch);
488 extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
489 extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
490 extern gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add);
491 extern gint8 InterlockedRead8(volatile gint8 *src);
492 extern gint16 InterlockedRead16(volatile gint16 *src);
493 extern gint32 InterlockedRead(volatile gint32 *src);
494 extern gint64 InterlockedRead64(volatile gint64 *src);
495 extern gpointer InterlockedReadPointer(volatile gpointer *src);
496 extern void InterlockedWrite8(volatile gint8 *dst, gint8 val);
497 extern void InterlockedWrite16(volatile gint16 *dst, gint16 val);
498 extern void InterlockedWrite(volatile gint32 *dst, gint32 val);
499 extern void InterlockedWrite64(volatile gint64 *dst, gint64 val);
500 extern void InterlockedWritePointer(volatile gpointer *dst, gpointer val);
504 #endif /* _WAPI_ATOMIC_H_ */