/*
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2012 Xamarin Inc
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_

#include "config.h"
#include <glib.h>
#include <mono/utils/mono-membar.h>

/*
The current Nexus 7 arm-v7a fails with:
F/MonoDroid( 1568): shared runtime initialization error: Cannot load library: reloc_library[1285]:    37 cannot locate '__sync_val_compare_and_swap_8'

Apple targets have historically been problematic; xcode 4.6 would miscompile the intrinsic.
*/

/* On Windows, we always use the functions provided by the Windows API. */
#if defined(__WIN32__) || defined(_WIN32)

#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>

/* mingw is missing InterlockedCompareExchange64 () from winbase.h */
#if HAVE_DECL_INTERLOCKEDCOMPAREEXCHANGE64==0
static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
{
	return __sync_val_compare_and_swap (dest, comp, exch);
}
#endif

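/* Note the argument order: the GCC intrinsic is
 * __sync_val_compare_and_swap (ptr, oldval, newval), while the Windows API is
 * InterlockedCompareExchange64 (dest, exchange, comparand), so the last two
 * arguments are deliberately swapped in the wrapper above. */
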
/* mingw is missing InterlockedExchange64 () from winbase.h */
#if HAVE_DECL_INTERLOCKEDEXCHANGE64==0
static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
{
	gint64 old_val;
	do {
		old_val = *val;
	} while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
	return old_val;
}
#endif

/* mingw is missing InterlockedIncrement64 () from winbase.h */
#if HAVE_DECL_INTERLOCKEDINCREMENT64==0
static inline gint64 InterlockedIncrement64(volatile gint64 *val)
{
	return __sync_add_and_fetch (val, 1);
}
#endif

/* mingw is missing InterlockedDecrement64 () from winbase.h */
#if HAVE_DECL_INTERLOCKEDDECREMENT64==0
static inline gint64 InterlockedDecrement64(volatile gint64 *val)
{
	return __sync_sub_and_fetch (val, 1);
}
#endif

/* mingw is missing InterlockedAdd () from winbase.h */
#if HAVE_DECL_INTERLOCKEDADD==0
static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
{
	return __sync_add_and_fetch (dest, add);
}
#endif

/* mingw is missing InterlockedAdd64 () from winbase.h */
#if HAVE_DECL_INTERLOCKEDADD64==0
static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
	return __sync_add_and_fetch (dest, add);
}
#endif

#if defined(_MSC_VER) && !defined(InterlockedAdd)
/* MSVC before 2013 only defines InterlockedAdd* for the Itanium architecture */
static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
{
	return InterlockedExchangeAdd (dest, add) + add;
}
#endif

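/* InterlockedExchangeAdd () returns the value *before* the addition, while
 * InterlockedAdd () must return the resulting value, hence the extra "+ add"
 * above: with *dest == 5 and add == 3, the exchange-add yields 5 and the
 * wrapper returns 8. */
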
#if defined(_MSC_VER) && !defined(InterlockedAdd64)
#if defined(InterlockedExchangeAdd64)
/* This may be defined only on amd64 */
static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
	return InterlockedExchangeAdd64 (dest, add) + add;
}
#else
static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
	gint64 prev_value;

	do {
		prev_value = *dest;
	} while (prev_value != InterlockedCompareExchange64 (dest, prev_value + add, prev_value));

	return prev_value + add;
}
#endif
#endif

#ifdef HOST_WIN32
#define TO_INTERLOCKED_ARGP(ptr) ((volatile LONG*)(ptr))
#else
#define TO_INTERLOCKED_ARGP(ptr) (ptr)
#endif

/* And now for some dirty hacks... The Windows API doesn't
 * provide any useful primitives for this (other than getting
 * into architecture-specific madness), so use CAS. */

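/* A compare-exchange with both the comparand and the exchange value set to 0
 * never modifies *src (if the value is 0 it is overwritten with 0, otherwise
 * the swap fails), yet it always returns the current value and acts as a full
 * memory barrier, which is exactly what a sequentially consistent read needs. */
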
static inline gint32 InterlockedRead(volatile gint32 *src)
{
	return InterlockedCompareExchange (TO_INTERLOCKED_ARGP (src), 0, 0);
}

static inline gint64 InterlockedRead64(volatile gint64 *src)
{
	return InterlockedCompareExchange64 (src, 0, 0);
}

static inline gpointer InterlockedReadPointer(volatile gpointer *src)
{
	return InterlockedCompareExchangePointer (src, NULL, NULL);
}

static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
{
	InterlockedExchange (TO_INTERLOCKED_ARGP (dst), val);
}

static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
{
	InterlockedExchange64 (dst, val);
}

static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
{
	InterlockedExchangePointer (dst, val);
}

/* We can't even use CAS for these, so write them out
 * explicitly according to x86(_64) semantics... */

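/* That is: on x86(_64), naturally aligned 1- and 2-byte accesses are atomic,
 * plain loads already have acquire semantics, and a store followed by a full
 * barrier behaves as a sequentially consistent store; the functions below
 * rely on exactly that. */
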
static inline gint8 InterlockedRead8(volatile gint8 *src)
{
	return *src;
}

static inline gint16 InterlockedRead16(volatile gint16 *src)
{
	return *src;
}

static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
{
	*dst = val;
	mono_memory_barrier ();
}

static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
{
	*dst = val;
	mono_memory_barrier ();
}

/* Prefer GCC atomic ops if the target supports them (see configure.ac). */
#elif defined(USE_GCC_ATOMIC_OPS)

/*
 * As of this comment (August 2016), all current Clang versions get atomic
 * intrinsics on ARM64 wrong. All GCC versions prior to 5.3.0 do, too. The bug
 * is the same: The compiler developers thought that the acq + rel barriers
 * that ARM64 load/store instructions can impose are sufficient to provide
 * sequential consistency semantics. This is not the case:
 *
 * http://lists.infradead.org/pipermail/linux-arm-kernel/2014-February/229588.html
 *
 * We work around this bug by inserting full barriers around each atomic
 * intrinsic if we detect that we're built with a buggy compiler.
 */

#if defined (HOST_ARM64) && (defined (__clang__) || MONO_GNUC_VERSION < 50300)
#define WRAP_ATOMIC_INTRINSIC(INTRIN) \
	({ \
		mono_memory_barrier (); \
		__typeof__ (INTRIN) atomic_ret__ = (INTRIN); \
		mono_memory_barrier (); \
		atomic_ret__; \
	})

#define gcc_sync_val_compare_and_swap(a, b, c) WRAP_ATOMIC_INTRINSIC (__sync_val_compare_and_swap (a, b, c))
#define gcc_sync_add_and_fetch(a, b) WRAP_ATOMIC_INTRINSIC (__sync_add_and_fetch (a, b))
#define gcc_sync_sub_and_fetch(a, b) WRAP_ATOMIC_INTRINSIC (__sync_sub_and_fetch (a, b))
#define gcc_sync_fetch_and_add(a, b) WRAP_ATOMIC_INTRINSIC (__sync_fetch_and_add (a, b))
#else
#define gcc_sync_val_compare_and_swap(a, b, c) __sync_val_compare_and_swap (a, b, c)
#define gcc_sync_add_and_fetch(a, b) __sync_add_and_fetch (a, b)
#define gcc_sync_sub_and_fetch(a, b) __sync_sub_and_fetch (a, b)
#define gcc_sync_fetch_and_add(a, b) __sync_fetch_and_add (a, b)
#endif

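/* A rough sketch of what the workaround expands to on an affected compiler:
 * gcc_sync_add_and_fetch (val, 1) becomes
 *
 *   ({
 *       mono_memory_barrier ();
 *       __typeof__ (__sync_add_and_fetch (val, 1)) atomic_ret__ =
 *           __sync_add_and_fetch (val, 1);
 *       mono_memory_barrier ();
 *       atomic_ret__;
 *   })
 *
 * i.e. the intrinsic is bracketed by two full barriers, restoring the
 * sequential consistency the buggy codegen fails to provide. */
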
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	return gcc_sync_val_compare_and_swap (dest, comp, exch);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	return gcc_sync_val_compare_and_swap (dest, comp, exch);
}

static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
{
	return gcc_sync_add_and_fetch (dest, add);
}

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	return gcc_sync_add_and_fetch (val, 1);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	return gcc_sync_sub_and_fetch (val, 1);
}

static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 old_val;
	do {
		old_val = *val;
	} while (gcc_sync_val_compare_and_swap (val, old_val, new_val) != old_val);
	return old_val;
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer old_val;
	do {
		old_val = *val;
	} while (gcc_sync_val_compare_and_swap (val, old_val, new_val) != old_val);
	return old_val;
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	return gcc_sync_fetch_and_add (val, add);
}

static inline gint8 InterlockedRead8(volatile gint8 *src)
{
	/* Kind of a hack, but GCC doesn't give us anything better, and it's
	 * certainly not as bad as using a CAS loop. */
	return gcc_sync_fetch_and_add (src, 0);
}

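/* Atomically adding 0 leaves *src unchanged but returns its value with full
 * barrier semantics; the 16- and 32-bit reads below (and the 64-bit read
 * further down) reuse the same trick. */
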
static inline gint16 InterlockedRead16(volatile gint16 *src)
{
	return gcc_sync_fetch_and_add (src, 0);
}

static inline gint32 InterlockedRead(volatile gint32 *src)
{
	return gcc_sync_fetch_and_add (src, 0);
}

static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
{
	/* Nothing useful from GCC at all, so fall back to CAS. */
	gint8 old_val;
	do {
		old_val = *dst;
	} while (gcc_sync_val_compare_and_swap (dst, old_val, val) != old_val);
}

static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
{
	gint16 old_val;
	do {
		old_val = *dst;
	} while (gcc_sync_val_compare_and_swap (dst, old_val, val) != old_val);
}

static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
{
	/* Nothing useful from GCC at all, so fall back to CAS. */
	gint32 old_val;
	do {
		old_val = *dst;
	} while (gcc_sync_val_compare_and_swap (dst, old_val, val) != old_val);
}

#if defined (TARGET_OSX) || defined (__arm__) || (defined (__mips__) && !defined (__mips64)) || (defined (__powerpc__) && !defined (__powerpc64__)) || (defined (__sparc__) && !defined (__arch64__))
#define BROKEN_64BIT_ATOMICS_INTRINSIC 1
#endif

#if !defined (BROKEN_64BIT_ATOMICS_INTRINSIC)

static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
{
	return gcc_sync_val_compare_and_swap (dest, comp, exch);
}

static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
	return gcc_sync_add_and_fetch (dest, add);
}

static inline gint64 InterlockedIncrement64(volatile gint64 *val)
{
	return gcc_sync_add_and_fetch (val, 1);
}

static inline gint64 InterlockedDecrement64(volatile gint64 *val)
{
	return gcc_sync_sub_and_fetch (val, 1);
}

static inline gint64 InterlockedExchangeAdd64(volatile gint64 *val, gint64 add)
{
	return gcc_sync_fetch_and_add (val, add);
}

static inline gint64 InterlockedRead64(volatile gint64 *src)
{
	/* Kind of a hack, but GCC doesn't give us anything better. */
	return gcc_sync_fetch_and_add (src, 0);
}

#else

/* Implement 64-bit cmpxchg by hand or emulate it. */
extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);

/* Implement all other 64-bit atomics in terms of a specialized CAS
 * in this case, since chances are that the other 64-bit atomic
 * intrinsics are broken too.
 */

static inline gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add)
{
	gint64 old_val;
	do {
		old_val = *dest;
	} while (InterlockedCompareExchange64 (dest, old_val + add, old_val) != old_val);
	return old_val;
}

static inline gint64 InterlockedIncrement64(volatile gint64 *val)
{
	gint64 get, set;
	do {
		get = *val;
		set = get + 1;
	} while (InterlockedCompareExchange64 (val, set, get) != get);
	return set;
}

static inline gint64 InterlockedDecrement64(volatile gint64 *val)
{
	gint64 get, set;
	do {
		get = *val;
		set = get - 1;
	} while (InterlockedCompareExchange64 (val, set, get) != get);
	return set;
}

static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
	gint64 get, set;
	do {
		get = *dest;
		set = get + add;
	} while (InterlockedCompareExchange64 (dest, set, get) != get);
	return set;
}

static inline gint64 InterlockedRead64(volatile gint64 *src)
{
	return InterlockedCompareExchange64 (src, 0, 0);
}

#endif

static inline gpointer InterlockedReadPointer(volatile gpointer *src)
{
	return InterlockedCompareExchangePointer (src, NULL, NULL);
}

static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
{
	InterlockedExchangePointer (dst, val);
}

/* We always implement this in terms of a 64-bit cmpxchg since
 * GCC doesn't have an intrinsic to model it anyway. */
static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
{
	gint64 old_val;
	do {
		old_val = *val;
	} while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
	return old_val;
}

static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
{
	/* Nothing useful from GCC at all, so fall back to CAS. */
	InterlockedExchange64 (dst, val);
}

#else

#define WAPI_NO_ATOMIC_ASM

extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedAdd(volatile gint32 *dest, gint32 add);
extern gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint64 InterlockedIncrement64(volatile gint64 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint64 InterlockedDecrement64(volatile gint64 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gint64 InterlockedExchange64(volatile gint64 *dest, gint64 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
extern gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add);
extern gint8 InterlockedRead8(volatile gint8 *src);
extern gint16 InterlockedRead16(volatile gint16 *src);
extern gint32 InterlockedRead(volatile gint32 *src);
extern gint64 InterlockedRead64(volatile gint64 *src);
extern gpointer InterlockedReadPointer(volatile gpointer *src);
extern void InterlockedWrite8(volatile gint8 *dst, gint8 val);
extern void InterlockedWrite16(volatile gint16 *dst, gint16 val);
extern void InterlockedWrite(volatile gint32 *dst, gint32 val);
extern void InterlockedWrite64(volatile gint64 *dst, gint64 val);
extern void InterlockedWritePointer(volatile gpointer *dst, gpointer val);

#endif

#if SIZEOF_VOID_P == 4
#define InterlockedAddP(p,add) InterlockedAdd ((volatile gint32*)p, (gint32)add)
#else
#define InterlockedAddP(p,add) InterlockedAdd64 ((volatile gint64*)p, (gint64)add)
#endif

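/* A hypothetical usage sketch (allocated_bytes and track_alloc are made-up
 * names): InterlockedAddP selects the variant matching the platform word
 * size, so a pointer-sized counter can be updated portably:
 *
 *   static volatile gsize allocated_bytes;
 *
 *   static void track_alloc (gsize size)
 *   {
 *       InterlockedAddP (&allocated_bytes, size);
 *   }
 */
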
/* The following functions are not provided by any platform, so they can be
 * defined without further existence checks. */

static inline void
InterlockedWriteBool (volatile gboolean *dest, gboolean val)
{
	/* both gboolean and gint32 are int32_t; the purpose of these casts is to make things explicit */
	InterlockedWrite ((volatile gint32 *)dest, (gint32)val);
}

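/* A hypothetical publication sketch (ready, shared_state and publish are
 * made-up names): because InterlockedWrite () is built on full-barrier
 * primitives, writes performed before the flag store are visible to any
 * thread that observes the flag via InterlockedRead ():
 *
 *   static volatile gboolean ready;
 *   static int shared_state;
 *
 *   static void publish (int value)
 *   {
 *       shared_state = value;
 *       InterlockedWriteBool (&ready, TRUE);
 *   }
 */
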
#endif /* _WAPI_ATOMIC_H_ */