/*
 * atomic.h:  Atomic operations
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2012 Xamarin Inc
 */
11 #ifndef _WAPI_ATOMIC_H_
12 #define _WAPI_ATOMIC_H_
17 #ifdef ENABLE_EXTENSION_MODULE
18 #include "../../../mono-extensions/mono/utils/atomic.h"
21 /* On Windows, we always use the functions provided by the Windows API. */
22 #if defined(__WIN32__) || defined(_WIN32)
26 /* mingw is missing InterlockedCompareExchange64 () from winbase.h */
27 #if HAVE_DECL_INTERLOCKEDCOMPAREEXCHANGE64==0
28 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
30 return __sync_val_compare_and_swap (dest, comp, exch);
34 /* And now for some dirty hacks... The Windows API doesn't
35 * provide any useful primitives for this (other than getting
36 * into architecture-specific madness), so use CAS. */
38 static inline gint32 InterlockedRead(volatile gint32 *src)
40 return InterlockedCompareExchange (src, 0, 0);
43 static inline gint64 InterlockedRead64(volatile gint64 *src)
45 return InterlockedCompareExchange64 (src, 0, 0);
48 static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
50 InterlockedExchange (dst, val);
53 static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
55 InterlockedExchange64 (dst, val);
58 /* Prefer GCC atomic ops if the target supports it (see configure.in). */
59 #elif defined(USE_GCC_ATOMIC_OPS)
61 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
62 gint32 exch, gint32 comp)
64 return __sync_val_compare_and_swap (dest, comp, exch);
67 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
69 return __sync_val_compare_and_swap (dest, comp, exch);
72 static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
74 return __sync_add_and_fetch (dest, add);
77 static inline gint32 InterlockedIncrement(volatile gint32 *val)
79 return __sync_add_and_fetch (val, 1);
82 static inline gint32 InterlockedDecrement(volatile gint32 *val)
84 return __sync_add_and_fetch (val, -1);
87 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
92 } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
96 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
102 } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
106 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
108 return __sync_fetch_and_add (val, add);
111 static inline gint32 InterlockedRead(volatile gint32 *src)
113 /* Kind of a hack, but GCC doesn't give us anything better, and it's
114 certainly not as bad as using a CAS loop. */
115 return __sync_fetch_and_add (src, 0);
118 static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
120 /* Nothing useful from GCC at all, so fall back to CAS. */
121 InterlockedExchange (dst, val);
124 #if defined (TARGET_OSX)
125 #define BROKEN_64BIT_ATOMICS_INTRINSIC 1
128 #if !defined (BROKEN_64BIT_ATOMICS_INTRINSIC)
130 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
132 return __sync_val_compare_and_swap (dest, comp, exch);
135 static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
137 return __sync_add_and_fetch (dest, add);
140 static inline gint64 InterlockedIncrement64(volatile gint64 *val)
142 return __sync_add_and_fetch (val, 1);
145 static inline gint64 InterlockedDecrement64(volatile gint64 *val)
147 return __sync_sub_and_fetch (val, 1);
150 static inline gint64 InterlockedExchangeAdd64(volatile gint64 *val, gint64 add)
152 return __sync_fetch_and_add (val, add);
155 static inline gint64 InterlockedRead64(volatile gint64 *src)
157 /* Kind of a hack, but GCC doesn't give us anything better. */
158 return __sync_fetch_and_add (src, 0);
/* Implement 64-bit cmpxchg by hand or emulate it. */
extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);

/* Implement all other 64-bit atomics in terms of a specialized CAS
 * in this case, since chances are that the other 64-bit atomic
 * intrinsics are broken too.
 */
171 static inline gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add)
176 } while (InterlockedCompareExchange64 (dest, old_val + add, old_val) != old_val);
180 static inline gint64 InterlockedIncrement64(volatile gint64 *val)
186 } while (InterlockedCompareExchange64 (val, set, get) != set);
190 static inline gint64 InterlockedDecrement64(volatile gint64 *val)
196 } while (InterlockedCompareExchange64 (val, set, get) != set);
200 static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
206 } while (InterlockedCompareExchange64 (dest, set, get) != set);
210 static inline gint64 InterlockedRead64(volatile gint64 *src)
212 return InterlockedCompareExchange64 (src, 0, 0);
217 /* We always implement this in terms of a 64-bit cmpxchg since
218 * GCC doesn't have an intrisic to model it anyway. */
219 static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
224 } while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
228 static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
230 /* Nothing useful from GCC at all, so fall back to CAS. */
231 InterlockedExchange64 (dst, val);
234 #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
237 static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
239 register volatile gint32 *dest asm("g1") = _dest;
240 register gint32 comp asm("o4") = _comp;
241 register gint32 exch asm("o5") = _exch;
243 __asm__ __volatile__(
244 /* cas [%%g1], %%o4, %%o5 */
247 : "0" (exch), "r" (dest), "r" (comp)
254 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
256 register volatile gpointer *dest asm("g1") = _dest;
257 register gpointer comp asm("o4") = _comp;
258 register gpointer exch asm("o5") = _exch;
260 __asm__ __volatile__(
262 /* casx [%%g1], %%o4, %%o5 */
265 /* cas [%%g1], %%o4, %%o5 */
269 : "0" (exch), "r" (dest), "r" (comp)
276 static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
278 register volatile gint32 *dest asm("g1") = _dest;
279 register gint32 tmp asm("o4");
280 register gint32 ret asm("o5");
282 __asm__ __volatile__(
283 "1: ld [%%g1], %%o4\n\t"
284 " add %%o4, 1, %%o5\n\t"
285 /* cas [%%g1], %%o4, %%o5 */
286 " .word 0xdbe0500c\n\t"
287 " cmp %%o4, %%o5\n\t"
290 : "=&r" (tmp), "=&r" (ret)
298 static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
300 register volatile gint32 *dest asm("g1") = _dest;
301 register gint32 tmp asm("o4");
302 register gint32 ret asm("o5");
304 __asm__ __volatile__(
305 "1: ld [%%g1], %%o4\n\t"
306 " sub %%o4, 1, %%o5\n\t"
307 /* cas [%%g1], %%o4, %%o5 */
308 " .word 0xdbe0500c\n\t"
309 " cmp %%o4, %%o5\n\t"
312 : "=&r" (tmp), "=&r" (ret)
320 static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
322 register volatile gint32 *dest asm("g1") = _dest;
323 register gint32 tmp asm("o4");
324 register gint32 ret asm("o5");
326 __asm__ __volatile__(
327 "1: ld [%%g1], %%o4\n\t"
329 /* cas [%%g1], %%o4, %%o5 */
330 " .word 0xdbe0500c\n\t"
331 " cmp %%o4, %%o5\n\t"
334 : "=&r" (tmp), "=&r" (ret)
335 : "r" (dest), "r" (exch)
342 static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
344 register volatile gpointer *dest asm("g1") = _dest;
345 register gpointer tmp asm("o4");
346 register gpointer ret asm("o5");
348 __asm__ __volatile__(
350 "1: ldx [%%g1], %%o4\n\t"
352 "1: ld [%%g1], %%o4\n\t"
356 /* casx [%%g1], %%o4, %%o5 */
357 " .word 0xdbf0500c\n\t"
359 /* cas [%%g1], %%o4, %%o5 */
360 " .word 0xdbe0500c\n\t"
362 " cmp %%o4, %%o5\n\t"
365 : "=&r" (tmp), "=&r" (ret)
366 : "r" (dest), "r" (exch)
373 static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
375 register volatile gint32 *dest asm("g1") = _dest;
376 register gint32 tmp asm("o4");
377 register gint32 ret asm("o5");
379 __asm__ __volatile__(
380 "1: ld [%%g1], %%o4\n\t"
381 " add %%o4, %3, %%o5\n\t"
382 /* cas [%%g1], %%o4, %%o5 */
383 " .word 0xdbe0500c\n\t"
384 " cmp %%o4, %%o5\n\t"
386 " add %%o5, %3, %%o5"
387 : "=&r" (tmp), "=&r" (ret)
388 : "r" (dest), "r" (add)
394 #elif defined(__ia64__)
396 #ifdef __INTEL_COMPILER
397 #include <ia64intrin.h>
400 static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
401 gint32 exch, gint32 comp)
406 #ifdef __INTEL_COMPILER
407 old = _InterlockedCompareExchange (dest, exch, comp);
409 /* cmpxchg4 zero extends the value read from memory */
410 real_comp = (guint64)(guint32)comp;
411 asm volatile ("mov ar.ccv = %2 ;;\n\t"
412 "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
413 : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
419 static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
420 gpointer exch, gpointer comp)
424 #ifdef __INTEL_COMPILER
425 old = _InterlockedCompareExchangePointer (dest, exch, comp);
427 asm volatile ("mov ar.ccv = %2 ;;\n\t"
428 "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
429 : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
435 static inline gint32 InterlockedIncrement(gint32 volatile *val)
437 #ifdef __INTEL_COMPILER
438 return _InterlockedIncrement (val);
444 } while (InterlockedCompareExchange (val, old + 1, old) != old);
450 static inline gint32 InterlockedDecrement(gint32 volatile *val)
452 #ifdef __INTEL_COMPILER
453 return _InterlockedDecrement (val);
459 } while (InterlockedCompareExchange (val, old - 1, old) != old);
465 static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
467 #ifdef __INTEL_COMPILER
468 return _InterlockedExchange (dest, new_val);
474 } while (InterlockedCompareExchange (dest, new_val, res) != res);
480 static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
482 #ifdef __INTEL_COMPILER
483 return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
489 } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
495 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
499 #ifdef __INTEL_COMPILER
500 old = _InterlockedExchangeAdd (val, add);
504 } while (InterlockedCompareExchange (val, old + add, old) != old);
#define WAPI_NO_ATOMIC_ASM

/* No compiler-intrinsic or inline-assembly implementation is available
 * for this target; the operations below are defined out of line
 * (presumably in a companion .c file -- TODO confirm location). */
extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedAdd(volatile gint32 *dest, gint32 add);
extern gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint64 InterlockedIncrement64(volatile gint64 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint64 InterlockedDecrement64(volatile gint64 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gint64 InterlockedExchange64(volatile gint64 *dest, gint64 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
extern gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add);
extern gint32 InterlockedRead(volatile gint32 *src);
extern gint64 InterlockedRead64(volatile gint64 *src);
extern void InterlockedWrite(volatile gint32 *dst, gint32 val);
extern void InterlockedWrite64(volatile gint64 *dst, gint64 val);
535 #endif /* _WAPI_ATOMIC_H_ */