/*
 * atomic.h:  Atomic operations
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2012 Xamarin Inc
 */

#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_
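/*
 * This header supplies Win32-style Interlocked* operations, implemented
 * per architecture below.  Semantics follow the Win32 functions of the
 * same names:
 *
 *   InterlockedCompareExchange(dest, exch, comp)      CAS; returns the OLD value
 *   InterlockedIncrement / InterlockedDecrement       return the NEW value
 *   InterlockedExchange / InterlockedExchangePointer  return the OLD value
 *   InterlockedExchangeAdd(dest, add)                 returns the OLD value
 */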
#if defined(__NetBSD__)
#include <sys/param.h>

#if __NetBSD_Version__ > 499004000
#include <sys/atomic.h>
#define HAVE_ATOMIC_OPS
#endif

#endif

#include <glib.h>
#if defined(__WIN32__) || defined(_WIN32)

#include <windows.h>
#elif defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)

static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	return atomic_cas_32((uint32_t*)dest, comp, exch);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	return atomic_cas_ptr(dest, comp, exch);
}

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	return atomic_inc_32_nv((uint32_t*)val);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	return atomic_dec_32_nv((uint32_t*)val);
}

static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	return atomic_swap_32((uint32_t*)val, new_val);
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	return atomic_swap_ptr(val, new_val);
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	/* atomic_add_32_nv returns the new value; Interlocked semantics
	 * want the value prior to the addition, hence the subtraction */
	return atomic_add_32_nv((uint32_t*)val, add) - add;
}
#elif defined(__i386__) || defined(__x86_64__)

/*
 * NB: The *Pointer() functions here assume that
 * sizeof(pointer)==sizeof(gint32)
 *
 * NB2: These asm functions assume 486+ (some of the opcodes don't
 * exist on 386). If this becomes an issue, we can get configure to
 * fall back to the non-atomic C versions of these calls.
 */
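/*
 * A compile-time guard for the NB assumption could look like the sketch
 * below (not in the original; _wapi_ptr_size_check is a hypothetical
 * name).  On x86-64 the *Pointer() functions switch to cmpxchgq, so the
 * assumption only matters for the 32-bit path:
 *
 *   typedef char _wapi_ptr_size_check
 *           [sizeof (gpointer) == sizeof (gint32) ? 1 : -1];
 */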
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;

	/* cmpxchg compares eax (comp) with *dest; on a match it stores
	 * exch, and either way eax ends up holding the old value */
	__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));
	return(old);
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("lock; "
#if defined(__x86_64__) && !defined(__native_client__)
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));

	return(old);
}
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (1), "m" (*val));

	/* xadd leaves the old value in tmp; the new value is tmp+1 */
	return(tmp+1);
}
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (-1), "m" (*val));

	return(tmp-1);
}
/*
 * See
 * http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
 * for the reasons for using cmpxchg and a loop here.
 */
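/*
 * In C terms the loop below amounts to (illustrative sketch):
 *
 *   gint32 old;
 *   do {
 *           old = *val;
 *   } while (InterlockedCompareExchange (val, new_val, old) != old);
 *   return old;
 *
 * On failure cmpxchg reloads eax with the current value of *val, so the
 * asm can jump straight back to the retry label without an explicit
 * reload.
 */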
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;

	__asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer ret;

	__asm__ __volatile__ ("1:; lock; "
#if defined(__x86_64__) && !defined(__native_client__)
			      "cmpxchgq "
#else
			      "cmpxchgl "
#endif
			      "%2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (ret), "=m" (*val)
			      : "0" (add), "m" (*val));

	/* xadd leaves the previous value of *val in ret */
	return(ret);
}
#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)

static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 comp asm("o4") = _comp;
	register gint32 exch asm("o5") = _exch;

	__asm__ __volatile__(
		/* cas [%%g1], %%o4, %%o5 */
		".word 0xdbe0500c"
		: "=r" (exch)
		: "0" (exch), "r" (dest), "r" (comp)
		: "memory");

	/* cas leaves the old value of *dest in %o5 whether or not it matched */
	return exch;
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
{
	register volatile gpointer *dest asm("g1") = _dest;
	register gpointer comp asm("o4") = _comp;
	register gpointer exch asm("o5") = _exch;

	__asm__ __volatile__(
#ifdef SPARCV9
		/* casx [%%g1], %%o4, %%o5 */
		".word 0xdbf0500c"
#else
		/* cas [%%g1], %%o4, %%o5 */
		".word 0xdbe0500c"
#endif
		: "=r" (exch)
		: "0" (exch), "r" (dest), "r" (comp)
		: "memory");

	return exch;
}
static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1: ld [%%g1], %%o4\n\t"
		" add %%o4, 1, %%o5\n\t"
		/* cas [%%g1], %%o4, %%o5 */
		" .word 0xdbe0500c\n\t"
		" cmp %%o4, %%o5\n\t"
		" bne 1b\n\t"
		"  add %%o5, 1, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest)
		: "memory", "cc");

	/* the delay-slot add turns the old value in %o5 into the new one */
	return ret;
}
static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1: ld [%%g1], %%o4\n\t"
		" sub %%o4, 1, %%o5\n\t"
		/* cas [%%g1], %%o4, %%o5 */
		" .word 0xdbe0500c\n\t"
		" cmp %%o4, %%o5\n\t"
		" bne 1b\n\t"
		"  sub %%o5, 1, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest)
		: "memory", "cc");

	return ret;
}
static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1: ld [%%g1], %%o4\n\t"
		" mov %3, %%o5\n\t"
		/* cas [%%g1], %%o4, %%o5 */
		" .word 0xdbe0500c\n\t"
		" cmp %%o4, %%o5\n\t"
		" bne 1b\n\t"
		" nop"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (exch)
		: "memory", "cc");

	/* on success %o5 holds the previous value of *dest */
	return ret;
}
static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
{
	register volatile gpointer *dest asm("g1") = _dest;
	register gpointer tmp asm("o4");
	register gpointer ret asm("o5");

	__asm__ __volatile__(
#ifdef SPARCV9
		"1: ldx [%%g1], %%o4\n\t"
#else
		"1: ld [%%g1], %%o4\n\t"
#endif
		" mov %3, %%o5\n\t"
#ifdef SPARCV9
		/* casx [%%g1], %%o4, %%o5 */
		" .word 0xdbf0500c\n\t"
#else
		/* cas [%%g1], %%o4, %%o5 */
		" .word 0xdbe0500c\n\t"
#endif
		" cmp %%o4, %%o5\n\t"
		" bne 1b\n\t"
		" nop"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (exch)
		: "memory", "cc");

	return ret;
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1: ld [%%g1], %%o4\n\t"
		" add %%o4, %3, %%o5\n\t"
		/* cas [%%g1], %%o4, %%o5 */
		" .word 0xdbe0500c\n\t"
		" cmp %%o4, %%o5\n\t"
		" bne 1b\n\t"
		" add %%o5, %3, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (add)
		: "memory", "cc");

	/* the delay-slot add leaves the new value in %o5, but ExchangeAdd
	 * must return the value prior to the addition */
	return ret - add;
}
#elif defined(__s390__)

static inline gint32
InterlockedCompareExchange(volatile gint32 *dest,
			   gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLR\t%1,%3\n"
			      "\tCS\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");
	return(old);
}
static inline gpointer
InterlockedCompareExchangePointer(volatile gpointer *dest,
				  gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLGR\t%1,%3\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");
	return(old);
}
static inline gint32
InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGHI\t1,1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLGFR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
static inline gint32
InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGHI\t1,-1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLGFR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
static inline gint32
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tLG\t%1,%0\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
static inline gint32
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGR\t1,%2\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b"
			      : "=&r" (ret), "+m" (*val)
			      : "r" (add)
			      : "1", "2", "cc");

	/* ret is the value of *val before the addition */
	return(ret);
}
#elif defined(__mono_ppc__)

#ifdef G_COMPILER_CODEWARRIOR
static inline gint32 InterlockedIncrement(volatile register gint32 *val)
{
	register gint32 result = 0;
	register gint32 tmp;

	asm
	{
		@1:
			lwarx	tmp, 0, val
			addi	result, tmp, 1
			stwcx.	result, 0, val
			bne-	@1
	}

	return result;
}

static inline gint32 InterlockedDecrement(register volatile gint32 *val)
{
	register gint32 result = 0;
	register gint32 tmp;

	asm
	{
		@1:
			lwarx	tmp, 0, val
			addi	result, tmp, -1
			stwcx.	result, 0, val
			bne-	@1
	}

	return result;
}

#define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))

static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
{
	register gint32 tmp = 0;

	asm
	{
		@1:
			lwarx	tmp, 0, dest
			cmpw	tmp, comp
			bne-	@2
			stwcx.	exch, 0, dest
			bne-	@1
		@2:
	}

	return tmp;
}

static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
{
	register gint32 tmp = 0;

	asm
	{
		@1:
			lwarx	tmp, 0, dest
			stwcx.	exch, 0, dest
			bne-	@1
	}

	return tmp;
}

#define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
#else

#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
#define LDREGX "ldarx"
#define STREGCXD "stdcx."
#define CMPREG "cmpd"
#else
#define LDREGX "lwarx"
#define STREGCXD "stwcx."
#define CMPREG "cmpw"
#endif
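/*
 * On ppc64, for example, the pointer-sized template below assembles with
 * the doubleword forms ldarx / stdcx. / cmpd, while 32-bit targets get
 * lwarx / stwcx. / cmpw, so one asm body serves both word sizes.
 */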
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx %0, 0, %2\n\t"
			      "addi %1, %0, 1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne- 1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");

	/* result holds the old value, so the new value is result + 1 */
	return result + 1;
}
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx %0, 0, %2\n\t"
			      "addi %1, %0, -1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne- 1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");

	return result - 1;
}
static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
							  gpointer exch, gpointer comp)
{
	gpointer tmp = NULL;

	__asm__ __volatile__ ("\n1:\n\t"
			      LDREGX " %0, 0, %1\n\t"
			      CMPREG " %0, %2\n\t"
			      "bne- 2f\n\t"
			      STREGCXD " %3, 0, %1\n\t"
			      "bne- 1b\n"
			      "2:"
			      : "=&r" (tmp)
			      : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx %0, 0, %1\n\t"
			      "cmpw %0, %2\n\t"
			      "bne- 2f\n\t"
			      "stwcx. %3, 0, %1\n\t"
			      "bne- 1b\n"
			      "2:"
			      : "=&r" (tmp)
			      : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx %0, 0, %2\n\t"
			      "stwcx. %3, 0, %2\n\t"
			      "bne 1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
{
	gpointer tmp = NULL;

	__asm__ __volatile__ ("\n1:\n\t"
			      LDREGX " %0, 0, %2\n\t"
			      STREGCXD " %3, 0, %2\n\t"
			      "bne 1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	gint32 result, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx %0, 0, %2\n\t"
			      "add %1, %0, %3\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne 1b"
			      : "=&r" (result), "=&r" (tmp)
			      : "r" (dest), "r" (add) : "cc", "memory");

	/* result is the value of *dest before the addition */
	return(result);
}

#undef LDREGX
#undef STREGCXD
#undef CMPREG

#endif /* !G_COMPILER_CODEWARRIOR */
#elif defined(__arm__)

#ifdef __native_client__
#define MASK_REGISTER(reg, cond) "bic" cond " " reg ", " reg ", #0xc0000000\n"
#define NACL_ALIGN() ".align 4\n"
#else
#define MASK_REGISTER(reg, cond)
#define NACL_ALIGN()
#endif
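/*
 * Under Native Client every address register is masked before a memory
 * access; MASK_REGISTER("%2", "eq"), for instance, expands to
 * "biceq %2, %2, #0xc0000000\n", clearing the top two address bits only
 * when the preceding compare set the EQ flag.  Outside NaCl both macros
 * expand to nothing and the asm is unchanged.
 */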
/*
 * Atomic operations on ARM don't contain memory barriers, and the
 * runtime code depends on full-barrier semantics, so we add the
 * barriers explicitly.
 */
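/*
 * Each ARMv6+ sequence below is, in effect (illustrative sketch, where
 * __ldrex/__strex stand in for the exclusive-access instructions):
 *
 *   barrier ();
 *   do {
 *           old = __ldrex (dest);              // load and mark exclusive
 *   } while (__strex (compute (old), dest));   // fails if exclusivity lost
 *   barrier ();
 */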
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
#if defined(HAVE_ARMV6)
	gint32 ret, tmp;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				"mov %0, #0\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldrex %1, [%2]\n"
				"teq %1, %3\n"
				"it eq\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "eq")
				"strexeq %0, %4, [%2]\n"
				"teq %0, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (tmp), "=&r" (ret)
				: "r" (dest), "r" (comp), "r" (exch)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldr %1, [%2]\n\t"
				"cmp %1, %4\n\t"
				"mov %0, %1\n\t"
				"bne 1f\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"swp %0, %3, [%2]\n\t"
				"cmp %0, %1\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "ne")
				"swpne %3, %0, [%2]\n\t"
				"bne 0b\n\t"
				"1:"
				: "=&r" (a), "=&r" (b)
				: "r" (dest), "r" (exch), "r" (comp)
				: "cc", "memory");

	return a;
#endif
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
#if defined(HAVE_ARMV6)
	gpointer ret;
	gint32 tmp;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				"mov %0, #0\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldrex %1, [%2]\n"
				"teq %1, %3\n"
				"it eq\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "eq")
				"strexeq %0, %4, [%2]\n"
				"teq %0, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (tmp), "=&r" (ret)
				: "r" (dest), "r" (comp), "r" (exch)
				: "memory", "cc");

	return ret;
#else
	gpointer a, b;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldr %1, [%2]\n\t"
				"cmp %1, %4\n\t"
				"mov %0, %1\n\t"
				"bne 1f\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "eq")
				"swpeq %0, %3, [%2]\n\t"
				"cmp %0, %1\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "ne")
				"swpne %3, %0, [%2]\n\t"
				"bne 0b\n\t"
				"1:"
				: "=&r" (a), "=&r" (b)
				: "r" (dest), "r" (exch), "r" (comp)
				: "cc", "memory");

	return a;
#endif
}
static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
#if defined(HAVE_ARMV6)
	gint32 ret, flag;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldrex %0, [%2]\n"
				"add %0, %0, %3\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"strex %1, %0, [%2]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (dest), "r" (1)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "ne")
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (1)
				: "cc", "memory");

	return b;
#endif
}
static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
#if defined(HAVE_ARMV6)
	gint32 ret, flag;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldrex %0, [%2]\n"
				"sub %0, %0, %3\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"strex %1, %0, [%2]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (dest), "r" (1)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "ne")
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (-1)
				: "cc", "memory");

	return b;
#endif
}
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
#if defined(HAVE_ARMV6)
	gint32 ret, flag;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldrex %0, [%3]\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"strex %1, %2, [%3]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (exch), "r" (dest)
				: "memory", "cc");

	return ret;
#else
	gint32 a;

	__asm__ __volatile__ (	NACL_ALIGN()
				MASK_REGISTER("%1", "al")
				"swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
#endif
}
static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
#if defined(HAVE_ARMV6)
	gpointer ret;
	gint32 flag;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldrex %0, [%3]\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"strex %1, %2, [%3]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (exch), "r" (dest)
				: "memory", "cc");

	return ret;
#else
	gpointer a;

	__asm__ __volatile__ (	NACL_ALIGN()
				MASK_REGISTER("%1", "al")
				"swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
#endif
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
#if defined(HAVE_ARMV6)
	gint32 ret, tmp, flag;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldrex %0, [%3]\n"
				"add %1, %0, %4\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"strex %2, %1, [%3]\n"
				"teq %2, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (tmp), "=&r" (flag)
				: "r" (dest), "r" (add)
				: "memory", "cc");

	/* ret is the value of *dest before the addition */
	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "ne")
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (add)
				: "cc", "memory");

	return a;
#endif
}
#elif defined(__ia64__)

#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif
static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;
	guint64 real_comp;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchange (dest, exch, comp);
#else
	/* cmpxchg4 zero extends the value read from memory, so the 64-bit
	 * ar.ccv comparand must be zero extended too, or negative values
	 * would never compare equal */
	real_comp = (guint64)(guint32)comp;
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
		      "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
		      : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
#endif

	return(old);
}
static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
							 gpointer exch, gpointer comp)
{
	gpointer old;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchangePointer (dest, exch, comp);
#else
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
		      "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
		      : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
#endif

	return(old);
}
static inline gint32 InterlockedIncrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedIncrement (val);
#else
	gint32 old;

	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old + 1, old) != old);

	return old + 1;
#endif
}
static inline gint32 InterlockedDecrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedDecrement (val);
#else
	gint32 old;

	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old - 1, old) != old);

	return old - 1;
#endif
}
static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedExchange (dest, new_val);
#else
	gint32 res;

	do {
		res = *dest;
	} while (InterlockedCompareExchange (dest, new_val, res) != res);

	return res;
#endif
}
static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
{
#ifdef __INTEL_COMPILER
	return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
#else
	gpointer res;

	do {
		res = *dest;
	} while (InterlockedCompareExchangePointer (dest, new_val, res) != res);

	return res;
#endif
}
static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
{
	gint32 old;

#ifdef __INTEL_COMPILER
	old = _InterlockedExchangeAdd (val, add);
#else
	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old + add, old) != old);
#endif

	return old;
}
#elif defined(__mips__)

#if SIZEOF_REGISTER == 8
#error "Not implemented."
#endif
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp, result = 0;

	__asm__ __volatile__ (" .set mips32\n"
			      "1: ll %0, %2\n"
			      " addu %1, %0, 1\n"
			      " sc %1, %2\n"
			      " beqz %1, 1b\n"
			      " .set mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
			      : "m" (*val));
	/* result is the old value, so the new value is result + 1 */
	return result + 1;
}
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp, result = 0;

	__asm__ __volatile__ (" .set mips32\n"
			      "1: ll %0, %2\n"
			      " subu %1, %0, 1\n"
			      " sc %1, %2\n"
			      " beqz %1, 1b\n"
			      " .set mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
			      : "m" (*val));
	return result - 1;
}
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 old, tmp;

	__asm__ __volatile__ (" .set mips32\n"
			      "1: ll %0, %2\n"
			      " bne %0, %5, 2f\n"
			      " move %1, %4\n"
			      " sc %1, %2\n"
			      " beqz %1, 1b\n"
			      "2: .set mips0\n"
			      : "=&r" (old), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (exch), "r" (comp));
	return(old);
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	return (gpointer)(InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp)));
}
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 result, tmp;

	__asm__ __volatile__ (" .set mips32\n"
			      "1: ll %0, %2\n"
			      " move %1, %4\n"
			      " sc %1, %2\n"
			      " beqz %1, 1b\n"
			      " .set mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (exch));
	return result;
}
static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
	return (gpointer)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch));
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	gint32 result, tmp;

	__asm__ __volatile__ (" .set mips32\n"
			      "1: ll %0, %2\n"
			      " addu %1, %0, %4\n"
			      " sc %1, %2\n"
			      " beqz %1, 1b\n"
			      " .set mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (add));
	/* result is the value of *dest before the addition */
	return result;
}
#else

#define WAPI_NO_ATOMIC_ASM

extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);

#endif
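/*
 * When WAPI_NO_ATOMIC_ASM is defined, the declarations above are
 * expected to be satisfied by portable, lock-based definitions elsewhere
 * in the runtime (presumably a companion atomic.c).  A minimal sketch of
 * such a fallback, assuming pthreads, might be:
 *
 *   static pthread_mutex_t spin = PTHREAD_MUTEX_INITIALIZER;
 *
 *   gint32 InterlockedCompareExchange (volatile gint32 *dest,
 *                                      gint32 exch, gint32 comp)
 *   {
 *           gint32 old;
 *           pthread_mutex_lock (&spin);
 *           old = *dest;
 *           if (old == comp)
 *                   *dest = exch;
 *           pthread_mutex_unlock (&spin);
 *           return old;
 *   }
 */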
#ifdef USE_GCC_ATOMIC_OPS

static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	return __sync_val_compare_and_swap (dest, comp, exch);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	return __sync_val_compare_and_swap (dest, comp, exch);
}

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	return __sync_add_and_fetch (val, 1);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	return __sync_add_and_fetch (val, -1);
}

static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 old_val;
	do {
		old_val = *val;
	} while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
	return old_val;
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer old_val;
	do {
		old_val = *val;
	} while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
	return old_val;
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	return __sync_fetch_and_add (val, add);
}
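/*
 * Usage sketch (illustrative, not part of the original API surface):
 * whichever implementation above is selected, the same Win32 semantics
 * apply, so e.g. a simple spinlock can be built portably:
 *
 *   static volatile gint32 lock_word;
 *
 *   static void spin_lock (void)
 *   {
 *           while (InterlockedCompareExchange (&lock_word, 1, 0) != 0)
 *                   ;  // spin until we swap 0 -> 1
 *   }
 *
 *   static void spin_unlock (void)
 *   {
 *           InterlockedExchange (&lock_word, 0);
 *   }
 */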
#endif /* USE_GCC_ATOMIC_OPS */

#endif /* _WAPI_ATOMIC_H_ */