2 * atomic.h: Atomic operations
5 * Dick Porter (dick@ximian.com)
7 * (C) 2002 Ximian, Inc.
8 * Copyright 2012 Xamarin Inc
11 #ifndef _WAPI_ATOMIC_H_
12 #define _WAPI_ATOMIC_H_
14 #if defined(__NetBSD__)
15 #include <sys/param.h>
17 #if __NetBSD_Version__ > 499004000
18 #include <sys/atomic.h>
19 #define HAVE_ATOMIC_OPS
26 #if defined(__WIN32__) || defined(_WIN32)
30 #elif defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)
32 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
33 gint32 exch, gint32 comp)
35 return atomic_cas_32((uint32_t*)dest, comp, exch);
38 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
40 return atomic_cas_ptr(dest, comp, exch);
43 static inline gint32 InterlockedIncrement(volatile gint32 *val)
45 return atomic_inc_32_nv((uint32_t*)val);
48 static inline gint32 InterlockedDecrement(volatile gint32 *val)
50 return atomic_dec_32_nv((uint32_t*)val);
53 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
55 return atomic_swap_32((uint32_t*)val, new_val);
58 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
61 return atomic_swap_ptr(val, new_val);
64 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
66 return atomic_add_32_nv((uint32_t*)val, add) - add;
69 #elif defined(__i386__) || defined(__x86_64__)
72 * NB: The *Pointer() functions here assume that
73 * sizeof(pointer)==sizeof(gint32)
75 * NB2: These asm functions assume 486+ (some of the opcodes dont
76 * exist on 386). If this becomes an issue, we can get configure to
77 * fall back to the non-atomic C versions of these calls.
80 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
81 gint32 exch, gint32 comp)
85 __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
86 : "=m" (*dest), "=a" (old)
87 : "r" (exch), "m" (*dest), "a" (comp));
91 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
95 __asm__ __volatile__ ("lock; "
96 #if defined(__x86_64__) && !defined(__native_client__)
102 : "=m" (*dest), "=a" (old)
103 : "r" (exch), "m" (*dest), "a" (comp));
108 static inline gint32 InterlockedIncrement(volatile gint32 *val)
112 __asm__ __volatile__ ("lock; xaddl %0, %1"
113 : "=r" (tmp), "=m" (*val)
114 : "0" (1), "m" (*val));
119 static inline gint32 InterlockedDecrement(volatile gint32 *val)
123 __asm__ __volatile__ ("lock; xaddl %0, %1"
124 : "=r" (tmp), "=m" (*val)
125 : "0" (-1), "m" (*val));
132 * http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
133 * for the reasons for using cmpxchg and a loop here.
135 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
139 __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
140 : "=m" (*val), "=a" (ret)
141 : "r" (new_val), "m" (*val), "a" (*val));
145 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
150 __asm__ __volatile__ ("1:; lock; "
151 #if defined(__x86_64__) && !defined(__native_client__)
157 : "=m" (*val), "=a" (ret)
158 : "r" (new_val), "m" (*val), "a" (*val));
163 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
167 __asm__ __volatile__ ("lock; xaddl %0, %1"
168 : "=r" (ret), "=m" (*val)
169 : "0" (add), "m" (*val));
/*
 * SPARC section.
 * NOTE(review): this section is an extraction-garbled fragment — lines were
 * dropped (braces, branch/delay-slot instructions, clobber lists, returns)
 * and the original file's line numbers are fused onto each surviving line.
 * The hand-encoded ".word 0xdbe0500c" / ".word 0xdbf0500c" opcodes are
 * cas/casx (see the adjacent comments); the branch forms in the missing
 * lines determine the exact return semantics, so restore this section from
 * upstream rather than re-deriving it.
 */
174 #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
/* CAS via hand-encoded `cas [g1], o4, o5`; on success o5 receives the old
 * value (output tied to operand 0 / exch below). */
177 static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
179 register volatile gint32 *dest asm("g1") = _dest;
180 register gint32 comp asm("o4") = _comp;
181 register gint32 exch asm("o5") = _exch;
183 __asm__ __volatile__(
184 /* cas [%%g1], %%o4, %%o5 */
187 : "0" (exch), "r" (dest), "r" (comp)
/* Pointer CAS: casx (0xdbf0500c) on 64-bit, cas (0xdbe0500c) on 32-bit. */
194 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
196 register volatile gpointer *dest asm("g1") = _dest;
197 register gpointer comp asm("o4") = _comp;
198 register gpointer exch asm("o5") = _exch;
200 __asm__ __volatile__(
202 /* casx [%%g1], %%o4, %%o5 */
205 /* cas [%%g1], %%o4, %%o5 */
209 : "0" (exch), "r" (dest), "r" (comp)
/* ld/add/cas retry loop; the missing lines include the `bne 1b` and its
 * delay slot, which re-adds 1 to produce the new value on exit. */
216 static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
218 register volatile gint32 *dest asm("g1") = _dest;
219 register gint32 tmp asm("o4");
220 register gint32 ret asm("o5");
222 __asm__ __volatile__(
223 "1: ld [%%g1], %%o4\n\t"
224 " add %%o4, 1, %%o5\n\t"
225 /* cas [%%g1], %%o4, %%o5 */
226 " .word 0xdbe0500c\n\t"
227 " cmp %%o4, %%o5\n\t"
230 : "=&r" (tmp), "=&r" (ret)
238 static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
240 register volatile gint32 *dest asm("g1") = _dest;
241 register gint32 tmp asm("o4");
242 register gint32 ret asm("o5");
244 __asm__ __volatile__(
245 "1: ld [%%g1], %%o4\n\t"
246 " sub %%o4, 1, %%o5\n\t"
247 /* cas [%%g1], %%o4, %%o5 */
248 " .word 0xdbe0500c\n\t"
249 " cmp %%o4, %%o5\n\t"
252 : "=&r" (tmp), "=&r" (ret)
260 static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
262 register volatile gint32 *dest asm("g1") = _dest;
263 register gint32 tmp asm("o4");
264 register gint32 ret asm("o5");
266 __asm__ __volatile__(
267 "1: ld [%%g1], %%o4\n\t"
269 /* cas [%%g1], %%o4, %%o5 */
270 " .word 0xdbe0500c\n\t"
271 " cmp %%o4, %%o5\n\t"
274 : "=&r" (tmp), "=&r" (ret)
275 : "r" (dest), "r" (exch)
/* 64-bit loads (ldx) and casx on V9, ld/cas otherwise. */
282 static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
284 register volatile gpointer *dest asm("g1") = _dest;
285 register gpointer tmp asm("o4");
286 register gpointer ret asm("o5");
288 __asm__ __volatile__(
290 "1: ldx [%%g1], %%o4\n\t"
292 "1: ld [%%g1], %%o4\n\t"
296 /* casx [%%g1], %%o4, %%o5 */
297 " .word 0xdbf0500c\n\t"
299 /* cas [%%g1], %%o4, %%o5 */
300 " .word 0xdbe0500c\n\t"
302 " cmp %%o4, %%o5\n\t"
305 : "=&r" (tmp), "=&r" (ret)
306 : "r" (dest), "r" (exch)
313 static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
315 register volatile gint32 *dest asm("g1") = _dest;
316 register gint32 tmp asm("o4");
317 register gint32 ret asm("o5");
319 __asm__ __volatile__(
320 "1: ld [%%g1], %%o4\n\t"
321 " add %%o4, %3, %%o5\n\t"
322 /* cas [%%g1], %%o4, %%o5 */
323 " .word 0xdbe0500c\n\t"
324 " cmp %%o4, %%o5\n\t"
326 " add %%o5, %3, %%o5"
327 : "=&r" (tmp), "=&r" (ret)
328 : "r" (dest), "r" (add)
/*
 * s390/s390x section.
 * NOTE(review): garbled extraction fragment — the "#elif defined(__s390__)"
 * opener, the s390x/__s390x__ sub-guards, and most of the asm template
 * strings (the CS/CSG compare-and-swap loops) were dropped; only the LA
 * address-load lines and the constraint lists survive. Each primitive
 * appears twice (31-bit CS variant and 64-bit CSG variant). Restore from
 * upstream rather than re-deriving the missing instructions.
 */
337 InterlockedCompareExchange(volatile gint32 *dest,
338 gint32 exch, gint32 comp)
/* LA 1,%0 loads the address of *dest into r1 for the (missing) CS loop. */
342 __asm__ __volatile__ ("\tLA\t1,%0\n"
345 : "+m" (*dest), "=&r" (old)
346 : "r" (exch), "r" (comp)
352 static inline gpointer
353 InterlockedCompareExchangePointer(volatile gpointer *dest,
354 gpointer exch, gpointer comp)
358 __asm__ __volatile__ ("\tLA\t1,%0\n"
361 : "+m" (*dest), "=&r" (old)
362 : "r" (exch), "r" (comp)
/* 64-bit variant: CSG operates on doublewords (pointer-sized on s390x). */
367 static inline gpointer
368 InterlockedCompareExchangePointer(volatile gpointer *dest,
374 __asm__ __volatile__ ("\tLA\t1,%0\n"
376 "\tCSG\t%1,%2,0(1)\n"
377 : "+m" (*dest), "=&r" (old)
378 : "r" (exch), "r" (comp)
387 InterlockedIncrement(volatile gint32 *val)
391 __asm__ __volatile__ ("\tLA\t2,%1\n"
398 : "=r" (tmp), "+m" (*val)
405 InterlockedIncrement(volatile gint32 *val)
409 __asm__ __volatile__ ("\tLA\t2,%1\n"
416 : "=r" (tmp), "+m" (*val)
425 InterlockedDecrement(volatile gint32 *val)
429 __asm__ __volatile__ ("\tLA\t2,%1\n"
436 : "=r" (tmp), "+m" (*val)
443 InterlockedDecrement(volatile gint32 *val)
447 __asm__ __volatile__ ("\tLA\t2,%1\n"
454 : "=r" (tmp), "+m" (*val)
462 InterlockedExchange(volatile gint32 *val, gint32 new_val)
466 __asm__ __volatile__ ("\tLA\t1,%0\n"
470 : "+m" (*val), "=&r" (ret)
478 static inline gpointer
479 InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
483 __asm__ __volatile__ ("\tLA\t1,%0\n"
487 : "+m" (*val), "=&r" (ret)
494 static inline gpointer
495 InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
499 __asm__ __volatile__ ("\tLA\t1,%0\n"
501 "\tCSG\t%1,%2,0(1)\n"
503 : "+m" (*val), "=&r" (ret)
513 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
517 __asm__ __volatile__ ("\tLA\t2,%1\n"
523 : "=&r" (ret), "+m" (*val)
531 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
535 __asm__ __volatile__ ("\tLA\t2,%1\n"
541 : "=&r" (ret), "+m" (*val)
/*
 * PowerPC section (32- and 64-bit, GCC and CodeWarrior).
 * NOTE(review): garbled extraction fragment — the CodeWarrior `asm { }`
 * bodies, several lwarx/addi/bne- lines, and the closing braces/returns
 * were dropped, and original line numbers are fused onto each line.
 * Restore from upstream before use.
 */
549 #elif defined(__mono_ppc__)
551 #ifdef G_COMPILER_CODEWARRIOR
/* CodeWarrior uses its own inline-asm syntax (lwarx/stwcx. retry loops). */
552 static inline gint32 InterlockedIncrement(volatile register gint32 *val)
554 gint32 result = 0, tmp;
555 register gint32 result = 0;
563 stwcx. result, 0, val
570 static inline gint32 InterlockedDecrement(register volatile gint32 *val)
572 register gint32 result = 0;
580 stwcx. result, 0, val
/* On 32-bit CodeWarrior, pointer ops are routed through the gint32 ops. */
586 #define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
588 static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
590 register gint32 tmp = 0;
605 static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
607 register gint32 tmp = 0;
619 #define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
/* GNUC path: pick doubleword (ldarx/stdcx./cmpd) or word (lwarx/stwcx./cmpw)
 * mnemonics for the pointer-sized operations. */
622 #if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
623 #define LDREGX "ldarx"
624 #define STREGCXD "stdcx."
625 #define CMPREG "cmpd"
627 #define LDREGX "lwarx"
628 #define STREGCXD "stwcx."
629 #define CMPREG "cmpw"
/* lwarx/stwcx. load-reserve / store-conditional retry loop. */
632 static inline gint32 InterlockedIncrement(volatile gint32 *val)
634 gint32 result = 0, tmp;
636 __asm__ __volatile__ ("\n1:\n\t"
637 "lwarx %0, 0, %2\n\t"
639 "stwcx. %1, 0, %2\n\t"
641 : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
645 static inline gint32 InterlockedDecrement(volatile gint32 *val)
647 gint32 result = 0, tmp;
649 __asm__ __volatile__ ("\n1:\n\t"
650 "lwarx %0, 0, %2\n\t"
651 "addi %1, %0, -1\n\t"
652 "stwcx. %1, 0, %2\n\t"
654 : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
658 static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
659 gpointer exch, gpointer comp)
663 __asm__ __volatile__ ("\n1:\n\t"
664 LDREGX " %0, 0, %1\n\t"
667 STREGCXD " %3, 0, %1\n\t"
671 : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
675 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
676 gint32 exch, gint32 comp) {
679 __asm__ __volatile__ ("\n1:\n\t"
680 "lwarx %0, 0, %1\n\t"
683 "stwcx. %3, 0, %1\n\t"
687 : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
691 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
695 __asm__ __volatile__ ("\n1:\n\t"
696 "lwarx %0, 0, %2\n\t"
697 "stwcx. %3, 0, %2\n\t"
699 : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
703 static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
707 __asm__ __volatile__ ("\n1:\n\t"
708 LDREGX " %0, 0, %2\n\t"
709 STREGCXD " %3, 0, %2\n\t"
711 : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
715 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
718 __asm__ __volatile__ ("\n1:\n\t"
719 "lwarx %0, 0, %2\n\t"
721 "stwcx. %1, 0, %2\n\t"
723 : "=&r" (result), "=&r" (tmp)
724 : "r" (dest), "r" (add) : "cc", "memory");
732 #endif /* !G_COMPILER_CODEWARRIOR */
/*
 * ARM section.
 * NOTE(review): garbled extraction fragment — the ldrex/teq/dmb lines of
 * the ARMv6/v7 paths and most of the pre-v6 swp fallback scaffolding were
 * dropped, and original line numbers are fused onto each line. Each
 * primitive has an ldrex/strex path (ARMv6+) and a legacy swp path.
 * Restore from upstream before use.
 */
734 #elif defined(__arm__)
/* Native Client sandboxing: memory-access registers must be masked and
 * load/store bundles aligned; both macros are no-ops elsewhere. */
736 #ifdef __native_client__
737 #define MASK_REGISTER(reg, cond) "bic" cond " " reg ", " reg ", #0xc0000000\n"
738 #define NACL_ALIGN() ".align 4\n"
740 #define MASK_REGISTER(reg, cond)
745 * Atomic operations on ARM doesn't contain memory barriers, and the runtime code
746 * depends on this, so we add them explicitly.
749 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
751 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7S__)
753 __asm__ __volatile__ ( "1:\n"
758 MASK_REGISTER("%2", "al")
763 MASK_REGISTER("%2", "eq")
764 "strexeq %0, %4, [%2]\n"
768 : "=&r" (tmp), "=&r" (ret)
769 : "r" (dest), "r" (comp), "r" (exch)
/* pre-ARMv6 fallback: swp-based emulation (not a true CAS loop barrier). */
776 __asm__ __volatile__ ( "0:\n\t"
778 MASK_REGISTER("%2", "al")
784 MASK_REGISTER("%2", "al")
785 "swp %0, %3, [%2]\n\t"
788 MASK_REGISTER("%2", "ne")
789 "swpne %3, %0, [%2]\n\t"
792 : "=&r" (a), "=&r" (b)
793 : "r" (dest), "r" (exch), "r" (comp)
800 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
802 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7S__)
804 __asm__ __volatile__ (
810 MASK_REGISTER("%2", "al")
815 MASK_REGISTER("%2", "eq")
816 "strexeq %0, %4, [%2]\n"
820 : "=&r" (tmp), "=&r" (ret)
821 : "r" (dest), "r" (comp), "r" (exch)
828 __asm__ __volatile__ ( "0:\n\t"
830 MASK_REGISTER("%2", "al")
836 MASK_REGISTER("%2", "eq")
837 "swpeq %0, %3, [%2]\n\t"
840 MASK_REGISTER("%2", "ne")
841 "swpne %3, %0, [%2]\n\t"
844 : "=&r" (a), "=&r" (b)
845 : "r" (dest), "r" (exch), "r" (comp)
852 static inline gint32 InterlockedIncrement(volatile gint32 *dest)
854 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7S__)
856 __asm__ __volatile__ (
860 MASK_REGISTER("%2", "al")
864 MASK_REGISTER("%2", "al")
865 "strex %1, %0, [%2]\n"
869 : "=&r" (ret), "=&r" (flag)
870 : "r" (dest), "r" (1)
877 __asm__ __volatile__ ( "0:\n\t"
879 MASK_REGISTER("%3", "al")
883 MASK_REGISTER("%3", "al")
884 "swp %2, %1, [%3]\n\t"
887 MASK_REGISTER("%3", "ne")
888 "swpne %1, %2, [%3]\n\t"
890 : "=&r" (a), "=&r" (b), "=&r" (c)
891 : "r" (dest), "r" (1)
898 static inline gint32 InterlockedDecrement(volatile gint32 *dest)
900 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7S__)
902 __asm__ __volatile__ (
906 MASK_REGISTER("%2", "al")
910 MASK_REGISTER("%2", "al")
911 "strex %1, %0, [%2]\n"
915 : "=&r" (ret), "=&r" (flag)
916 : "r" (dest), "r" (1)
923 __asm__ __volatile__ ( "0:\n\t"
925 MASK_REGISTER("%3", "al")
929 MASK_REGISTER("%3", "al")
930 "swp %2, %1, [%3]\n\t"
933 MASK_REGISTER("%3", "ne")
934 "swpne %1, %2, [%3]\n\t"
936 : "=&r" (a), "=&r" (b), "=&r" (c)
937 : "r" (dest), "r" (-1)
944 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
946 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7S__)
948 __asm__ __volatile__ (
952 MASK_REGISTER("%3", "al")
955 MASK_REGISTER("%3", "al")
956 "strex %1, %2, [%3]\n"
960 : "=&r" (ret), "=&r" (flag)
961 : "r" (exch), "r" (dest)
/* pre-v6: a single swp suffices for a plain exchange. */
967 __asm__ __volatile__ ( NACL_ALIGN()
968 MASK_REGISTER("%1", "al")
971 : "r" (dest), "r" (exch));
977 static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
979 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7S__)
981 __asm__ __volatile__ (
985 MASK_REGISTER("%3", "al")
988 MASK_REGISTER("%3", "al")
989 "strex %1, %2, [%3]\n"
993 : "=&r" (ret), "=&r" (flag)
994 : "r" (exch), "r" (dest)
1000 __asm__ __volatile__ ( NACL_ALIGN()
1001 MASK_REGISTER("%1", "al")
1004 : "r" (dest), "r" (exch));
1010 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
1012 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7S__)
1013 gint32 ret, tmp, flag;
1014 __asm__ __volatile__ (
1018 MASK_REGISTER("%3", "al")
1022 MASK_REGISTER("%3", "al")
1023 "strex %2, %1, [%3]\n"
1027 : "=&r" (ret), "=&r" (tmp), "=&r" (flag)
1028 : "r" (dest), "r" (add)
1035 __asm__ __volatile__ ( "0:\n\t"
1037 MASK_REGISTER("%3", "al")
1039 "add %1, %0, %4\n\t"
1041 MASK_REGISTER("%3", "al")
1042 "swp %2, %1, [%3]\n\t"
1045 MASK_REGISTER("%3", "ne")
1046 "swpne %1, %2, [%3]\n\t"
1048 : "=&r" (a), "=&r" (b), "=&r" (c)
1049 : "r" (dest), "r" (add)
1056 #elif defined(__ia64__)
1058 #ifdef __INTEL_COMPILER
1059 #include <ia64intrin.h>
1062 static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
1063 gint32 exch, gint32 comp)
1068 #ifdef __INTEL_COMPILER
1069 old = _InterlockedCompareExchange (dest, exch, comp);
1071 /* cmpxchg4 zero extends the value read from memory */
1072 real_comp = (guint64)(guint32)comp;
1073 asm volatile ("mov ar.ccv = %2 ;;\n\t"
1074 "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
1075 : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
1081 static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
1082 gpointer exch, gpointer comp)
1086 #ifdef __INTEL_COMPILER
1087 old = _InterlockedCompareExchangePointer (dest, exch, comp);
1089 asm volatile ("mov ar.ccv = %2 ;;\n\t"
1090 "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
1091 : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
1097 static inline gint32 InterlockedIncrement(gint32 volatile *val)
1099 #ifdef __INTEL_COMPILER
1100 return _InterlockedIncrement (val);
1106 } while (InterlockedCompareExchange (val, old + 1, old) != old);
1112 static inline gint32 InterlockedDecrement(gint32 volatile *val)
1114 #ifdef __INTEL_COMPILER
1115 return _InterlockedDecrement (val);
1121 } while (InterlockedCompareExchange (val, old - 1, old) != old);
1127 static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
1129 #ifdef __INTEL_COMPILER
1130 return _InterlockedExchange (dest, new_val);
1136 } while (InterlockedCompareExchange (dest, new_val, res) != res);
1142 static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
1144 #ifdef __INTEL_COMPILER
1145 return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
1151 } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
1157 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
1161 #ifdef __INTEL_COMPILER
1162 old = _InterlockedExchangeAdd (val, add);
1166 } while (InterlockedCompareExchange (val, old + add, old) != old);
1172 #elif defined(__mips__)
1174 #if SIZEOF_REGISTER == 8
1175 #error "Not implemented."
1178 static inline gint32 InterlockedIncrement(volatile gint32 *val)
1180 gint32 tmp, result = 0;
1182 __asm__ __volatile__ (" .set mips32\n"
1188 : "=&r" (result), "=&r" (tmp), "=m" (*val)
1193 static inline gint32 InterlockedDecrement(volatile gint32 *val)
1195 gint32 tmp, result = 0;
1197 __asm__ __volatile__ (" .set mips32\n"
1203 : "=&r" (result), "=&r" (tmp), "=m" (*val)
1208 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
1209 gint32 exch, gint32 comp) {
1212 __asm__ __volatile__ (" .set mips32\n"
1219 : "=&r" (old), "=&r" (tmp), "=m" (*dest)
1220 : "m" (*dest), "r" (exch), "r" (comp));
1224 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
1226 return (gpointer)(InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp)));
1229 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
1233 __asm__ __volatile__ (" .set mips32\n"
1239 : "=&r" (result), "=&r" (tmp), "=m" (*dest)
1240 : "m" (*dest), "r" (exch));
1244 static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
1246 return (gpointer)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch));
1249 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
1253 __asm__ __volatile__ (" .set mips32\n"
1255 " addu %1, %0, %4\n"
1259 : "=&r" (result), "=&r" (tmp), "=m" (*dest)
1260 : "m" (*dest), "r" (add));
1266 #define WAPI_NO_ATOMIC_ASM
1268 extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
1269 extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
1270 extern gint32 InterlockedIncrement(volatile gint32 *dest);
1271 extern gint32 InterlockedDecrement(volatile gint32 *dest);
1272 extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
1273 extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
1274 extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
1279 #ifdef USE_GCC_ATOMIC_OPS
1281 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
1282 gint32 exch, gint32 comp)
1284 return __sync_val_compare_and_swap (dest, comp, exch);
1287 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
1289 return __sync_val_compare_and_swap (dest, comp, exch);
1292 static inline gint32 InterlockedIncrement(volatile gint32 *val)
1294 return __sync_add_and_fetch (val, 1);
1297 static inline gint32 InterlockedDecrement(volatile gint32 *val)
1299 return __sync_add_and_fetch (val, -1);
1302 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
1307 } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
1311 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
1317 } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
1321 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
1323 return __sync_fetch_and_add (val, add);
1327 #endif /* _WAPI_ATOMIC_H_ */