/*
 * atomic.h:  Atomic operations
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2012 Xamarin Inc
 */

#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_
#if defined(__NetBSD__)
#include <sys/param.h>

#if __NetBSD_Version__ > 499004000
#include <sys/atomic.h>
#define HAVE_ATOMIC_OPS
#endif

#endif

#include <glib.h>
/* On Windows, we always use the functions provided by the Windows API. */
#if defined(__WIN32__) || defined(_WIN32)

#include <windows.h>

/* Prefer GCC atomic ops if the target supports them (see configure.in). */
#elif defined(USE_GCC_ATOMIC_OPS)
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	return __sync_val_compare_and_swap (dest, comp, exch);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	return __sync_val_compare_and_swap (dest, comp, exch);
}

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	return __sync_add_and_fetch (val, 1);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	return __sync_add_and_fetch (val, -1);
}

static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 old_val;
	do {
		old_val = *val;
	} while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
	return old_val;
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer old_val;
	do {
		old_val = *val;
	} while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
	return old_val;
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	return __sync_fetch_and_add (val, add);
}
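/*
 * A minimal illustrative sketch: any further read-modify-write operation
 * can be layered on the CAS builtin the same way InterlockedExchange is
 * above.  sample_interlocked_max is a hypothetical name, not part of
 * this API.
 */
static inline gint32 sample_interlocked_max(volatile gint32 *dest, gint32 val)
{
	gint32 old_val;
	do {
		old_val = *dest;
		if (old_val >= val)
			return old_val;	/* already at least val, nothing to do */
	} while (__sync_val_compare_and_swap (dest, old_val, val) != old_val);
	return val;
}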
#elif defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)

static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	return atomic_cas_32((uint32_t*)dest, comp, exch);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	return atomic_cas_ptr(dest, comp, exch);
}

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	return atomic_inc_32_nv((uint32_t*)val);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	return atomic_dec_32_nv((uint32_t*)val);
}

static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	return atomic_swap_32((uint32_t*)val, new_val);
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	return atomic_swap_ptr(val, new_val);
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	return atomic_add_32_nv((uint32_t*)val, add) - add;
}
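/*
 * Note on InterlockedExchangeAdd above: atomic_add_32_nv() returns the
 * new value (hence the "_nv" suffix), while the Interlocked contract is
 * to return the old one; subtracting "add" recovers it:
 * (old + add) - add == old.
 */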
#elif defined(__i386__) || defined(__x86_64__)

/*
 * NB: The *Pointer() functions here assume that
 * sizeof(pointer)==sizeof(gint32)
 *
 * NB2: These asm functions assume 486+ (some of the opcodes don't
 * exist on 386). If this becomes an issue, we can get configure to
 * fall back to the non-atomic C versions of these calls.
 */
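/*
 * A minimal sketch of how the NB assumption above could be verified at
 * compile time on 32-bit x86 (the typedef name is hypothetical, not part
 * of the original header): the array size becomes negative, and the
 * build fails, whenever the sizes differ.
 */
#if defined(__i386__)
typedef char sample_assert_ptr_fits_gint32 [sizeof (gpointer) == sizeof (gint32) ? 1 : -1];
#endif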
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));
	return(old);
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("lock; "
#if defined(__x86_64__) && !defined(__native_client__)
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));
	return(old);
}
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (1), "m" (*val));

	return(tmp+1);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (-1), "m" (*val));

	return(tmp-1);
}
/*
 * See
 * http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
 * for the reasons for using cmpxchg and a loop here.
 */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;

	__asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
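/*
 * For illustration, the asm above behaves like this CAS loop built on
 * the InterlockedCompareExchange defined earlier in this branch
 * (sample_exchange_via_cas is a hypothetical name used only to show the
 * equivalence):
 */
static inline gint32 sample_exchange_via_cas (volatile gint32 *val, gint32 new_val)
{
	gint32 old;
	do {
		old = *val;	/* snapshot the current value */
	} while (InterlockedCompareExchange (val, new_val, old) != old);
	return old;	/* the value that was replaced */
}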
static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer ret;

	__asm__ __volatile__ ("1:; lock; "
#if defined(__x86_64__) && !defined(__native_client__)
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (ret), "=m" (*val)
			      : "0" (add), "m" (*val));

	return(ret);
}
#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)

static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 comp asm("o4") = _comp;
	register gint32 exch asm("o5") = _exch;

	__asm__ __volatile__(
		/* cas [%%g1], %%o4, %%o5 */
		".word 0xdbe0500c"
		: "=r" (exch)
		: "0" (exch), "r" (dest), "r" (comp)
		: "memory");

	return exch;
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
{
	register volatile gpointer *dest asm("g1") = _dest;
	register gpointer comp asm("o4") = _comp;
	register gpointer exch asm("o5") = _exch;

	__asm__ __volatile__(
#ifdef SPARCV9
		/* casx [%%g1], %%o4, %%o5 */
		".word 0xdbf0500c"
#else
		/* cas [%%g1], %%o4, %%o5 */
		".word 0xdbe0500c"
#endif
		: "=r" (exch)
		: "0" (exch), "r" (dest), "r" (comp)
		: "memory");

	return exch;
}
static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	add	%%o4, 1, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 add	%%o5, 1, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest)
		: "memory", "cc");

	return ret;
}
static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	sub	%%o4, 1, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 sub	%%o5, 1, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest)
		: "memory", "cc");

	return ret;
}
static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	mov	%3, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 nop"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (exch)
		: "memory", "cc");

	return ret;
}
static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
{
	register volatile gpointer *dest asm("g1") = _dest;
	register gpointer tmp asm("o4");
	register gpointer ret asm("o5");

	__asm__ __volatile__(
#ifdef SPARCV9
		"1:	ldx	[%%g1], %%o4\n\t"
#else
		"1:	ld	[%%g1], %%o4\n\t"
#endif
		"	mov	%3, %%o5\n\t"
#ifdef SPARCV9
		/*	casx	[%%g1], %%o4, %%o5 */
		"	.word	0xdbf0500c\n\t"
#else
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
#endif
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 nop"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (exch)
		: "memory", "cc");

	return ret;
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	add	%%o4, %3, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 add	%%o5, %3, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (add)
		: "memory", "cc");

	/* the delay-slot add leaves old+add in %o5; return the old value */
	return ret - add;
}
#elif defined(__s390x__)

static inline gint32
InterlockedCompareExchange(volatile gint32 *dest,
			   gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLR\t%1,%3\n"
			      "\tCS\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");
	return(old);
}
static inline gpointer
InterlockedCompareExchangePointer(volatile gpointer *dest,
				  gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLGR\t%1,%3\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");
	return(old);
}
static inline gint32
InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1\n"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");
	return(tmp);
}
static inline gint32
InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,-1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1\n"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");
	return(tmp);
}
static inline gint32
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b\n"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");
	return(ret);
}
static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tLG\t%1,%0\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      "\tJNZ\t0b\n"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");
	return(ret);
}
static inline gint32
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAR\t1,%2\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      : "=&r" (ret), "+m" (*val)
			      : "r" (add)
			      : "1", "2", "cc");
	return(ret);
}
#elif defined(__mono_ppc__)

#ifdef G_COMPILER_CODEWARRIOR
static inline gint32 InterlockedIncrement(volatile register gint32 *val)
{
	register gint32 result = 0;

	asm
	{
		@1:
			lwarx	result, 0, val
			addi	result, result, 1
			stwcx.	result, 0, val
			bne-	@1
	}

	return result;
}

static inline gint32 InterlockedDecrement(register volatile gint32 *val)
{
	register gint32 result = 0;

	asm
	{
		@1:
			lwarx	result, 0, val
			addi	result, result, -1
			stwcx.	result, 0, val
			bne-	@1
	}

	return result;
}
#define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))

static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
{
	register gint32 tmp = 0;

	asm
	{
		@1:
			lwarx	tmp, 0, dest
			cmpw	tmp, comp
			bne-	@2
			stwcx.	exch, 0, dest
			bne-	@1
		@2:
	}

	return tmp;
}

static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
{
	register gint32 tmp = 0;

	asm
	{
		@1:
			lwarx	tmp, 0, dest
			stwcx.	exch, 0, dest
			bne-	@1
	}

	return tmp;
}

#define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))

#else
/* Pointer-width load-reserved/store-conditional/compare mnemonics. */
#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
#define LDREGX "ldarx"
#define STREGCXD "stdcx."
#define CMPREG "cmpd"
#else
#define LDREGX "lwarx"
#define STREGCXD "stwcx."
#define CMPREG "cmpw"
#endif
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, 1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
	return result + 1;
}
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, -1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
	return result - 1;
}
static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
							  gpointer exch, gpointer comp)
{
	gpointer tmp = NULL;

	__asm__ __volatile__ ("\n1:\n\t"
			     LDREGX " %0, 0, %1\n\t"
			     CMPREG " %0, %2\n\t"
			     "bne-    2f\n\t"
			     STREGCXD " %3, 0, %1\n\t"
			     "bne-    1b\n"
			     "2:"
			     : "=&r" (tmp)
			     : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			     "lwarx   %0, 0, %1\n\t"
			     "cmpw    %0, %2\n\t"
			     "bne-    2f\n\t"
			     "stwcx.  %3, 0, %1\n\t"
			     "bne-    1b\n"
			     "2:"
			     : "=&r" (tmp)
			     : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "stwcx. %3, 0, %2\n\t"
			      "bne    1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
{
	gpointer tmp = NULL;

	__asm__ __volatile__ ("\n1:\n\t"
			      LDREGX " %0, 0, %2\n\t"
			      STREGCXD " %3, 0, %2\n\t"
			      "bne    1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	gint32 result, tmp;
	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "add    %1, %0, %3\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne    1b"
			      : "=&r" (result), "=&r" (tmp)
			      : "r" (dest), "r" (add) : "cc", "memory");
	return(result);
}

#undef LDREGX
#undef STREGCXD
#undef CMPREG

#endif /* !G_COMPILER_CODEWARRIOR */
#elif defined(__arm__)

#ifdef __native_client__
#define MASK_REGISTER(reg, cond) "bic" cond " " reg ", " reg ", #0xc0000000\n"
#define NACL_ALIGN() ".align 4\n"
#else
#define MASK_REGISTER(reg, cond)
#define NACL_ALIGN()
#endif
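/*
 * For illustration: under Native Client, MASK_REGISTER("%2", "eq")
 * expands to the string "biceq %2, %2, #0xc0000000\n", which clears the
 * top two address bits so every load/store stays inside the NaCl
 * sandbox; outside NaCl both macros expand to nothing.
 */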
/*
 * Atomic operations on ARM don't include memory barriers, and the runtime
 * code depends on them, so we add the barriers explicitly.
 */
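/*
 * A minimal sketch of the barrier being added (assuming ARMv7, where the
 * "dmb" instruction is available; sample_memory_barrier is a hypothetical
 * name used only for illustration):
 */
#if defined(HAVE_ARMV7)
static inline void sample_memory_barrier (void)
{
	__asm__ __volatile__ ("dmb" : : : "memory");
}
#endif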
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
#if defined(HAVE_ARMV7)
	gint32 ret, tmp;
	__asm__ __volatile__ (	"1:\n"
				NACL_ALIGN()
				"dmb\n"
				"mov	%0, #0\n"
				MASK_REGISTER("%2", "al")
				"ldrex %1, [%2]\n"
				"teq	%1, %3\n"
				"it eq\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "eq")
				"strexeq %0, %4, [%2]\n"
				"teq %0, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (tmp), "=&r" (ret)
				: "r" (dest), "r" (comp), "r" (exch)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldr %1, [%2]\n\t"
				"cmp %1, %4\n\t"
				"mov %0, %1\n\t"
				"bne 1f\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"swp %0, %3, [%2]\n\t"
				"cmp %0, %1\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "ne")
				"swpne %3, %0, [%2]\n\t"
				"bne 0b\n\t"
				"1:"
				: "=&r" (a), "=&r" (b)
				: "r" (dest), "r" (exch), "r" (comp)
				: "cc", "memory");

	return a;
#endif
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
#if defined(HAVE_ARMV7)
	gpointer ret, tmp;
	__asm__ __volatile__ (
				"dmb\n"
				"1:\n"
				NACL_ALIGN()
				"mov	%0, #0\n"
				MASK_REGISTER("%2", "al")
				"ldrex %1, [%2]\n"
				"teq	%1, %3\n"
				"it eq\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "eq")
				"strexeq %0, %4, [%2]\n"
				"teq %0, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (tmp), "=&r" (ret)
				: "r" (dest), "r" (comp), "r" (exch)
				: "memory", "cc");

	return ret;
#else
	gpointer a, b;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldr %1, [%2]\n\t"
				"cmp %1, %4\n\t"
				"mov %0, %1\n\t"
				"bne 1f\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "eq")
				"swpeq %0, %3, [%2]\n\t"
				"cmp %0, %1\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%2", "ne")
				"swpne %3, %0, [%2]\n\t"
				"bne 0b\n\t"
				"1:"
				: "=&r" (a), "=&r" (b)
				: "r" (dest), "r" (exch), "r" (comp)
				: "cc", "memory");

	return a;
#endif
}
static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
#if defined(HAVE_ARMV7)
	gint32 ret, flag;
	__asm__ __volatile__ (
				"dmb\n"
				"1:\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldrex %0, [%2]\n"
				"add %0, %0, %3\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"strex %1, %0, [%2]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (dest), "r" (1)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "ne")
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (1)
				: "cc", "memory");

	return b;
#endif
}
static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
#if defined(HAVE_ARMV7)
	gint32 ret, flag;
	__asm__ __volatile__ (
				"dmb\n"
				"1:\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"ldrex %0, [%2]\n"
				"sub %0, %0, %3\n"
				NACL_ALIGN()
				MASK_REGISTER("%2", "al")
				"strex %1, %0, [%2]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (dest), "r" (1)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "ne")
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (-1)
				: "cc", "memory");

	return b;
#endif
}
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
#if defined(HAVE_ARMV7)
	gint32 ret, flag;
	__asm__ __volatile__ (
				"dmb\n"
				"1:\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldrex %0, [%3]\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"strex %1, %2, [%3]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (exch), "r" (dest)
				: "memory", "cc");

	return ret;
#else
	gint32 a;

	__asm__ __volatile__ (	NACL_ALIGN()
				MASK_REGISTER("%1", "al")
				"swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
#endif
}
static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
#if defined(HAVE_ARMV7)
	gpointer ret, flag;
	__asm__ __volatile__ (
				"dmb\n"
				"1:\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldrex %0, [%3]\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"strex %1, %2, [%3]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (exch), "r" (dest)
				: "memory", "cc");

	return ret;
#else
	gpointer a;

	__asm__ __volatile__ (	NACL_ALIGN()
				MASK_REGISTER("%1", "al")
				"swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
#endif
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
#if defined(HAVE_ARMV7)
	gint32 ret, tmp, flag;
	__asm__ __volatile__ (
				"dmb\n"
				"1:\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldrex %0, [%3]\n"
				"add %1, %0, %4\n"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"strex %2, %1, [%3]\n"
				"teq %2, #0\n"
				"bne 1b\n"
				"dmb\n"
				: "=&r" (ret), "=&r" (tmp), "=&r" (flag)
				: "r" (dest), "r" (add)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (	"0:\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "al")
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				NACL_ALIGN()
				MASK_REGISTER("%3", "ne")
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (add)
				: "cc", "memory");

	return a;
#endif
}
#undef NACL_ALIGN
#undef MASK_REGISTER
#elif defined(__ia64__)

#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif

static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;
	guint64 real_comp;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchange (dest, exch, comp);
#else
	/* cmpxchg4 zero extends the value read from memory */
	real_comp = (guint64)(guint32)comp;
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
		      "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
		      : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
#endif

	return(old);
}
static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
							 gpointer exch, gpointer comp)
{
	gpointer old;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchangePointer (dest, exch, comp);
#else
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
		      "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
		      : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
#endif

	return(old);
}
static inline gint32 InterlockedIncrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedIncrement (val);
#else
	gint32 old;

	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old + 1, old) != old);

	return old + 1;
#endif
}
static inline gint32 InterlockedDecrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedDecrement (val);
#else
	gint32 old;

	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old - 1, old) != old);

	return old - 1;
#endif
}
static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedExchange (dest, new_val);
#else
	gint32 res;

	do {
		res = *dest;
	} while (InterlockedCompareExchange (dest, new_val, res) != res);

	return res;
#endif
}
static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
{
#ifdef __INTEL_COMPILER
	return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
#else
	gpointer res;

	do {
		res = *dest;
	} while (InterlockedCompareExchangePointer (dest, new_val, res) != res);

	return res;
#endif
}
static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
{
	gint32 old;

#ifdef __INTEL_COMPILER
	old = _InterlockedExchangeAdd (val, add);
#else
	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old + add, old) != old);
#endif
	return old;
}
#elif defined(__mips__)

#if SIZEOF_REGISTER == 8
#error "Not implemented."
#endif

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp, result = 0;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %2\n"
			      "    addu    %1, %0, 1\n"
			      "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
			      : "m" (*val));
	return result + 1;
}
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp, result = 0;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %2\n"
			      "    subu    %1, %0, 1\n"
			      "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
			      : "m" (*val));
	return result - 1;
}
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 old, tmp;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %3\n"
			      "    bne     %0, %5, 2f\n"
			      "    move    %1, %4\n"
			      "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "2:  .set    mips0\n"
			      : "=&r" (old), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (exch), "r" (comp));
	return(old);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	return (gpointer)(InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp)));
}
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 result, tmp;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %3\n"
			      "    move    %1, %4\n"
			      "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (exch));
	return result;
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
	return (gpointer)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch));
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	gint32 result, tmp;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %3\n"
			      "    addu    %1, %0, %4\n"
			      "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (add));
	return result;
}
#else

#define WAPI_NO_ATOMIC_ASM

extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);

#endif
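/*
 * A minimal sketch of how such an out-of-line fallback can be written
 * (an assumption for illustration only, not the actual definition; the
 * real one lives in a .c file and covers all seven functions).
 * Serializing every operation through one process-wide mutex is slow
 * but correct:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t fallback_mutex = PTHREAD_MUTEX_INITIALIZER;

gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
	gint32 old;

	pthread_mutex_lock (&fallback_mutex);
	old = *dest;
	if (old == comp)
		*dest = exch;	/* exchange only when the comparand matches */
	pthread_mutex_unlock (&fallback_mutex);

	return old;	/* always return the previous value */
}
#endif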
#endif /* _WAPI_ATOMIC_H_ */