*
* (C) 2002 Ximian, Inc.
* Copyright 2012 Xamarin Inc
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_
-#if defined(__NetBSD__)
-#include <sys/param.h>
-
-#if __NetBSD_Version__ > 499004000
-#include <sys/atomic.h>
-#define HAVE_ATOMIC_OPS
-#endif
-
-#endif
-
#include "config.h"
#include <glib.h>
+#include <mono/utils/mono-membar.h>
+
+/*
+The current Nexus 7 arm-v7a fails with:
+F/MonoDroid( 1568): shared runtime initialization error: Cannot load library: reloc_library[1285]: 37 cannot locate '__sync_val_compare_and_swap_8'
+
+Apple targets have historically been problematic; Xcode 4.6 would miscompile the intrinsic.
+*/
/* On Windows, we always use the functions provided by the Windows API. */
#if defined(__WIN32__) || defined(_WIN32)
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
#include <windows.h>
-/* Prefer GCC atomic ops if the target supports it (see configure.in). */
-#elif defined(USE_GCC_ATOMIC_OPS)
-
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
- gint32 exch, gint32 comp)
+/* mingw is missing InterlockedCompareExchange64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDCOMPAREEXCHANGE64==0
+static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
{
return __sync_val_compare_and_swap (dest, comp, exch);
}
+#endif
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
-{
- return __sync_val_compare_and_swap (dest, comp, exch);
-}
-
-static inline gint32 InterlockedIncrement(volatile gint32 *val)
-{
- return __sync_add_and_fetch (val, 1);
-}
-
-static inline gint32 InterlockedDecrement(volatile gint32 *val)
-{
- return __sync_add_and_fetch (val, -1);
-}
-
-static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
-{
- gint32 old_val;
- do {
- old_val = *val;
- } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
- return old_val;
-}
-
-static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
- gpointer new_val)
+/* mingw is missing InterlockedExchange64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDEXCHANGE64==0
+static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
{
- gpointer old_val;
+ gint64 old_val;
do {
old_val = *val;
- } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
+ } while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
return old_val;
}
+#endif
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
-{
- return __sync_fetch_and_add (val, add);
-}
-
-#elif defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)
-
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
- gint32 exch, gint32 comp)
+/* mingw is missing InterlockedIncrement64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDINCREMENT64==0
+static inline gint64 InterlockedIncrement64(volatile gint64 *val)
{
- return atomic_cas_32((uint32_t*)dest, comp, exch);
+ return __sync_add_and_fetch (val, 1);
}
+#endif
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
+/* mingw is missing InterlockedDecrement64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDDECREMENT64==0
+static inline gint64 InterlockedDecrement64(volatile gint64 *val)
{
- return atomic_cas_ptr(dest, comp, exch);
+ return __sync_sub_and_fetch (val, 1);
}
+#endif
-static inline gint32 InterlockedIncrement(volatile gint32 *val)
+/* mingw is missing InterlockedAdd () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDADD==0
+static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
{
- return atomic_inc_32_nv((uint32_t*)val);
+ return __sync_add_and_fetch (dest, add);
}
+#endif
-static inline gint32 InterlockedDecrement(volatile gint32 *val)
+/* mingw is missing InterlockedAdd64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDADD64==0
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
- return atomic_dec_32_nv((uint32_t*)val);
+ return __sync_add_and_fetch (dest, add);
}
+#endif
-static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
+#if defined(_MSC_VER) && !defined(InterlockedAdd)
+/* MSVC before 2013 only defines InterlockedAdd* for the Itanium architecture */
+static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
{
- return atomic_swap_32((uint32_t*)val, new_val);
+ return InterlockedExchangeAdd (dest, add) + add;
}
+#endif
-static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
- gpointer new_val)
+#if defined(_MSC_VER) && !defined(InterlockedAdd64)
+#if defined(InterlockedExchangeAdd64)
+/* This may be defined only on amd64 */
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
- return atomic_swap_ptr(val, new_val);
+ return InterlockedExchangeAdd64 (dest, add) + add;
}
-
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
+#else
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
- return atomic_add_32_nv((uint32_t*)val, add) - add;
-}
+ gint64 prev_value;
-#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
+ do {
+ prev_value = *dest;
+ } while (prev_value != InterlockedCompareExchange64(dest, prev_value + add, prev_value));
-G_GNUC_UNUSED
-static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
-{
- register volatile gint32 *dest asm("g1") = _dest;
- register gint32 comp asm("o4") = _comp;
- register gint32 exch asm("o5") = _exch;
-
- __asm__ __volatile__(
- /* cas [%%g1], %%o4, %%o5 */
- ".word 0xdbe0500c"
- : "=r" (exch)
- : "0" (exch), "r" (dest), "r" (comp)
- : "memory");
-
- return exch;
+ return prev_value + add;
}
+#endif
+#endif
-G_GNUC_UNUSED
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
-{
- register volatile gpointer *dest asm("g1") = _dest;
- register gpointer comp asm("o4") = _comp;
- register gpointer exch asm("o5") = _exch;
-
- __asm__ __volatile__(
-#ifdef SPARCV9
- /* casx [%%g1], %%o4, %%o5 */
- ".word 0xdbf0500c"
+#ifdef HOST_WIN32
+#define TO_INTERLOCKED_ARGP(ptr) ((volatile LONG*)(ptr))
#else
- /* cas [%%g1], %%o4, %%o5 */
- ".word 0xdbe0500c"
+#define TO_INTERLOCKED_ARGP(ptr) (ptr)
#endif
- : "=r" (exch)
- : "0" (exch), "r" (dest), "r" (comp)
- : "memory");
- return exch;
-}
+/* And now for some dirty hacks... The Windows API doesn't
+ * provide any useful primitives for this (other than getting
+ * into architecture-specific madness), so use CAS. */
-G_GNUC_UNUSED
-static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
+static inline gint32 InterlockedRead(volatile gint32 *src)
{
- register volatile gint32 *dest asm("g1") = _dest;
- register gint32 tmp asm("o4");
- register gint32 ret asm("o5");
-
- __asm__ __volatile__(
- "1: ld [%%g1], %%o4\n\t"
- " add %%o4, 1, %%o5\n\t"
- /* cas [%%g1], %%o4, %%o5 */
- " .word 0xdbe0500c\n\t"
- " cmp %%o4, %%o5\n\t"
- " bne 1b\n\t"
- " add %%o5, 1, %%o5"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (dest)
- : "memory", "cc");
-
- return ret;
+ return InterlockedCompareExchange (TO_INTERLOCKED_ARGP (src), 0, 0);
}
-G_GNUC_UNUSED
-static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
+static inline gint64 InterlockedRead64(volatile gint64 *src)
{
- register volatile gint32 *dest asm("g1") = _dest;
- register gint32 tmp asm("o4");
- register gint32 ret asm("o5");
-
- __asm__ __volatile__(
- "1: ld [%%g1], %%o4\n\t"
- " sub %%o4, 1, %%o5\n\t"
- /* cas [%%g1], %%o4, %%o5 */
- " .word 0xdbe0500c\n\t"
- " cmp %%o4, %%o5\n\t"
- " bne 1b\n\t"
- " sub %%o5, 1, %%o5"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (dest)
- : "memory", "cc");
-
- return ret;
+ return InterlockedCompareExchange64 (src, 0, 0);
}
-G_GNUC_UNUSED
-static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
+static inline gpointer InterlockedReadPointer(volatile gpointer *src)
{
- register volatile gint32 *dest asm("g1") = _dest;
- register gint32 tmp asm("o4");
- register gint32 ret asm("o5");
-
- __asm__ __volatile__(
- "1: ld [%%g1], %%o4\n\t"
- " mov %3, %%o5\n\t"
- /* cas [%%g1], %%o4, %%o5 */
- " .word 0xdbe0500c\n\t"
- " cmp %%o4, %%o5\n\t"
- " bne 1b\n\t"
- " nop"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (dest), "r" (exch)
- : "memory", "cc");
-
- return ret;
+ return InterlockedCompareExchangePointer (src, NULL, NULL);
}
-G_GNUC_UNUSED
-static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
+static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
{
- register volatile gpointer *dest asm("g1") = _dest;
- register gpointer tmp asm("o4");
- register gpointer ret asm("o5");
-
- __asm__ __volatile__(
-#ifdef SPARCV9
- "1: ldx [%%g1], %%o4\n\t"
-#else
- "1: ld [%%g1], %%o4\n\t"
-#endif
- " mov %3, %%o5\n\t"
-#ifdef SPARCV9
- /* casx [%%g1], %%o4, %%o5 */
- " .word 0xdbf0500c\n\t"
-#else
- /* cas [%%g1], %%o4, %%o5 */
- " .word 0xdbe0500c\n\t"
-#endif
- " cmp %%o4, %%o5\n\t"
- " bne 1b\n\t"
- " nop"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (dest), "r" (exch)
- : "memory", "cc");
-
- return ret;
+ InterlockedExchange (TO_INTERLOCKED_ARGP (dst), val);
}
-G_GNUC_UNUSED
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
+static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
{
- register volatile gint32 *dest asm("g1") = _dest;
- register gint32 tmp asm("o4");
- register gint32 ret asm("o5");
-
- __asm__ __volatile__(
- "1: ld [%%g1], %%o4\n\t"
- " add %%o4, %3, %%o5\n\t"
- /* cas [%%g1], %%o4, %%o5 */
- " .word 0xdbe0500c\n\t"
- " cmp %%o4, %%o5\n\t"
- " bne 1b\n\t"
- " add %%o5, %3, %%o5"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (dest), "r" (add)
- : "memory", "cc");
-
- return ret;
+ InterlockedExchange64 (dst, val);
}
-#elif __s390x__
-
-static inline gint32
-InterlockedCompareExchange(volatile gint32 *dest,
- gint32 exch, gint32 comp)
+static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
{
- gint32 old;
-
- __asm__ __volatile__ ("\tLA\t1,%0\n"
- "\tLR\t%1,%3\n"
- "\tCS\t%1,%2,0(1)\n"
- : "+m" (*dest), "=&r" (old)
- : "r" (exch), "r" (comp)
- : "1", "cc");
- return(old);
+ InterlockedExchangePointer (dst, val);
}
-static inline gpointer
-InterlockedCompareExchangePointer(volatile gpointer *dest,
- gpointer exch,
- gpointer comp)
-{
- gpointer old;
-
- __asm__ __volatile__ ("\tLA\t1,%0\n"
- "\tLGR\t%1,%3\n"
- "\tCSG\t%1,%2,0(1)\n"
- : "+m" (*dest), "=&r" (old)
- : "r" (exch), "r" (comp)
- : "1", "cc");
-
- return(old);
-}
+/* We can't even use CAS for these, so write them out
+ * explicitly according to x86(_64) semantics... */
-static inline gint32
-InterlockedIncrement(volatile gint32 *val)
+static inline gint8 InterlockedRead8(volatile gint8 *src)
{
- gint32 tmp;
-
- __asm__ __volatile__ ("\tLA\t2,%1\n"
- "0:\tLGF\t%0,%1\n"
- "\tLGFR\t1,%0\n"
- "\tAGHI\t1,1\n"
- "\tCS\t%0,1,0(2)\n"
- "\tJNZ\t0b\n"
- "\tLGFR\t%0,1"
- : "=r" (tmp), "+m" (*val)
- : : "1", "2", "cc");
-
- return(tmp);
+ return *src;
}
-static inline gint32
-InterlockedDecrement(volatile gint32 *val)
+static inline gint16 InterlockedRead16(volatile gint16 *src)
{
- gint32 tmp;
-
- __asm__ __volatile__ ("\tLA\t2,%1\n"
- "0:\tLGF\t%0,%1\n"
- "\tLGFR\t1,%0\n"
- "\tAGHI\t1,-1\n"
- "\tCS\t%0,1,0(2)\n"
- "\tJNZ\t0b\n"
- "\tLGFR\t%0,1"
- : "=r" (tmp), "+m" (*val)
- : : "1", "2", "cc");
-
- return(tmp);
+ return *src;
}
-static inline gint32
-InterlockedExchange(volatile gint32 *val, gint32 new_val)
+static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
{
- gint32 ret;
-
- __asm__ __volatile__ ("\tLA\t1,%0\n"
- "0:\tL\t%1,%0\n"
- "\tCS\t%1,%2,0(1)\n"
- "\tJNZ\t0b"
- : "+m" (*val), "=&r" (ret)
- : "r" (new_val)
- : "1", "cc");
-
- return(ret);
+ *dst = val;
+ mono_memory_barrier ();
}
-static inline gpointer
-InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
+static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
{
- gpointer ret;
-
- __asm__ __volatile__ ("\tLA\t1,%0\n"
- "0:\tLG\t%1,%0\n"
- "\tCSG\t%1,%2,0(1)\n"
- "\tJNZ\t0b"
- : "+m" (*val), "=&r" (ret)
- : "r" (new_val)
- : "1", "cc");
-
- return(ret);
+ *dst = val;
+ mono_memory_barrier ();
}
-static inline gint32
-InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
-{
- gint32 ret;
-
- __asm__ __volatile__ ("\tLA\t2,%1\n"
- "0:\tLGF\t%0,%1\n"
- "\tLGFR\t1,%0\n"
- "\tAGR\t1,%2\n"
- "\tCS\t%0,1,0(2)\n"
- "\tJNZ\t0b"
- : "=&r" (ret), "+m" (*val)
- : "r" (add)
- : "1", "2", "cc");
-
- return(ret);
-}
+/* Prefer GCC atomic ops if the target supports it (see configure.ac). */
+#elif defined(USE_GCC_ATOMIC_OPS)
-#elif defined(__mono_ppc__)
+/*
+ * As of this comment (August 2016), all current Clang versions get atomic
+ * intrinsics on ARM64 wrong. All GCC versions prior to 5.3.0 do, too. The bug
+ * is the same: The compiler developers thought that the acq + rel barriers
+ * that ARM64 load/store instructions can impose are sufficient to provide
+ * sequential consistency semantics. This is not the case:
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2014-February/229588.html
+ *
+ * We work around this bug by inserting full barriers around each atomic
+ * intrinsic if we detect that we're built with a buggy compiler.
+ */
-#ifdef G_COMPILER_CODEWARRIOR
-static inline gint32 InterlockedIncrement(volatile register gint32 *val)
-{
- gint32 result = 0, tmp;
- register gint32 result = 0;
- register gint32 tmp;
-
- asm
- {
- @1:
- lwarx tmp, 0, val
- addi result, tmp, 1
- stwcx. result, 0, val
- bne- @1
- }
-
- return result;
-}
+#if defined (HOST_ARM64) && (defined (__clang__) || MONO_GNUC_VERSION < 50300)
+#define WRAP_ATOMIC_INTRINSIC(INTRIN) \
+ ({ \
+ mono_memory_barrier (); \
+ __typeof__ (INTRIN) atomic_ret__ = (INTRIN); \
+ mono_memory_barrier (); \
+ atomic_ret__; \
+ })
+
+#define gcc_sync_val_compare_and_swap(a, b, c) WRAP_ATOMIC_INTRINSIC (__sync_val_compare_and_swap (a, b, c))
+#define gcc_sync_add_and_fetch(a, b) WRAP_ATOMIC_INTRINSIC (__sync_add_and_fetch (a, b))
+#define gcc_sync_sub_and_fetch(a, b) WRAP_ATOMIC_INTRINSIC (__sync_sub_and_fetch (a, b))
+#define gcc_sync_fetch_and_add(a, b) WRAP_ATOMIC_INTRINSIC (__sync_fetch_and_add (a, b))
+#else
+#define gcc_sync_val_compare_and_swap(a, b, c) __sync_val_compare_and_swap (a, b, c)
+#define gcc_sync_add_and_fetch(a, b) __sync_add_and_fetch (a, b)
+#define gcc_sync_sub_and_fetch(a, b) __sync_sub_and_fetch (a, b)
+#define gcc_sync_fetch_and_add(a, b) __sync_fetch_and_add (a, b)
+#endif
-static inline gint32 InterlockedDecrement(register volatile gint32 *val)
+static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
+ gint32 exch, gint32 comp)
{
- register gint32 result = 0;
- register gint32 tmp;
-
- asm
- {
- @1:
- lwarx tmp, 0, val
- addi result, tmp, -1
- stwcx. result, 0, val
- bne- @1
- }
-
- return result;
+ return gcc_sync_val_compare_and_swap (dest, comp, exch);
}
-#define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
-static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
+static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
- register gint32 tmp = 0;
-
- asm
- {
- @1:
- lwarx tmp, 0, dest
- cmpw tmp, comp
- bne- @2
- stwcx. exch, 0, dest
- bne- @1
- @2:
- }
-
- return tmp;
+ return gcc_sync_val_compare_and_swap (dest, comp, exch);
}
-static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
-{
- register gint32 tmp = 0;
-
- asm
- {
- @1:
- lwarx tmp, 0, dest
- stwcx. exch, 0, dest
- bne- @1
- }
- return tmp;
+static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
+{
+ return gcc_sync_add_and_fetch (dest, add);
}
-#define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
-#else
-
-#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
-#define LDREGX "ldarx"
-#define STREGCXD "stdcx."
-#define CMPREG "cmpd"
-#else
-#define LDREGX "lwarx"
-#define STREGCXD "stwcx."
-#define CMPREG "cmpw"
-#endif
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
- gint32 result = 0, tmp;
-
- __asm__ __volatile__ ("\n1:\n\t"
- "lwarx %0, 0, %2\n\t"
- "addi %1, %0, 1\n\t"
- "stwcx. %1, 0, %2\n\t"
- "bne- 1b"
- : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
- return result + 1;
+ return gcc_sync_add_and_fetch (val, 1);
}
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
- gint32 result = 0, tmp;
-
- __asm__ __volatile__ ("\n1:\n\t"
- "lwarx %0, 0, %2\n\t"
- "addi %1, %0, -1\n\t"
- "stwcx. %1, 0, %2\n\t"
- "bne- 1b"
- : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
- return result - 1;
+ return gcc_sync_sub_and_fetch (val, 1);
}
-static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
- gpointer exch, gpointer comp)
+static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
- gpointer tmp = NULL;
-
- __asm__ __volatile__ ("\n1:\n\t"
- LDREGX " %0, 0, %1\n\t"
- CMPREG " %0, %2\n\t"
- "bne- 2f\n\t"
- STREGCXD " %3, 0, %1\n\t"
- "bne- 1b\n"
- "2:"
- : "=&r" (tmp)
- : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
- return(tmp);
+ gint32 old_val;
+ do {
+ old_val = *val;
+ } while (gcc_sync_val_compare_and_swap (val, old_val, new_val) != old_val);
+ return old_val;
}
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
- gint32 exch, gint32 comp) {
- gint32 tmp = 0;
-
- __asm__ __volatile__ ("\n1:\n\t"
- "lwarx %0, 0, %1\n\t"
- "cmpw %0, %2\n\t"
- "bne- 2f\n\t"
- "stwcx. %3, 0, %1\n\t"
- "bne- 1b\n"
- "2:"
- : "=&r" (tmp)
- : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
- return(tmp);
+static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
+ gpointer new_val)
+{
+ gpointer old_val;
+ do {
+ old_val = *val;
+ } while (gcc_sync_val_compare_and_swap (val, old_val, new_val) != old_val);
+ return old_val;
}
-static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
+static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
- gint32 tmp = 0;
-
- __asm__ __volatile__ ("\n1:\n\t"
- "lwarx %0, 0, %2\n\t"
- "stwcx. %3, 0, %2\n\t"
- "bne 1b"
- : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
- return(tmp);
+ return gcc_sync_fetch_and_add (val, add);
}
-static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
+static inline gint8 InterlockedRead8(volatile gint8 *src)
{
- gpointer tmp = NULL;
-
- __asm__ __volatile__ ("\n1:\n\t"
- LDREGX " %0, 0, %2\n\t"
- STREGCXD " %3, 0, %2\n\t"
- "bne 1b"
- : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
- return(tmp);
+ /* Kind of a hack, but GCC doesn't give us anything better, and it's
+ * certainly not as bad as using a CAS loop. */
+ return gcc_sync_fetch_and_add (src, 0);
}
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
+static inline gint16 InterlockedRead16(volatile gint16 *src)
{
- gint32 result, tmp;
- __asm__ __volatile__ ("\n1:\n\t"
- "lwarx %0, 0, %2\n\t"
- "add %1, %0, %3\n\t"
- "stwcx. %1, 0, %2\n\t"
- "bne 1b"
- : "=&r" (result), "=&r" (tmp)
- : "r" (dest), "r" (add) : "cc", "memory");
- return(result);
+ return gcc_sync_fetch_and_add (src, 0);
}
-#undef LDREGX
-#undef STREGCXD
-#undef CMPREG
-
-#endif /* !G_COMPILER_CODEWARRIOR */
-
-#elif defined(__arm__)
-
-#ifdef __native_client__
-#define MASK_REGISTER(reg, cond) "bic" cond " " reg ", " reg ", #0xc0000000\n"
-#define NACL_ALIGN() ".align 4\n"
-#else
-#define MASK_REGISTER(reg, cond)
-#define NACL_ALIGN()
-#endif
-
-/*
- * Atomic operations on ARM doesn't contain memory barriers, and the runtime code
- * depends on this, so we add them explicitly.
- */
-
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
+static inline gint32 InterlockedRead(volatile gint32 *src)
{
-#if defined(HAVE_ARMV7)
- gint32 ret, tmp;
- __asm__ __volatile__ ( "1:\n"
- NACL_ALIGN()
- "dmb\n"
- "mov %0, #0\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "ldrex %1, [%2]\n"
- "teq %1, %3\n"
- "it eq\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "eq")
- "strexeq %0, %4, [%2]\n"
- "teq %0, #0\n"
- "bne 1b\n"
- "dmb\n"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (dest), "r" (comp), "r" (exch)
- : "memory", "cc");
-
- return ret;
-#else
- gint32 a, b;
-
- __asm__ __volatile__ ( "0:\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "ldr %1, [%2]\n\t"
- "cmp %1, %4\n\t"
- "mov %0, %1\n\t"
- "bne 1f\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "swp %0, %3, [%2]\n\t"
- "cmp %0, %1\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%2", "ne")
- "swpne %3, %0, [%2]\n\t"
- "bne 0b\n\t"
- "1:"
- : "=&r" (a), "=&r" (b)
- : "r" (dest), "r" (exch), "r" (comp)
- : "cc", "memory");
-
- return a;
-#endif
+ return gcc_sync_fetch_and_add (src, 0);
}
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
+static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
{
-#if defined(HAVE_ARMV7)
- gpointer ret, tmp;
- __asm__ __volatile__ (
- "dmb\n"
- "1:\n"
- NACL_ALIGN()
- "mov %0, #0\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "ldrex %1, [%2]\n"
- "teq %1, %3\n"
- "it eq\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "eq")
- "strexeq %0, %4, [%2]\n"
- "teq %0, #0\n"
- "bne 1b\n"
- "dmb\n"
- : "=&r" (tmp), "=&r" (ret)
- : "r" (dest), "r" (comp), "r" (exch)
- : "memory", "cc");
-
- return ret;
-#else
- gpointer a, b;
-
- __asm__ __volatile__ ( "0:\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "ldr %1, [%2]\n\t"
- "cmp %1, %4\n\t"
- "mov %0, %1\n\t"
- "bne 1f\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%2", "eq")
- "swpeq %0, %3, [%2]\n\t"
- "cmp %0, %1\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%2", "ne")
- "swpne %3, %0, [%2]\n\t"
- "bne 0b\n\t"
- "1:"
- : "=&r" (a), "=&r" (b)
- : "r" (dest), "r" (exch), "r" (comp)
- : "cc", "memory");
-
- return a;
-#endif
+ /* Nothing useful from GCC at all, so fall back to CAS. */
+ gint8 old_val;
+ do {
+ old_val = *dst;
+ } while (gcc_sync_val_compare_and_swap (dst, old_val, val) != old_val);
}
-static inline gint32 InterlockedIncrement(volatile gint32 *dest)
+static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
{
-#if defined(HAVE_ARMV7)
- gint32 ret, flag;
- __asm__ __volatile__ (
- "dmb\n"
- "1:\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "ldrex %0, [%2]\n"
- "add %0, %0, %3\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "strex %1, %0, [%2]\n"
- "teq %1, #0\n"
- "bne 1b\n"
- "dmb\n"
- : "=&r" (ret), "=&r" (flag)
- : "r" (dest), "r" (1)
- : "memory", "cc");
-
- return ret;
-#else
- gint32 a, b, c;
-
- __asm__ __volatile__ ( "0:\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "ldr %0, [%3]\n\t"
- "add %1, %0, %4\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "swp %2, %1, [%3]\n\t"
- "cmp %0, %2\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "ne")
- "swpne %1, %2, [%3]\n\t"
- "bne 0b"
- : "=&r" (a), "=&r" (b), "=&r" (c)
- : "r" (dest), "r" (1)
- : "cc", "memory");
-
- return b;
-#endif
+ gint16 old_val;
+ do {
+ old_val = *dst;
+ } while (gcc_sync_val_compare_and_swap (dst, old_val, val) != old_val);
}
-static inline gint32 InterlockedDecrement(volatile gint32 *dest)
+static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
{
-#if defined(HAVE_ARMV7)
- gint32 ret, flag;
- __asm__ __volatile__ (
- "dmb\n"
- "1:\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "ldrex %0, [%2]\n"
- "sub %0, %0, %3\n"
- NACL_ALIGN()
- MASK_REGISTER("%2", "al")
- "strex %1, %0, [%2]\n"
- "teq %1, #0\n"
- "bne 1b\n"
- "dmb\n"
- : "=&r" (ret), "=&r" (flag)
- : "r" (dest), "r" (1)
- : "memory", "cc");
-
- return ret;
-#else
- gint32 a, b, c;
-
- __asm__ __volatile__ ( "0:\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "ldr %0, [%3]\n\t"
- "add %1, %0, %4\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "swp %2, %1, [%3]\n\t"
- "cmp %0, %2\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "ne")
- "swpne %1, %2, [%3]\n\t"
- "bne 0b"
- : "=&r" (a), "=&r" (b), "=&r" (c)
- : "r" (dest), "r" (-1)
- : "cc", "memory");
-
- return b;
-#endif
+ /* Nothing useful from GCC at all, so fall back to CAS. */
+ gint32 old_val;
+ do {
+ old_val = *dst;
+ } while (gcc_sync_val_compare_and_swap (dst, old_val, val) != old_val);
}
-static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
-{
-#if defined(HAVE_ARMV7)
- gint32 ret, flag;
- __asm__ __volatile__ (
- "dmb\n"
- "1:\n"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "ldrex %0, [%3]\n"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "strex %1, %2, [%3]\n"
- "teq %1, #0\n"
- "bne 1b\n"
- "dmb\n"
- : "=&r" (ret), "=&r" (flag)
- : "r" (exch), "r" (dest)
- : "memory", "cc");
- return ret;
-#else
- gint32 a;
+#if defined (TARGET_OSX) || defined (__arm__) || (defined (__mips__) && !defined (__mips64)) || (defined (__powerpc__) && !defined (__powerpc64__)) || (defined (__sparc__) && !defined (__arch64__))
+#define BROKEN_64BIT_ATOMICS_INTRINSIC 1
+#endif
- __asm__ __volatile__ ( NACL_ALIGN()
- MASK_REGISTER("%1", "al")
- "swp %0, %2, [%1]"
- : "=&r" (a)
- : "r" (dest), "r" (exch));
+#if !defined (BROKEN_64BIT_ATOMICS_INTRINSIC)
- return a;
-#endif
+static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
+{
+ return gcc_sync_val_compare_and_swap (dest, comp, exch);
}
-static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
-#if defined(HAVE_ARMV7)
- gpointer ret, flag;
- __asm__ __volatile__ (
- "dmb\n"
- "1:\n"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "ldrex %0, [%3]\n"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "strex %1, %2, [%3]\n"
- "teq %1, #0\n"
- "bne 1b\n"
- "dmb\n"
- : "=&r" (ret), "=&r" (flag)
- : "r" (exch), "r" (dest)
- : "memory", "cc");
- return ret;
-#else
- gpointer a;
-
- __asm__ __volatile__ ( NACL_ALIGN()
- MASK_REGISTER("%1", "al")
- "swp %0, %2, [%1]"
- : "=&r" (a)
- : "r" (dest), "r" (exch));
-
- return a;
-#endif
+ return gcc_sync_add_and_fetch (dest, add);
}
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
+static inline gint64 InterlockedIncrement64(volatile gint64 *val)
{
-#if defined(HAVE_ARMV7)
- gint32 ret, tmp, flag;
- __asm__ __volatile__ (
- "dmb\n"
- "1:\n"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "ldrex %0, [%3]\n"
- "add %1, %0, %4\n"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "strex %2, %1, [%3]\n"
- "teq %2, #0\n"
- "bne 1b\n"
- "dmb\n"
- : "=&r" (ret), "=&r" (tmp), "=&r" (flag)
- : "r" (dest), "r" (add)
- : "memory", "cc");
-
- return ret;
-#else
- int a, b, c;
-
- __asm__ __volatile__ ( "0:\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "ldr %0, [%3]\n\t"
- "add %1, %0, %4\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "al")
- "swp %2, %1, [%3]\n\t"
- "cmp %0, %2\n\t"
- NACL_ALIGN()
- MASK_REGISTER("%3", "ne")
- "swpne %1, %2, [%3]\n\t"
- "bne 0b"
- : "=&r" (a), "=&r" (b), "=&r" (c)
- : "r" (dest), "r" (add)
- : "cc", "memory");
-
- return a;
-#endif
+ return gcc_sync_add_and_fetch (val, 1);
}
-#undef NACL_ALIGN
-#undef MASK_REGISTER
-
-#elif defined(__ia64__)
-
-#ifdef __INTEL_COMPILER
-#include <ia64intrin.h>
-#endif
-
-static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
- gint32 exch, gint32 comp)
+static inline gint64 InterlockedDecrement64(volatile gint64 *val)
{
- gint32 old;
- guint64 real_comp;
-
-#ifdef __INTEL_COMPILER
- old = _InterlockedCompareExchange (dest, exch, comp);
-#else
- /* cmpxchg4 zero extends the value read from memory */
- real_comp = (guint64)(guint32)comp;
- asm volatile ("mov ar.ccv = %2 ;;\n\t"
- "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
- : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
-#endif
-
- return(old);
+ return gcc_sync_sub_and_fetch (val, 1);
}
-static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
- gpointer exch, gpointer comp)
+static inline gint64 InterlockedExchangeAdd64(volatile gint64 *val, gint64 add)
{
- gpointer old;
-
-#ifdef __INTEL_COMPILER
- old = _InterlockedCompareExchangePointer (dest, exch, comp);
-#else
- asm volatile ("mov ar.ccv = %2 ;;\n\t"
- "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
- : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
-#endif
-
- return(old);
+ return gcc_sync_fetch_and_add (val, add);
}
-static inline gint32 InterlockedIncrement(gint32 volatile *val)
+static inline gint64 InterlockedRead64(volatile gint64 *src)
{
-#ifdef __INTEL_COMPILER
- return _InterlockedIncrement (val);
-#else
- gint32 old;
-
- do {
- old = *val;
- } while (InterlockedCompareExchange (val, old + 1, old) != old);
-
- return old + 1;
-#endif
+ /* Kind of a hack, but GCC doesn't give us anything better. */
+ return gcc_sync_fetch_and_add (src, 0);
}
-static inline gint32 InterlockedDecrement(gint32 volatile *val)
-{
-#ifdef __INTEL_COMPILER
- return _InterlockedDecrement (val);
#else
- gint32 old;
- do {
- old = *val;
- } while (InterlockedCompareExchange (val, old - 1, old) != old);
+/* Implement 64-bit cmpxchg by hand or emulate it. */
+extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
- return old - 1;
-#endif
-}
+/* Implement all other 64-bit atomics in terms of a specialized CAS
+ * in this case, since chances are that the other 64-bit atomic
+ * intrinsics are broken too.
+ */
-static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
+static inline gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add)
{
-#ifdef __INTEL_COMPILER
- return _InterlockedExchange (dest, new_val);
-#else
- gint32 res;
-
+ gint64 old_val;
do {
- res = *dest;
- } while (InterlockedCompareExchange (dest, new_val, res) != res);
-
- return res;
-#endif
+ old_val = *dest;
+ } while (InterlockedCompareExchange64 (dest, old_val + add, old_val) != old_val);
+ return old_val;
}
-static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
+static inline gint64 InterlockedIncrement64(volatile gint64 *val)
{
-#ifdef __INTEL_COMPILER
- return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
-#else
- gpointer res;
-
+ gint64 get, set;
do {
- res = *dest;
- } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
-
- return res;
-#endif
+ get = *val;
+ set = get + 1;
+ } while (InterlockedCompareExchange64 (val, set, get) != get);
+ return set;
}
-static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
+static inline gint64 InterlockedDecrement64(volatile gint64 *val)
{
- gint32 old;
-
-#ifdef __INTEL_COMPILER
- old = _InterlockedExchangeAdd (val, add);
-#else
+ gint64 get, set;
do {
- old = *val;
- } while (InterlockedCompareExchange (val, old + add, old) != old);
-
- return old;
-#endif
+ get = *val;
+ set = get - 1;
+ } while (InterlockedCompareExchange64 (val, set, get) != get);
+ return set;
}
-#elif defined(__mips__)
-
-#if SIZEOF_REGISTER == 8
-#error "Not implemented."
-#endif
-
-static inline gint32 InterlockedIncrement(volatile gint32 *val)
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
{
- gint32 tmp, result = 0;
-
- __asm__ __volatile__ (" .set mips32\n"
- "1: ll %0, %2\n"
- " addu %1, %0, 1\n"
- " sc %1, %2\n"
- " beqz %1, 1b\n"
- " .set mips0\n"
- : "=&r" (result), "=&r" (tmp), "=m" (*val)
- : "m" (*val));
- return result + 1;
+ gint64 get, set;
+ do {
+ get = *dest;
+ set = get + add;
+ } while (InterlockedCompareExchange64 (dest, set, get) != get);
+ return set;
}
-static inline gint32 InterlockedDecrement(volatile gint32 *val)
+static inline gint64 InterlockedRead64(volatile gint64 *src)
{
- gint32 tmp, result = 0;
-
- __asm__ __volatile__ (" .set mips32\n"
- "1: ll %0, %2\n"
- " subu %1, %0, 1\n"
- " sc %1, %2\n"
- " beqz %1, 1b\n"
- " .set mips0\n"
- : "=&r" (result), "=&r" (tmp), "=m" (*val)
- : "m" (*val));
- return result - 1;
+ return InterlockedCompareExchange64 (src, 0, 0);
}
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
- gint32 exch, gint32 comp) {
- gint32 old, tmp;
-
- __asm__ __volatile__ (" .set mips32\n"
- "1: ll %0, %2\n"
- " bne %0, %5, 2f\n"
- " move %1, %4\n"
- " sc %1, %2\n"
- " beqz %1, 1b\n"
- "2: .set mips0\n"
- : "=&r" (old), "=&r" (tmp), "=m" (*dest)
- : "m" (*dest), "r" (exch), "r" (comp));
- return(old);
-}
+#endif
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
+static inline gpointer InterlockedReadPointer(volatile gpointer *src)
{
- return (gpointer)(InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp)));
+ return InterlockedCompareExchangePointer (src, NULL, NULL);
}
-static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
+static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
{
- gint32 result, tmp;
-
- __asm__ __volatile__ (" .set mips32\n"
- "1: ll %0, %2\n"
- " move %1, %4\n"
- " sc %1, %2\n"
- " beqz %1, 1b\n"
- " .set mips0\n"
- : "=&r" (result), "=&r" (tmp), "=m" (*dest)
- : "m" (*dest), "r" (exch));
- return(result);
+ InterlockedExchangePointer (dst, val);
}
-static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
+/* We always implement this in terms of a 64-bit cmpxchg since
+ * GCC doesn't have an intrinsic to model it anyway. */
+static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
{
- return (gpointer)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch));
+ gint64 old_val;
+ do {
+ old_val = *val;
+ } while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
+ return old_val;
}
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
+static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
{
- gint32 result, tmp;
-
- __asm__ __volatile__ (" .set mips32\n"
- "1: ll %0, %2\n"
- " addu %1, %0, %4\n"
- " sc %1, %2\n"
- " beqz %1, 1b\n"
- " .set mips0\n"
- : "=&r" (result), "=&r" (tmp), "=m" (*dest)
- : "m" (*dest), "r" (add));
- return result;
+ /* Nothing useful from GCC at all, so fall back to CAS. */
+ InterlockedExchange64 (dst, val);
}
#else
#define WAPI_NO_ATOMIC_ASM
extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
+extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
+extern gint32 InterlockedAdd(volatile gint32 *dest, gint32 add);
+extern gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
+extern gint64 InterlockedIncrement64(volatile gint64 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
+extern gint64 InterlockedDecrement64(volatile gint64 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
+extern gint64 InterlockedExchange64(volatile gint64 *dest, gint64 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
+extern gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add);
+extern gint8 InterlockedRead8(volatile gint8 *src);
+extern gint16 InterlockedRead16(volatile gint16 *src);
+extern gint32 InterlockedRead(volatile gint32 *src);
+extern gint64 InterlockedRead64(volatile gint64 *src);
+extern gpointer InterlockedReadPointer(volatile gpointer *src);
+extern void InterlockedWrite8(volatile gint8 *dst, gint8 val);
+extern void InterlockedWrite16(volatile gint16 *dst, gint16 val);
+extern void InterlockedWrite(volatile gint32 *dst, gint32 val);
+extern void InterlockedWrite64(volatile gint64 *dst, gint64 val);
+extern void InterlockedWritePointer(volatile gpointer *dst, gpointer val);
#endif