#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_
+#if defined(__NetBSD__)
+#include <sys/param.h>
+
+#if __NetBSD_Version__ > 499004000
+#include <sys/atomic.h>
+#define HAVE_ATOMIC_OPS
+#endif
+
+#endif
+
#include <glib.h>
#include "mono/io-layer/wapi.h"
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)
+
+#define WAPI_ATOMIC_ASM
+static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
+ gint32 exch, gint32 comp)
+{
+ return atomic_cas_32((volatile uint32_t *)dest, comp, exch);
+}
+
+static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
+{
+ return atomic_cas_ptr(dest, comp, exch);
+}
+
+static inline gint32 InterlockedIncrement(volatile gint32 *val)
+{
+ return atomic_inc_32_nv((volatile uint32_t *)val);
+}
+
+static inline gint32 InterlockedDecrement(volatile gint32 *val)
+{
+ return atomic_dec_32_nv((volatile uint32_t *)val);
+}
+
+static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
+{
+ return atomic_swap_32((volatile uint32_t *)val, new_val);
+}
+
+static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
+ gpointer new_val)
+{
+ return atomic_swap_ptr(val, new_val);
+}
+
+static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
+{
+ return atomic_add_32_nv((volatile uint32_t *)val, add) - add;
+}
+
+#elif defined(__i386__) || defined(__x86_64__)
#define WAPI_ATOMIC_ASM
/*
gpointer old;
__asm__ __volatile__ ("lock; "
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__native_client__)
"cmpxchgq"
#else
"cmpxchgl"
gpointer ret;
__asm__ __volatile__ ("1:; lock; "
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__native_client__)
"cmpxchgq"
#else
"cmpxchgl"
#define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
#else
-#ifdef __mono_ppc64__
+#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
#define LDREGX "ldarx"
#define STREGCXD "stdcx."
#define CMPREG "cmpd"
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
- int a, b;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+ gint32 ret, tmp;
+ __asm__ __volatile__ ( "1:\n"
+ "mov %0, #0\n"
+ "ldrex %1, [%2]\n"
+ "teq %1, %3\n"
+ "it eq\n"
+ "strexeq %0, %4, [%2]\n"
+ "teq %0, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (dest), "r" (comp), "r" (exch)
+ : "memory", "cc");
+
+ return ret;
+#else
+ gint32 a, b;
__asm__ __volatile__ ( "0:\n\t"
"ldr %1, [%2]\n\t"
: "cc", "memory");
return a;
+#endif
}
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+ gpointer ret, tmp;
+ __asm__ __volatile__ ( "1:\n"
+ "mov %0, #0\n"
+ "ldrex %1, [%2]\n"
+ "teq %1, %3\n"
+ "it eq\n"
+ "strexeq %0, %4, [%2]\n"
+ "teq %0, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (dest), "r" (comp), "r" (exch)
+ : "memory", "cc");
+
+ return ret;
+#else
gpointer a, b;
__asm__ __volatile__ ( "0:\n\t"
: "cc", "memory");
return a;
+#endif
}
static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
- int a, b, c;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+ gint32 ret, flag;
+ __asm__ __volatile__ ( "1:\n"
+ "ldrex %0, [%2]\n"
+ "add %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "teq %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (ret), "=&r" (flag)
+ : "r" (dest), "r" (1)
+ : "memory", "cc");
+
+ return ret;
+#else
+ gint32 a, b, c;
__asm__ __volatile__ ( "0:\n\t"
"ldr %0, [%3]\n\t"
: "cc", "memory");
return b;
+#endif
}
static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
- int a, b, c;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+ gint32 ret, flag;
+ __asm__ __volatile__ ( "1:\n"
+ "ldrex %0, [%2]\n"
+ "sub %0, %0, %3\n"
+ "strex %1, %0, [%2]\n"
+ "teq %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (ret), "=&r" (flag)
+ : "r" (dest), "r" (1)
+ : "memory", "cc");
+
+ return ret;
+#else
+ gint32 a, b, c;
__asm__ __volatile__ ( "0:\n\t"
"ldr %0, [%3]\n\t"
: "cc", "memory");
return b;
+#endif
}
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
- int a;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+ gint32 ret, flag;
+ __asm__ __volatile__ (
+ "1:\n"
+ "ldrex %0, [%3]\n"
+ "strex %1, %2, [%3]\n"
+ "teq %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (ret), "=&r" (flag)
+ : "r" (exch), "r" (dest)
+ : "memory", "cc");
+ return ret;
+#else
+ gint32 a;
__asm__ __volatile__ ( "swp %0, %2, [%1]"
: "=&r" (a)
: "r" (dest), "r" (exch));
return a;
+#endif
}
static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+ gpointer ret, flag;
+ __asm__ __volatile__ (
+ "1:\n"
+ "ldrex %0, [%3]\n"
+ "strex %1, %2, [%3]\n"
+ "teq %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (ret), "=&r" (flag)
+ : "r" (exch), "r" (dest)
+ : "memory", "cc");
+ return ret;
+#else
gpointer a;
__asm__ __volatile__ ( "swp %0, %2, [%1]"
: "r" (dest), "r" (exch));
return a;
+#endif
}
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+ gint32 ret, tmp, flag;
+ __asm__ __volatile__ ( "1:\n"
+ "ldrex %0, [%3]\n"
+ "add %1, %0, %4\n"
+ "strex %2, %1, [%3]\n"
+ "teq %2, #0\n"
+ "bne 1b\n"
+ : "=&r" (ret), "=&r" (tmp), "=&r" (flag)
+ : "r" (dest), "r" (add)
+ : "memory", "cc");
+
+ return ret;
+#else
int a, b, c;
__asm__ __volatile__ ( "0:\n\t"
: "cc", "memory");
return a;
+#endif
}
#elif defined(__ia64__)