diff --git a/mono/io-layer/atomic.h b/mono/io-layer/atomic.h
index ff8d3a455f845d0cab84946810ccbc94f3d162b2..e45cfcf99b023116e656004b1454caa2fbc5f791 100644
--- a/mono/io-layer/atomic.h
+++ b/mono/io-layer/atomic.h
 #ifndef _WAPI_ATOMIC_H_
 #define _WAPI_ATOMIC_H_
 
+#if defined(__NetBSD__)
+#include <sys/param.h>
+
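+/* <sys/atomic.h> and the atomic_ops(3) API are only available on NetBSD
+ * releases newer than 4.99.40 (the NetBSD 5 development branch). */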
+#if __NetBSD_Version__ > 499004000
+#include <sys/atomic.h>
+#define HAVE_ATOMIC_OPS
+#endif
+
+#endif
+
 #include <glib.h>
 
 #include "mono/io-layer/wapi.h"
 
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)
+
+#define WAPI_ATOMIC_ASM
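+/* NetBSD's atomic_cas_*() take (ptr, expected, new) and return the previous
+ * value, so the Interlocked-style (dest, exch, comp) arguments are reordered
+ * in the calls below. */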
+static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
+       gint32 exch, gint32 comp)
+{
+       return atomic_cas_32((uint32_t*)dest, comp, exch);
+}
+
+static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
+{
+       return atomic_cas_ptr(dest, comp, exch);
+}
+
+static inline gint32 InterlockedIncrement(volatile gint32 *val)
+{
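+       /* atomic_inc_32_nv() returns the updated value, which is also what
+        * InterlockedIncrement() is expected to return */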
+       return atomic_inc_32_nv((uint32_t*)val);
+}
+
+static inline gint32 InterlockedDecrement(volatile gint32 *val)
+{
+       return atomic_dec_32_nv((uint32_t*)val);
+}
+
+static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
+{
+       return atomic_swap_32((uint32_t*)val, new_val);
+}
+
+static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
+               gpointer new_val)
+{
+       return atomic_swap_ptr(val, new_val);
+}
+
+static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
+{
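+       /* atomic_add_32_nv() returns the new value; InterlockedExchangeAdd()
+        * must return the previous one, hence the subtraction */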
+       return atomic_add_32_nv((uint32_t*)val, add) - add;
+}
+
+#elif defined(__i386__) || defined(__x86_64__)
 #define WAPI_ATOMIC_ASM
 
 /*
@@ -42,7 +92,7 @@ static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest
        gpointer old;
 
        __asm__ __volatile__ ("lock; "
-#ifdef __x86_64__
+#if defined(__x86_64__)  && !defined(__native_client__)
                              "cmpxchgq"
 #else
                              "cmpxchgl"
@@ -104,7 +154,7 @@ static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
        gpointer ret;
        
        __asm__ __volatile__ ("1:; lock; "
-#ifdef __x86_64__
+#if defined(__x86_64__)  && !defined(__native_client__)
                              "cmpxchgq"
 #else
                              "cmpxchgl"
@@ -301,7 +351,7 @@ InterlockedCompareExchange(volatile gint32 *dest,
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLR\t%1,%3\n"
                              "\tCS\t%1,%2,0(1)\n"
-                             : "+m" (*dest), "=r" (old)
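+                             /* "&" (early clobber): old is written by LR
+                                before exch/comp are consumed by CS */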
+                             : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");     
        return(old);
@@ -317,7 +367,7 @@ InterlockedCompareExchangePointer(volatile gpointer *dest,
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLR\t%1,%3\n"
                              "\tCS\t%1,%2,0(1)\n"
-                             : "+m" (*dest), "=r" (old)
+                             : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");     
        return(old);
@@ -333,7 +383,7 @@ InterlockedCompareExchangePointer(volatile gpointer *dest,
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLGR\t%1,%3\n"
                              "\tCSG\t%1,%2,0(1)\n"
-                             : "+m" (*dest), "=r" (old)
+                             : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");
 
@@ -426,7 +476,7 @@ InterlockedExchange(volatile gint32 *val, gint32 new_val)
                              "0:\tL\t%1,%0\n"
                              "\tCS\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
-                             : "+m" (*val), "=r" (ret)
+                             : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");
 
@@ -443,7 +493,7 @@ InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
                              "0:\tL\t%1,%0\n"
                              "\tCS\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
-                             : "+m" (*val), "=r" (ret)
+                             : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");
 
@@ -459,7 +509,7 @@ InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
                              "0:\tLG\t%1,%0\n"
                              "\tCSG\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
-                             : "+m" (*val), "=r" (ret)
+                             : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");
 
@@ -479,7 +529,7 @@ InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
                              "\tAR\t1,%2\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b"
-                             : "=r" (ret), "+m" (*val)
+                             : "=&r" (ret), "+m" (*val)
                              : "r" (add) 
                              : "1", "2", "cc");
        
@@ -497,7 +547,7 @@ InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
                              "\tAGR\t1,%2\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b"
-                             : "=r" (ret), "+m" (*val)
+                             : "=&r" (ret), "+m" (*val)
                              : "r" (add) 
                              : "1", "2", "cc");
        
@@ -505,9 +555,90 @@ InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
 }
 # endif
 
-#elif defined(__ppc__) || defined (__powerpc__)
+#elif defined(__mono_ppc__)
 #define WAPI_ATOMIC_ASM
 
+#ifdef G_COMPILER_CODEWARRIOR
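+/* CodeWarrior's inline-asm syntax differs from gcc's: operands are referenced
+ * by name and @1/@2 are local labels. */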
+static inline gint32 InterlockedIncrement(volatile register gint32 *val)
+{
+       register gint32 result = 0;
+       register gint32 tmp;
+
+       asm
+       {
+               @1:
+                       lwarx   tmp, 0, val
+                       addi    result, tmp, 1
+                       stwcx.  result, 0, val
+                       bne-    @1
+       }
+       return result;
+}
+
+static inline gint32 InterlockedDecrement(register volatile gint32 *val)
+{
+       register gint32 result = 0;
+       register gint32 tmp;
+
+       asm
+       {
+               @1:
+                       lwarx   tmp, 0, val
+                       addi    result, tmp, -1
+                       stwcx.  result, 0, val
+                       bne-    @1
+       }
+
+       return result;
+}
+#define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
+
+static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
+{
+       register gint32 tmp = 0;
+
+       asm
+       {
+               @1:
+                       lwarx   tmp, 0, dest
+                       cmpw    tmp, comp
+                       bne-    @2
+                       stwcx.  exch, 0, dest
+                       bne-    @1
+               @2:
+       }
+
+       return tmp;
+}
+static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
+{
+       register gint32 tmp = 0;
+
+       asm
+       {
+               @1:
+                       lwarx   tmp, 0, dest
+                       stwcx.  exch, 0, dest
+                       bne-    @1
+       }
+
+       return tmp;
+}
+#define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
+#else
+
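+/* On ppc64 (outside ILP32 mode) use the 64-bit load-reserve/store-conditional/
+ * compare mnemonics so the pointer-sized variants below operate on full-width
+ * pointers. */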
+#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
+#define LDREGX "ldarx"
+#define STREGCXD "stdcx."
+#define CMPREG "cmpd"
+#else
+#define LDREGX "lwarx"
+#define STREGCXD "stwcx."
+#define CMPREG "cmpw"
+#endif
+
 static inline gint32 InterlockedIncrement(volatile gint32 *val)
 {
        gint32 result = 0, tmp;
@@ -534,7 +665,22 @@ static inline gint32 InterlockedDecrement(volatile gint32 *val)
        return result - 1;
 }
 
-#define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
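+/* A pointer no longer fits in a gint32 on ppc64, so the cast-based macro is
+ * replaced by a pointer-width reservation loop. */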
+static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
+                                               gpointer exch, gpointer comp)
+{
+       gpointer tmp = NULL;
+
+       __asm__ __volatile__ ("\n1:\n\t"
+                            LDREGX " %0, 0, %1\n\t"
+                            CMPREG " %0, %2\n\t" 
+                            "bne-    2f\n\t"
+                            STREGCXD " %3, 0, %1\n\t"
+                            "bne-    1b\n"
+                            "2:"
+                            : "=&r" (tmp)
+                            : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
+       return(tmp);
+}
 
 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
                                                gint32 exch, gint32 comp) {
@@ -563,7 +709,18 @@ static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
                              : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
        return(tmp);
 }
-#define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
+
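+/* Same reasoning as InterlockedCompareExchangePointer: exchange pointers at
+ * their native width instead of casting through gint32. */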
+static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
+{
+       gpointer tmp = NULL;
+
+       __asm__ __volatile__ ("\n1:\n\t"
+                             LDREGX " %0, 0, %2\n\t"
+                             STREGCXD " %3, 0, %2\n\t"
+                             "bne    1b"
+                             : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
+       return(tmp);
+}
 
 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
 {
@@ -578,12 +735,34 @@ static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
         return(result);
 }
 
+#undef LDREGX
+#undef STREGCXD
+#undef CMPREG
+
+#endif /* !G_COMPILER_CODEWARRIOR */
+
 #elif defined(__arm__)
 #define WAPI_ATOMIC_ASM
 
 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
 {
-       int a, b;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
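+       /* ARMv6 and later: swp is deprecated, so use an ldrex/strex retry loop.
+        * The "it eq" is required when assembling for Thumb-2. */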
+       gint32 ret, tmp;
+       __asm__ __volatile__ (  "1:\n"
+                               "mov    %0, #0\n"
+                               "ldrex %1, [%2]\n"
+                               "teq    %1, %3\n"
+                               "it eq\n"
+                               "strexeq %0, %4, [%2]\n"
+                               "teq %0, #0\n"
+                               "bne 1b\n"
+                               : "=&r" (tmp), "=&r" (ret)
+                               : "r" (dest), "r" (comp), "r" (exch)
+                               : "memory", "cc");
+
+       return ret;
+#else
+       gint32 a, b;
 
        __asm__ __volatile__ (    "0:\n\t"
                                  "ldr %1, [%2]\n\t"
@@ -600,10 +779,27 @@ static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 ex
                                  : "cc", "memory");
 
        return a;
+#endif
 }
 
 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
 {
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+       gpointer ret, tmp;
+       __asm__ __volatile__ (  "1:\n"
+                               "mov    %0, #0\n"
+                               "ldrex %1, [%2]\n"
+                               "teq    %1, %3\n"
+                               "it eq\n"
+                               "strexeq %0, %4, [%2]\n"
+                               "teq %0, #0\n"
+                               "bne 1b\n"
+                               : "=&r" (tmp), "=&r" (ret)
+                               : "r" (dest), "r" (comp), "r" (exch)
+                               : "memory", "cc");
+
+       return ret;
+#else
        gpointer a, b;
 
        __asm__ __volatile__ (    "0:\n\t"
@@ -621,11 +817,26 @@ static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest
                                  : "cc", "memory");
 
        return a;
+#endif
 }
 
 static inline gint32 InterlockedIncrement(volatile gint32 *dest)
 {
-       int a, b, c;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+       gint32 ret, flag;
+       __asm__ __volatile__ (  "1:\n"
+                               "ldrex %0, [%2]\n"
+                               "add %0, %0, %3\n"
+                               "strex %1, %0, [%2]\n"
+                               "teq %1, #0\n"
+                               "bne 1b\n"
+                               : "=&r" (ret), "=&r" (flag)
+                               : "r" (dest), "r" (1)
+                               : "memory", "cc");
+
+       return ret;
+#else
+       gint32 a, b, c;
 
        __asm__ __volatile__ (  "0:\n\t"
                                "ldr %0, [%3]\n\t"
@@ -639,11 +850,26 @@ static inline gint32 InterlockedIncrement(volatile gint32 *dest)
                                : "cc", "memory");
 
        return b;
+#endif
 }
 
 static inline gint32 InterlockedDecrement(volatile gint32 *dest)
 {
-       int a, b, c;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+       gint32 ret, flag;
+       __asm__ __volatile__ (  "1:\n"
+                               "ldrex %0, [%2]\n"
+                               "sub %0, %0, %3\n"
+                               "strex %1, %0, [%2]\n"
+                               "teq %1, #0\n"
+                               "bne 1b\n"
+                               : "=&r" (ret), "=&r" (flag)
+                               : "r" (dest), "r" (1)
+                               : "memory", "cc");
+
+       return ret;
+#else
+       gint32 a, b, c;
 
        __asm__ __volatile__ (  "0:\n\t"
                                "ldr %0, [%3]\n\t"
@@ -657,21 +883,49 @@ static inline gint32 InterlockedDecrement(volatile gint32 *dest)
                                : "cc", "memory");
 
        return b;
+#endif
 }
 
 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
 {
-       int a;
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+       gint32 ret, flag;
+       __asm__ __volatile__ (
+                             "1:\n"
+                             "ldrex %0, [%3]\n"
+                             "strex %1, %2, [%3]\n"
+                             "teq %1, #0\n"
+                             "bne 1b\n"
+                             : "=&r" (ret), "=&r" (flag)
+                             : "r" (exch), "r" (dest)
+                             : "memory", "cc");
+       return ret;
+#else
+       gint32 a;
 
        __asm__ __volatile__ (  "swp %0, %2, [%1]"
                                : "=&r" (a)
                                : "r" (dest), "r" (exch));
 
        return a;
+#endif
 }
 
 static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
 {
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+       gpointer ret, flag;
+       __asm__ __volatile__ (
+                             "1:\n"
+                             "ldrex %0, [%3]\n"
+                             "strex %1, %2, [%3]\n"
+                             "teq %1, #0\n"
+                             "bne 1b\n"
+                             : "=&r" (ret), "=&r" (flag)
+                             : "r" (exch), "r" (dest)
+                             : "memory", "cc");
+       return ret;
+#else
        gpointer a;
 
        __asm__ __volatile__ (  "swp %0, %2, [%1]"
@@ -679,10 +933,25 @@ static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpoin
                                : "r" (dest), "r" (exch));
 
        return a;
+#endif
 }
 
 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
 {
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
+       gint32 ret, tmp, flag;
+       __asm__ __volatile__ (  "1:\n"
+                               "ldrex %0, [%3]\n"
+                               "add %1, %0, %4\n"
+                               "strex %2, %1, [%3]\n"
+                               "teq %2, #0\n"
+                               "bne 1b\n"
+                               : "=&r" (ret), "=&r" (tmp), "=&r" (flag)
+                               : "r" (dest), "r" (add)
+                               : "memory", "cc");
+
+       return ret;
+#else
        int a, b, c;
 
        __asm__ __volatile__ (  "0:\n\t"
@@ -697,6 +966,7 @@ static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
                                : "cc", "memory");
 
        return a;
+#endif
 }
 
 #elif defined(__ia64__)
@@ -710,13 +980,16 @@ static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
                                                gint32 exch, gint32 comp)
 {
        gint32 old;
+       guint64 real_comp;
 
 #ifdef __INTEL_COMPILER
        old = _InterlockedCompareExchange (dest, exch, comp);
 #else
+       /* cmpxchg4 zero extends the value read from memory */
+       real_comp = (guint64)(guint32)comp;
        asm volatile ("mov ar.ccv = %2 ;;\n\t"
                                  "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
-                                 : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
+                                 : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
 #endif
 
        return(old);
@@ -924,6 +1197,88 @@ static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
        return(ret);
 }
 
+#elif defined(__mips__)
+#define WAPI_ATOMIC_ASM
+
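+/* MIPS ll/sc retry loops; ".set mips32" / ".set mips0" temporarily select the
+ * MIPS32 ISA for the ll/sc pair and then restore the assembler's default. */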
+static inline gint32 InterlockedIncrement(volatile gint32 *val)
+{
+       gint32 tmp, result = 0;
+
+       __asm__ __volatile__ ("    .set    mips32\n"
+                             "1:  ll      %0, %2\n"
+                             "    addu    %1, %0, 1\n"
+                             "    sc      %1, %2\n"
+                             "    beqz    %1, 1b\n"
+                             "    .set    mips0\n"
+                             : "=&r" (result), "=&r" (tmp), "=m" (*val)
+                             : "m" (*val));
+       return result + 1;
+}
+
+static inline gint32 InterlockedDecrement(volatile gint32 *val)
+{
+       gint32 tmp, result = 0;
+
+       __asm__ __volatile__ ("    .set    mips32\n"
+                             "1:  ll      %0, %2\n"
+                             "    subu    %1, %0, 1\n"
+                             "    sc      %1, %2\n"
+                             "    beqz    %1, 1b\n"
+                             "    .set    mips0\n"
+                             : "=&r" (result), "=&r" (tmp), "=m" (*val)
+                             : "m" (*val));
+       return result - 1;
+}
+
+#define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
+
+static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
+                                               gint32 exch, gint32 comp) {
+       gint32 old, tmp;
+
+       __asm__ __volatile__ ("    .set    mips32\n"
+                             "1:  ll      %0, %2\n"
+                             "    bne     %0, %5, 2f\n"
+                             "    move    %1, %4\n"
+                             "    sc      %1, %2\n"
+                             "    beqz    %1, 1b\n"
+                             "2:  .set    mips0\n"
+                             : "=&r" (old), "=&r" (tmp), "=m" (*dest)
+                             : "m" (*dest), "r" (exch), "r" (comp));
+       return(old);
+}
+
+static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
+{
+       gint32 result, tmp;
+
+       __asm__ __volatile__ ("    .set    mips32\n"
+                             "1:  ll      %0, %2\n"
+                             "    move    %1, %4\n"
+                             "    sc      %1, %2\n"
+                             "    beqz    %1, 1b\n"
+                             "    .set    mips0\n"
+                             : "=&r" (result), "=&r" (tmp), "=m" (*dest)
+                             : "m" (*dest), "r" (exch));
+       return(result);
+}
+#define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
+
+static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
+{
+        gint32 result, tmp;
+
+       __asm__ __volatile__ ("    .set    mips32\n"
+                             "1:  ll      %0, %2\n"
+                             "    addu    %1, %0, %4\n"
+                             "    sc      %1, %2\n"
+                             "    beqz    %1, 1b\n"
+                             "    .set    mips0\n"
+                             : "=&r" (result), "=&r" (tmp), "=m" (*dest)
+                             : "m" (*dest), "r" (add));
+        return result;
+}
+
 #else
 
 extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
@@ -934,7 +1289,7 @@ extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
 extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
 extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
 
-#if defined(__hpux) && !defined(__GNUC__)
+#if defined(__hppa__)
 #define WAPI_ATOMIC_ASM
 #endif