Add full-aot support for runtime invokes to array Get/Set methods.
diff --git a/mono/utils/atomic.h b/mono/utils/atomic.h
index 94681c5daa7f6b575123dd3bddee276c99397e74..6f02f8f8709008c5fd1261275008e9a0895e8459 100644
--- a/mono/utils/atomic.h
+++ b/mono/utils/atomic.h
@@ -116,111 +116,6 @@ static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
        return atomic_add_32_nv((uint32_t*)val, add) - add;
 }
 
-#elif defined(__i386__) || defined(__x86_64__)
-
-/*
- * NB: The *Pointer() functions here assume that
- * sizeof(pointer)==sizeof(gint32)
- *
- * NB2: These asm functions assume 486+ (some of the opcodes don't
- * exist on 386).  If this becomes an issue, we can get configure to
- * fall back to the non-atomic C versions of these calls.
- */
-
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
-                                               gint32 exch, gint32 comp)
-{
-       gint32 old;
-
-       __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
-                             : "=m" (*dest), "=a" (old)
-                             : "r" (exch), "m" (*dest), "a" (comp));   
-       return(old);
-}
-
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
-{
-       gpointer old;
-
-       __asm__ __volatile__ ("lock; "
-#if defined(__x86_64__)  && !defined(__native_client__)
-                             "cmpxchgq"
-#else
-                             "cmpxchgl"
-#endif
-                             " %2, %0"
-                             : "=m" (*dest), "=a" (old)
-                             : "r" (exch), "m" (*dest), "a" (comp));   
-
-       return(old);
-}
-
-static inline gint32 InterlockedIncrement(volatile gint32 *val)
-{
-       gint32 tmp;
-       
-       __asm__ __volatile__ ("lock; xaddl %0, %1"
-                             : "=r" (tmp), "=m" (*val)
-                             : "0" (1), "m" (*val));
-
-       return(tmp+1);
-}
-
-static inline gint32 InterlockedDecrement(volatile gint32 *val)
-{
-       gint32 tmp;
-       
-       __asm__ __volatile__ ("lock; xaddl %0, %1"
-                             : "=r" (tmp), "=m" (*val)
-                             : "0" (-1), "m" (*val));
-
-       return(tmp-1);
-}
-
-/*
- * See
- * http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
- * for the reasons for using cmpxchg and a loop here.
- */
-static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
-{
-       gint32 ret;
-
-       __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
-                             : "=m" (*val), "=a" (ret)
-                             : "r" (new_val), "m" (*val), "a" (*val));
-       return(ret);
-}
-
-static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
-                                                 gpointer new_val)
-{
-       gpointer ret;
-       
-       __asm__ __volatile__ ("1:; lock; "
-#if defined(__x86_64__)  && !defined(__native_client__)
-                             "cmpxchgq"
-#else
-                             "cmpxchgl"
-#endif
-                             " %2, %0; jne 1b"
-                             : "=m" (*val), "=a" (ret)
-                             : "r" (new_val), "m" (*val), "a" (*val));
-
-       return(ret);
-}
-
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
-{
-       gint32 ret;
-       
-       __asm__ __volatile__ ("lock; xaddl %0, %1"
-                             : "=r" (ret), "=m" (*val)
-                             : "0" (add), "m" (*val));
-       
-       return(ret);
-}
-
 #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
 
 G_GNUC_UNUSED 
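Note on the hunk above: the removed x86/amd64 block hand-codes compare-and-swap with "lock; cmpxchg" and fetch-and-add with "lock; xadd", and emulates a plain exchange with a cmpxchg retry loop (see the MSDN note in the removed comment). For reference only, here is a minimal sketch of the same semantics written with GCC/clang atomic builtins; it is illustrative, assumes a GCC >= 4.7 (or clang) toolchain with eglib/glib for gint32, and is not necessarily what the header uses after this change:

#include <glib.h>

/* Illustrative sketch only -- not part of this change. */
static inline gint32 sketch_InterlockedCompareExchange (volatile gint32 *dest, gint32 exch, gint32 comp)
{
	/* "lock; cmpxchgl": returns the value *dest held before the operation */
	return __sync_val_compare_and_swap (dest, comp, exch);
}

static inline gint32 sketch_InterlockedExchangeAdd (volatile gint32 *dest, gint32 add)
{
	/* "lock; xaddl": returns the value before the addition */
	return __sync_fetch_and_add (dest, add);
}

static inline gint32 sketch_InterlockedExchange (volatile gint32 *dest, gint32 exch)
{
	/* full-barrier exchange; the removed asm emulated this with a cmpxchg loop */
	return __atomic_exchange_n (dest, exch, __ATOMIC_SEQ_CST);
}

InterlockedIncrement and InterlockedDecrement correspond to __sync_add_and_fetch (dest, 1) and __sync_add_and_fetch (dest, -1), which return the updated value just as the removed xaddl-based versions do after their +1/-1 adjustment.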
@@ -501,516 +396,6 @@ InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
        return(ret);
 }
 
-#elif defined(__mono_ppc__)
-
-#ifdef G_COMPILER_CODEWARRIOR
-static inline gint32 InterlockedIncrement(volatile register gint32 *val)
-{
-       register gint32 result = 0;
-       register gint32 tmp;
-
-       asm
-       {
-               @1:
-                       lwarx   tmp, 0, val
-                       addi    result, tmp, 1
-                       stwcx.  result, 0, val
-                       bne-    @1
-       }
-       return result;
-}
-
-static inline gint32 InterlockedDecrement(register volatile gint32 *val)
-{
-       register gint32 result = 0;
-       register gint32 tmp;
-
-       asm
-       {
-               @1:
-                       lwarx   tmp, 0, val
-                       addi    result, tmp, -1
-                       stwcx.  result, 0, val
-                       bne-    @1
-       }
-
-       return result;
-}
-#define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
-
-static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
-{
-       register gint32 tmp = 0;
-
-       asm
-       {
-               @1:
-                       lwarx   tmp, 0, dest
-                       cmpw    tmp, comp
-                       bne-    @2
-                       stwcx.  exch, 0, dest
-                       bne-    @1
-               @2:
-       }
-
-       return tmp;
-}
-static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
-{
-       register gint32 tmp = 0;
-
-       asm
-       {
-               @1:
-                       lwarx   tmp, 0, dest
-                       stwcx.  exch, 0, dest
-                       bne-    @1
-       }
-
-       return tmp;
-}
-#define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
-#else
-
-#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
-#define LDREGX "ldarx"
-#define STREGCXD "stdcx."
-#define CMPREG "cmpd"
-#else
-#define LDREGX "lwarx"
-#define STREGCXD "stwcx."
-#define CMPREG "cmpw"
-#endif
-
-static inline gint32 InterlockedIncrement(volatile gint32 *val)
-{
-       gint32 result = 0, tmp;
-
-       __asm__ __volatile__ ("\n1:\n\t"
-                             "lwarx  %0, 0, %2\n\t"
-                             "addi   %1, %0, 1\n\t"
-                              "stwcx. %1, 0, %2\n\t"
-                             "bne-   1b"
-                             : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
-       return result + 1;
-}
-
-static inline gint32 InterlockedDecrement(volatile gint32 *val)
-{
-       gint32 result = 0, tmp;
-
-       __asm__ __volatile__ ("\n1:\n\t"
-                             "lwarx  %0, 0, %2\n\t"
-                             "addi   %1, %0, -1\n\t"
-                              "stwcx. %1, 0, %2\n\t"
-                             "bne-   1b"
-                             : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
-       return result - 1;
-}
-
-static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
-                                               gpointer exch, gpointer comp)
-{
-       gpointer tmp = NULL;
-
-       __asm__ __volatile__ ("\n1:\n\t"
-                            LDREGX " %0, 0, %1\n\t"
-                            CMPREG " %0, %2\n\t" 
-                            "bne-    2f\n\t"
-                            STREGCXD " %3, 0, %1\n\t"
-                            "bne-    1b\n"
-                            "2:"
-                            : "=&r" (tmp)
-                            : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
-       return(tmp);
-}
-
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
-                                               gint32 exch, gint32 comp) {
-       gint32 tmp = 0;
-
-       __asm__ __volatile__ ("\n1:\n\t"
-                            "lwarx   %0, 0, %1\n\t"
-                            "cmpw    %0, %2\n\t" 
-                            "bne-    2f\n\t"
-                            "stwcx.  %3, 0, %1\n\t"
-                            "bne-    1b\n"
-                            "2:"
-                            : "=&r" (tmp)
-                            : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
-       return(tmp);
-}
-
-static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
-{
-       gint32 tmp = 0;
-
-       __asm__ __volatile__ ("\n1:\n\t"
-                             "lwarx  %0, 0, %2\n\t"
-                             "stwcx. %3, 0, %2\n\t"
-                             "bne    1b"
-                             : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
-       return(tmp);
-}
-
-static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
-{
-       gpointer tmp = NULL;
-
-       __asm__ __volatile__ ("\n1:\n\t"
-                             LDREGX " %0, 0, %2\n\t"
-                             STREGCXD " %3, 0, %2\n\t"
-                             "bne    1b"
-                             : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
-       return(tmp);
-}
-
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
-{
-        gint32 result, tmp;
-        __asm__ __volatile__ ("\n1:\n\t"
-                              "lwarx  %0, 0, %2\n\t"
-                              "add    %1, %0, %3\n\t"
-                              "stwcx. %1, 0, %2\n\t"
-                              "bne    1b"
-                              : "=&r" (result), "=&r" (tmp)
-                              : "r" (dest), "r" (add) : "cc", "memory");
-        return(result);
-}
-
-#undef LDREGX
-#undef STREGCXD
-#undef CMPREG
-
-#endif /* !G_COMPILER_CODEWARRIOR */
-
-#elif defined(__arm__)
-
-#ifdef __native_client__
-#define MASK_REGISTER(reg, cond) "bic" cond " " reg ", " reg ", #0xc0000000\n"
-#define NACL_ALIGN() ".align 4\n"
-#else
-#define MASK_REGISTER(reg, cond)
-#define NACL_ALIGN()
-#endif
-
-/*
- * Atomic operations on ARM don't include memory barriers, and the runtime code
- * depends on them, so we add them explicitly.
- */
-
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
-{
-#if defined(HAVE_ARMV7)
-       gint32 ret, tmp;
-       __asm__ __volatile__ (  "1:\n"
-                               NACL_ALIGN()
-                               "dmb\n"
-                               "mov    %0, #0\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "al")
-                               "ldrex %1, [%2]\n"
-                               "teq    %1, %3\n"
-                               "it eq\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "eq")
-                               "strexeq %0, %4, [%2]\n"
-                               "teq %0, #0\n"
-                               "bne 1b\n"
-                               "dmb\n"
-                               : "=&r" (tmp), "=&r" (ret)
-                               : "r" (dest), "r" (comp), "r" (exch)
-                               : "memory", "cc");
-
-       return ret;
-#else
-       gint32 a, b;
-
-       __asm__ __volatile__ (    "0:\n\t"
-                                 NACL_ALIGN()
-                                 MASK_REGISTER("%2", "al")
-                                 "ldr %1, [%2]\n\t"
-                                 "cmp %1, %4\n\t"
-                                 "mov %0, %1\n\t"
-                                 "bne 1f\n\t"
-                                 NACL_ALIGN()
-                                 MASK_REGISTER("%2", "al")
-                                 "swp %0, %3, [%2]\n\t"
-                                 "cmp %0, %1\n\t"
-                                 NACL_ALIGN()
-                                 MASK_REGISTER("%2", "ne")
-                                 "swpne %3, %0, [%2]\n\t"
-                                 "bne 0b\n\t"
-                                 "1:"
-                                 : "=&r" (a), "=&r" (b)
-                                 : "r" (dest), "r" (exch), "r" (comp)
-                                 : "cc", "memory");
-
-       return a;
-#endif
-}
-
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
-{
-#if defined(HAVE_ARMV7)
-       gpointer ret, tmp;
-       __asm__ __volatile__ (
-                               "dmb\n"
-                               "1:\n"
-                               NACL_ALIGN()
-                               "mov    %0, #0\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "al")
-                               "ldrex %1, [%2]\n"
-                               "teq    %1, %3\n"
-                               "it eq\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "eq")
-                               "strexeq %0, %4, [%2]\n"
-                               "teq %0, #0\n"
-                               "bne 1b\n"
-                               "dmb\n"
-                               : "=&r" (tmp), "=&r" (ret)
-                               : "r" (dest), "r" (comp), "r" (exch)
-                               : "memory", "cc");
-
-       return ret;
-#else
-       gpointer a, b;
-
-       __asm__ __volatile__ (    "0:\n\t"
-                                 NACL_ALIGN()
-                                 MASK_REGISTER("%2", "al")
-                                 "ldr %1, [%2]\n\t"
-                                 "cmp %1, %4\n\t"
-                                 "mov %0, %1\n\t"
-                                 "bne 1f\n\t"
-                                 NACL_ALIGN()
-                                 MASK_REGISTER("%2", "eq")
-                                 "swpeq %0, %3, [%2]\n\t"
-                                 "cmp %0, %1\n\t"
-                                 NACL_ALIGN()
-                                 MASK_REGISTER("%2", "ne")
-                                 "swpne %3, %0, [%2]\n\t"
-                                 "bne 0b\n\t"
-                                 "1:"
-                                 : "=&r" (a), "=&r" (b)
-                                 : "r" (dest), "r" (exch), "r" (comp)
-                                 : "cc", "memory");
-
-       return a;
-#endif
-}
-
-static inline gint32 InterlockedIncrement(volatile gint32 *dest)
-{
-#if defined(HAVE_ARMV7)
-       gint32 ret, flag;
-       __asm__ __volatile__ (
-                               "dmb\n"
-                               "1:\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "al")
-                               "ldrex %0, [%2]\n"
-                               "add %0, %0, %3\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "al")
-                               "strex %1, %0, [%2]\n"
-                               "teq %1, #0\n"
-                               "bne 1b\n"
-                               "dmb\n"
-                               : "=&r" (ret), "=&r" (flag)
-                               : "r" (dest), "r" (1)
-                               : "memory", "cc");
-
-       return ret;
-#else
-       gint32 a, b, c;
-
-       __asm__ __volatile__ (  "0:\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "ldr %0, [%3]\n\t"
-                               "add %1, %0, %4\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "swp %2, %1, [%3]\n\t"
-                               "cmp %0, %2\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "ne")
-                               "swpne %1, %2, [%3]\n\t"
-                               "bne 0b"
-                               : "=&r" (a), "=&r" (b), "=&r" (c)
-                               : "r" (dest), "r" (1)
-                               : "cc", "memory");
-
-       return b;
-#endif
-}
-
-static inline gint32 InterlockedDecrement(volatile gint32 *dest)
-{
-#if defined(HAVE_ARMV7)
-       gint32 ret, flag;
-       __asm__ __volatile__ (
-                               "dmb\n"
-                               "1:\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "al")
-                               "ldrex %0, [%2]\n"
-                               "sub %0, %0, %3\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%2", "al")
-                               "strex %1, %0, [%2]\n"
-                               "teq %1, #0\n"
-                               "bne 1b\n"
-                               "dmb\n"
-                               : "=&r" (ret), "=&r" (flag)
-                               : "r" (dest), "r" (1)
-                               : "memory", "cc");
-
-       return ret;
-#else
-       gint32 a, b, c;
-
-       __asm__ __volatile__ (  "0:\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "ldr %0, [%3]\n\t"
-                               "add %1, %0, %4\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "swp %2, %1, [%3]\n\t"
-                               "cmp %0, %2\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "ne")
-                               "swpne %1, %2, [%3]\n\t"
-                               "bne 0b"
-                               : "=&r" (a), "=&r" (b), "=&r" (c)
-                               : "r" (dest), "r" (-1)
-                               : "cc", "memory");
-
-       return b;
-#endif
-}
-
-static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
-{
-#if defined(HAVE_ARMV7)
-       gint32 ret, flag;
-       __asm__ __volatile__ (
-                                 "dmb\n"
-                             "1:\n"
-                             NACL_ALIGN()
-                             MASK_REGISTER("%3", "al")
-                             "ldrex %0, [%3]\n"
-                             NACL_ALIGN()
-                             MASK_REGISTER("%3", "al")
-                             "strex %1, %2, [%3]\n"
-                             "teq %1, #0\n"
-                             "bne 1b\n"
-                                 "dmb\n"
-                             : "=&r" (ret), "=&r" (flag)
-                             : "r" (exch), "r" (dest)
-                             : "memory", "cc");
-       return ret;
-#else
-       gint32 a;
-
-       __asm__ __volatile__ (  NACL_ALIGN()
-                               MASK_REGISTER("%1", "al")
-                                "swp %0, %2, [%1]"
-                               : "=&r" (a)
-                               : "r" (dest), "r" (exch));
-
-       return a;
-#endif
-}
-
-static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
-{
-#if defined(HAVE_ARMV7)
-       gpointer ret, flag;
-       __asm__ __volatile__ (
-                                 "dmb\n"
-                             "1:\n"
-                             NACL_ALIGN()
-                             MASK_REGISTER("%3", "al")
-                             "ldrex %0, [%3]\n"
-                             NACL_ALIGN()
-                             MASK_REGISTER("%3", "al")
-                             "strex %1, %2, [%3]\n"
-                             "teq %1, #0\n"
-                             "bne 1b\n"
-                                 "dmb\n"
-                             : "=&r" (ret), "=&r" (flag)
-                             : "r" (exch), "r" (dest)
-                             : "memory", "cc");
-       return ret;
-#else
-       gpointer a;
-
-       __asm__ __volatile__ (  NACL_ALIGN()
-                               MASK_REGISTER("%1", "al")
-                                "swp %0, %2, [%1]"
-                               : "=&r" (a)
-                               : "r" (dest), "r" (exch));
-
-       return a;
-#endif
-}
-
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
-{
-#if defined(HAVE_ARMV7)
-       gint32 ret, tmp, flag;
-       __asm__ __volatile__ (
-                               "dmb\n"
-                               "1:\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "ldrex %0, [%3]\n"
-                               "add %1, %0, %4\n"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "strex %2, %1, [%3]\n"
-                               "teq %2, #0\n"
-                               "bne 1b\n"
-                               "dmb\n"
-                               : "=&r" (ret), "=&r" (tmp), "=&r" (flag)
-                               : "r" (dest), "r" (add)
-                               : "memory", "cc");
-
-       return ret;
-#else
-       int a, b, c;
-
-       __asm__ __volatile__ (  "0:\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "ldr %0, [%3]\n\t"
-                               "add %1, %0, %4\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "al")
-                               "swp %2, %1, [%3]\n\t"
-                               "cmp %0, %2\n\t"
-                               NACL_ALIGN()
-                               MASK_REGISTER("%3", "ne")
-                               "swpne %1, %2, [%3]\n\t"
-                               "bne 0b"
-                               : "=&r" (a), "=&r" (b), "=&r" (c)
-                               : "r" (dest), "r" (add)
-                               : "cc", "memory");
-
-       return a;
-#endif
-}
-
-#undef NACL_ALIGN
-#undef MASK_REGISTER
-
 #elif defined(__ia64__)
 
 #ifdef __INTEL_COMPILER
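Note on the hunk above: the removed PowerPC, ARM and MIPS blocks all implement the same pattern, a load-linked/store-conditional retry loop (lwarx/stwcx. on PPC, ldrex/strex on ARMv7, ll/sc on MIPS), with the ARM version adding explicit dmb barriers because, as its removed comment notes, the bare ARM atomics provide no ordering. A minimal sketch of that retry loop in portable C11 atomics, for illustration only (it assumes <stdatomic.h>, which this header does not actually require):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative only: the LL/SC-style retry loop behind the removed
 * InterlockedIncrement implementations, expressed as a C11 CAS loop. */
static inline int32_t sketch_increment (volatile _Atomic int32_t *val)
{
	int32_t old = atomic_load (val);      /* lwarx / ldrex / ll: observe the current value */
	int32_t desired;

	do {
		desired = old + 1;
		/* stwcx. / strex / sc: publish only if no other thread intervened;
		 * on failure 'old' is refreshed and we retry, like the bne- loop. */
	} while (!atomic_compare_exchange_weak (val, &old, desired));

	return desired;                       /* InterlockedIncrement returns the new value */
}

The seq_cst default of the C11 operations also supplies the full barriers that the removed ARM code had to spell out as dmb instructions.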
@@ -1127,98 +512,6 @@ static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
 #endif
 }
 
-#elif defined(__mips__)
-
-#if SIZEOF_REGISTER == 8
-#error "Not implemented."
-#endif
-
-static inline gint32 InterlockedIncrement(volatile gint32 *val)
-{
-       gint32 tmp, result = 0;
-
-       __asm__ __volatile__ ("    .set    mips32\n"
-                             "1:  ll      %0, %2\n"
-                             "    addu    %1, %0, 1\n"
-                              "    sc      %1, %2\n"
-                             "    beqz    %1, 1b\n"
-                             "    .set    mips0\n"
-                             : "=&r" (result), "=&r" (tmp), "=m" (*val)
-                             : "m" (*val));
-       return result + 1;
-}
-
-static inline gint32 InterlockedDecrement(volatile gint32 *val)
-{
-       gint32 tmp, result = 0;
-
-       __asm__ __volatile__ ("    .set    mips32\n"
-                             "1:  ll      %0, %2\n"
-                             "    subu    %1, %0, 1\n"
-                              "    sc      %1, %2\n"
-                             "    beqz    %1, 1b\n"
-                             "    .set    mips0\n"
-                             : "=&r" (result), "=&r" (tmp), "=m" (*val)
-                             : "m" (*val));
-       return result - 1;
-}
-
-static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
-                                               gint32 exch, gint32 comp) {
-       gint32 old, tmp;
-
-       __asm__ __volatile__ ("    .set    mips32\n"
-                             "1:  ll      %0, %2\n"
-                             "    bne     %0, %5, 2f\n"
-                             "    move    %1, %4\n"
-                              "    sc      %1, %2\n"
-                             "    beqz    %1, 1b\n"
-                             "2:  .set    mips0\n"
-                             : "=&r" (old), "=&r" (tmp), "=m" (*dest)
-                             : "m" (*dest), "r" (exch), "r" (comp));
-       return(old);
-}
-
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
-{
-       return (gpointer)(InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp)));
-}
-
-static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
-{
-       gint32 result, tmp;
-
-       __asm__ __volatile__ ("    .set    mips32\n"
-                             "1:  ll      %0, %2\n"
-                             "    move    %1, %4\n"
-                              "    sc      %1, %2\n"
-                             "    beqz    %1, 1b\n"
-                             "    .set    mips0\n"
-                             : "=&r" (result), "=&r" (tmp), "=m" (*dest)
-                             : "m" (*dest), "r" (exch));
-       return(result);
-}
-
-static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
-{
-       return (gpointer)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch));
-}
-
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
-{
-        gint32 result, tmp;
-
-       __asm__ __volatile__ ("    .set    mips32\n"
-                             "1:  ll      %0, %2\n"
-                             "    addu    %1, %0, %4\n"
-                              "    sc      %1, %2\n"
-                             "    beqz    %1, 1b\n"
-                             "    .set    mips0\n"
-                             : "=&r" (result), "=&r" (tmp), "=m" (*dest)
-                             : "m" (*dest), "r" (add));
-        return result;
-}
-
 #else
 
 #define WAPI_NO_ATOMIC_ASM
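When no architecture-specific block matches, the header defines WAPI_NO_ATOMIC_ASM and the operations fall back to a non-assembly implementation in mono/utils/atomic.c. As a rough idea of what such a fallback looks like, here is a sketch that serialises every operation through one global pthread mutex; the names and details are assumptions for illustration, not a copy of the real fallback code:

#include <pthread.h>
#include <glib.h>

/* Hypothetical WAPI_NO_ATOMIC_ASM-style fallback: correct but slow, since all
 * Interlocked* calls contend on a single process-wide mutex. */
static pthread_mutex_t fallback_lock = PTHREAD_MUTEX_INITIALIZER;

static gint32 fallback_CompareExchange (volatile gint32 *dest, gint32 exch, gint32 comp)
{
	gint32 old;

	pthread_mutex_lock (&fallback_lock);
	old = *dest;
	if (old == comp)
		*dest = exch;                 /* store only when the expected value matches */
	pthread_mutex_unlock (&fallback_lock);

	return old;                           /* like the asm versions, return the prior value */
}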