+#elif defined(__ia64__)
+#define WAPI_ATOMIC_ASM
+
+#ifdef __INTEL_COMPILER
+#include <ia64intrin.h>
+#endif
+
+/* Atomically compare *dest with comp and, if they are equal, store exch.
+ * Returns the value of *dest observed before the operation; the exchange
+ * happened iff the return value equals comp. */
+static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
+						gint32 exch, gint32 comp)
+{
+	gint32 old;
+	guint64 real_comp;
+
+#ifdef __INTEL_COMPILER
+	old = _InterlockedCompareExchange (dest, exch, comp);
+#else
+	/* cmpxchg4 zero extends the value read from memory */
+	real_comp = (guint64)(guint32)comp;
+	/* ar.ccv holds the comparand; cmpxchg4.acq performs the 32-bit
+	 * compare-and-exchange with acquire semantics. */
+	asm volatile ("mov ar.ccv = %2 ;;\n\t"
+		      "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
+		      : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
+#endif
+
+	return(old);
+}
+
+/* Pointer-sized compare-and-exchange: atomically store exch into *dest iff
+ * *dest == comp. Returns the previously observed value of *dest.
+ * Uses cmpxchg8 (64-bit), matching the ia64 pointer width. */
+static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
+						gpointer exch, gpointer comp)
+{
+	gpointer old;
+
+#ifdef __INTEL_COMPILER
+	old = _InterlockedCompareExchangePointer (dest, exch, comp);
+#else
+	/* ar.ccv holds the comparand; cmpxchg8.acq is the 64-bit CAS with
+	 * acquire semantics. */
+	asm volatile ("mov ar.ccv = %2 ;;\n\t"
+		      "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
+		      : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
+#endif
+
+	return(old);
+}
+
+/* Atomically add 1 to *val; returns the new (incremented) value. */
+static inline gint32 InterlockedIncrement(gint32 volatile *val)
+{
+#ifdef __INTEL_COMPILER
+	return _InterlockedIncrement (val);
+#else
+	gint32 prev;
+
+	/* CAS retry loop: re-read and retry until the swap lands. */
+	for (;;) {
+		prev = *val;
+		if (InterlockedCompareExchange (val, prev + 1, prev) == prev) {
+			break;
+		}
+	}
+
+	return prev + 1;
+#endif
+}
+
+/* Atomically subtract 1 from *val; returns the new (decremented) value. */
+static inline gint32 InterlockedDecrement(gint32 volatile *val)
+{
+#ifdef __INTEL_COMPILER
+	return _InterlockedDecrement (val);
+#else
+	gint32 prev;
+
+	/* CAS retry loop: re-read and retry until the swap lands. */
+	for (;;) {
+		prev = *val;
+		if (InterlockedCompareExchange (val, prev - 1, prev) == prev) {
+			break;
+		}
+	}
+
+	return prev - 1;
+#endif
+}
+
+/* Atomically store new_val into *dest; returns the previous value. */
+static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
+{
+#ifdef __INTEL_COMPILER
+	return _InterlockedExchange (dest, new_val);
+#else
+	gint32 prev;
+
+	/* Keep retrying the CAS until *dest is still the value we sampled. */
+	for (;;) {
+		prev = *dest;
+		if (InterlockedCompareExchange (dest, new_val, prev) == prev) {
+			break;
+		}
+	}
+
+	return prev;
+#endif
+}
+
+/* Atomically store new_val into the pointer slot *dest; returns the
+ * previous pointer value. */
+static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
+{
+#ifdef __INTEL_COMPILER
+	return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
+#else
+	gpointer prev;
+
+	/* Keep retrying the pointer CAS until it succeeds. */
+	for (;;) {
+		prev = *dest;
+		if (InterlockedCompareExchangePointer (dest, new_val, prev) == prev) {
+			break;
+		}
+	}
+
+	return prev;
+#endif
+}
+
+/* Atomically add `add` to *val; returns the value *val held BEFORE the
+ * addition (fetch-and-add semantics).
+ *
+ * Fix: the `return old;` previously sat inside the #else branch, so when
+ * __INTEL_COMPILER was defined the function fell off the end without
+ * returning a value (undefined behavior). The return now follows #endif
+ * so both paths return the fetched value. */
+static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
+{
+	gint32 old;
+
+#ifdef __INTEL_COMPILER
+	old = _InterlockedExchangeAdd (val, add);
+#else
+	/* CAS retry loop: re-read until the add lands atomically. */
+	do {
+		old = *val;
+	} while (InterlockedCompareExchange (val, old + add, old) != old);
+#endif
+
+	return old;
+}
+
+#elif defined(__alpha__)
+#define WAPI_ATOMIC_ASM
+
+/* 32-bit compare-and-exchange via Alpha load-locked/store-conditional:
+ * ldl_l samples *dest, cmpeq tests it against comp, cmovne stages exch
+ * only when they matched (otherwise the old value is stored back
+ * unchanged), stl_c attempts the conditional store and beq retries if
+ * the reservation was lost. Returns the sampled (pre-operation) value.
+ * NOTE(review): no mb instructions are issued, so this provides no
+ * acquire/release ordering beyond the atomic update itself -- confirm
+ * callers do not depend on stronger ordering. */
+static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
+						gint32 exch, gint32 comp)
+{
+	gint32 old, temp, temp2;
+	/* widened to register (64-bit) width for cmpeq/cmovne */
+	long compq = comp, exchq = exch;
+
+	__asm__ __volatile__ (
+	"1:	ldl_l %2, %0\n"
+	"	mov %2, %1\n"
+	"	cmpeq %2, %5, %3\n"
+	"	cmovne %3, %4, %2\n"
+	"	stl_c %2, %0\n"
+	"	beq %2, 1b\n"
+	: "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
+	: "r" (exchq), "r" (compq), "m" (*dest));
+	return(old);
+}
+
+/* Pointer-width (64-bit, ldq_l/stq_c) variant of the compare-and-exchange
+ * above: store exch into *dest iff *dest == comp, retrying on a lost
+ * reservation. Returns the pointer value observed before the operation. */
+static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
+{
+	gpointer old, temp, temp2;
+
+	__asm__ __volatile__ (
+	"1:	ldq_l %2, %0\n"
+	"	mov %2, %1\n"
+	"	cmpeq %2, %5, %3\n"
+	"	cmovne %3, %4, %2\n"
+	"	stq_c %2, %0\n"
+	"	beq %2, 1b\n"
+	: "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
+	: "r" (exch), "r" (comp), "m" (*dest));
+	return(old);
+}
+
+/* Atomically add 1 to *val via ldl_l/addl/stl_c, retrying until the
+ * conditional store succeeds. Returns the new (incremented) value,
+ * captured in `cur` before the store consumes the register.
+ * NOTE(review): the `cur` output lacks an earlyclobber (&) even though
+ * it is written before the last input is used -- appears to rely on the
+ * compiler's register choice; confirm against the original build flags. */
+static inline gint32 InterlockedIncrement(volatile gint32 *val)
+{
+	gint32 temp, cur;
+
+	__asm__ __volatile__ (
+	"1:	ldl_l %0, %1\n"
+	"	addl %0, %3, %0\n"
+	"	mov %0, %2\n"
+	"	stl_c %0, %1\n"
+	"	beq %0, 1b\n"
+	: "=&r" (temp), "=m" (*val), "=r" (cur)
+	: "Ir" (1), "m" (*val));
+	return(cur);
+}
+
+/* Atomically subtract 1 from *val via ldl_l/subl/stl_c, retrying until
+ * the conditional store succeeds. Returns the new (decremented) value.
+ * NOTE(review): same missing-earlyclobber caveat on `cur` as in
+ * InterlockedIncrement -- confirm before touching the constraints. */
+static inline gint32 InterlockedDecrement(volatile gint32 *val)
+{
+	gint32 temp, cur;
+
+	__asm__ __volatile__ (
+	"1:	ldl_l %0, %1\n"
+	"	subl %0, %3, %0\n"
+	"	mov %0, %2\n"
+	"	stl_c %0, %1\n"
+	"	beq %0, 1b\n"
+	: "=&r" (temp), "=m" (*val), "=r" (cur)
+	: "Ir" (1), "m" (*val));
+	return(cur);
+}
+
+/* Atomically store new_val into *val: ldl_l samples the old value into
+ * `ret`, stl_c attempts to write new_val, and beq retries on a lost
+ * reservation. Returns the previous value of *val. */
+static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
+{
+	gint32 ret, temp;
+
+	__asm__ __volatile__ (
+	"1:	ldl_l %1, %0\n"
+	"	mov %3, %2\n"
+	"	stl_c %2, %0\n"
+	"	beq %2, 1b\n"
+	: "=m" (*val), "=&r" (ret), "=&r" (temp)
+	: "r" (new_val), "m" (*val));
+	return(ret);
+}
+
+/* Pointer-width (64-bit, ldq_l/stq_c) variant of InterlockedExchange:
+ * atomically store new_val into *val and return the previous pointer. */
+static inline gpointer InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
+{
+	gpointer ret, temp;
+
+	__asm__ __volatile__ (
+	"1:	ldq_l %1, %0\n"
+	"	mov %3, %2\n"
+	"	stq_c %2, %0\n"
+	"	beq %2, 1b\n"
+	: "=m" (*val), "=&r" (ret), "=&r" (temp)
+	: "r" (new_val), "m" (*val));
+	return(ret);
+}
+
+/* Fetch-and-add: atomically add `add` to *val, retrying the ldl_l/stl_c
+ * sequence until the conditional store succeeds. Returns the value *val
+ * held BEFORE the addition (saved into `ret` by the mov). */
+static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
+{
+	gint32 ret, temp;
+
+	__asm__ __volatile__ (
+	"1:	ldl_l %2, %0\n"
+	"	mov %2, %1\n"
+	"	addl %2, %3, %2\n"
+	"	stl_c %2, %0\n"
+	"	beq %2, 1b\n"
+	: "=m" (*val), "=&r" (ret), "=&r" (temp)
+	: "r" (add), "m" (*val));
+
+	return(ret);
+}
+
+#elif defined(__mips__)
+#define WAPI_ATOMIC_ASM
+
+/* Atomically add 1 to *val using MIPS ll/sc (the .set mips32 directive
+ * enables the ll/sc opcodes regardless of the compiler's -march).
+ * `result` holds the value sampled before the increment, so the function
+ * returns result + 1, i.e. the new value. */
+static inline gint32 InterlockedIncrement(volatile gint32 *val)
+{
+	gint32 tmp, result = 0;
+
+	__asm__ __volatile__ ("    .set    mips32\n"
+			      "1:  ll      %0, %2\n"
+			      "    addu    %1, %0, 1\n"
+			      "    sc      %1, %2\n"
+			      "    beqz    %1, 1b\n"
+			      "    .set    mips0\n"
+			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
+			      : "m" (*val));
+	return result + 1;
+}
+
+/* Atomically subtract 1 from *val using MIPS ll/sc; retries on a failed
+ * store-conditional. Returns the new (decremented) value, computed from
+ * the pre-decrement sample in `result`. */
+static inline gint32 InterlockedDecrement(volatile gint32 *val)
+{
+	gint32 tmp, result = 0;
+
+	__asm__ __volatile__ ("    .set    mips32\n"
+			      "1:  ll      %0, %2\n"
+			      "    subu    %1, %0, 1\n"
+			      "    sc      %1, %2\n"
+			      "    beqz    %1, 1b\n"
+			      "    .set    mips0\n"
+			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
+			      : "m" (*val));
+	return result - 1;
+}
+
+/* Pointer CAS routed through the 32-bit primitive: the casts truncate to
+ * gint32, so this is only correct where pointers are 32 bits wide (o32
+ * MIPS). It would break on a 64-bit MIPS ABI. */
+#define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
+
+/* 32-bit compare-and-exchange via MIPS ll/sc: ll samples *dest into
+ * `old`; if it differs from comp the code branches to 2 and returns the
+ * observed value; otherwise exch is staged and sc attempts the store,
+ * looping back on a lost reservation. Returns the pre-operation value
+ * (equal to comp iff the exchange happened).
+ * NOTE(review): no sync barrier is issued, so no ordering guarantee
+ * beyond atomicity -- confirm callers do not rely on one. */
+static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
+						gint32 exch, gint32 comp) {
+	gint32 old, tmp;
+
+	__asm__ __volatile__ ("    .set    mips32\n"
+			      "1:  ll      %0, %2\n"
+			      "    bne     %0, %5, 2f\n"
+			      "    move    %1, %4\n"
+			      "    sc      %1, %2\n"
+			      "    beqz    %1, 1b\n"
+			      "2:  .set    mips0\n"
+			      : "=&r" (old), "=&r" (tmp), "=m" (*dest)
+			      : "m" (*dest), "r" (exch), "r" (comp));
+	return(old);
+}
+
+/* Atomically store exch into *dest using ll/sc, retrying on a failed
+ * store-conditional. Returns the previous value of *dest. */
+static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
+{
+	gint32 result, tmp;
+
+	__asm__ __volatile__ ("    .set    mips32\n"
+			      "1:  ll      %0, %2\n"
+			      "    move    %1, %4\n"
+			      "    sc      %1, %2\n"
+			      "    beqz    %1, 1b\n"
+			      "    .set    mips0\n"
+			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
+			      : "m" (*dest), "r" (exch));
+	return(result);
+}
+/* Pointer exchange routed through the 32-bit primitive; like the CAS
+ * macro above, this truncates to gint32 and assumes 32-bit pointers. */
+#define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
+
+/* Fetch-and-add via ll/sc: samples *dest into `result`, stages
+ * result + add, and retries the store-conditional until it succeeds.
+ * Returns the value *dest held BEFORE the addition. */
+static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
+{
+	gint32 result, tmp;
+
+	__asm__ __volatile__ ("    .set    mips32\n"
+			      "1:  ll      %0, %2\n"
+			      "    addu    %1, %0, %4\n"
+			      "    sc      %1, %2\n"
+			      "    beqz    %1, 1b\n"
+			      "    .set    mips0\n"
+			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
+			      : "m" (*dest), "r" (add));
+	return result;
+}
+