return(old);
}
+/* Atomically adds 'add' to '*dest' and returns the resulting value.
+ * No-atomic-asm fallback: serializes on the 'spin' mutex (presumably a
+ * file-scope pthread mutex set up by spin_init — defined outside this
+ * hunk), so it is only atomic with respect to the other fallbacks. */
+gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
+{
+	gint32 ret;
+	int thr_ret;
+
+	/* One-time initialization of the shared spin mutex. */
+	mono_once(&spin_once, spin_init);
+
+	/* Release the mutex if this thread is cancelled while holding it. */
+	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
+			      (void *)&spin);
+	thr_ret = pthread_mutex_lock(&spin);
+	g_assert (thr_ret == 0);
+
+	*dest += add;
+	ret= *dest;
+
+	thr_ret = pthread_mutex_unlock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Pop without executing: the mutex was already unlocked above. */
+	pthread_cleanup_pop (0);
+
+	return(ret);
+}
+
+/* 64-bit variant of InterlockedAdd: atomically adds 'add' to '*dest'
+ * and returns the new value, serialized on the shared 'spin' mutex. */
+gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
+{
+	gint64 ret;
+	int thr_ret;
+
+	/* One-time initialization of the shared spin mutex. */
+	mono_once(&spin_once, spin_init);
+
+	/* Release the mutex if this thread is cancelled while holding it. */
+	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
+			      (void *)&spin);
+	thr_ret = pthread_mutex_lock(&spin);
+	g_assert (thr_ret == 0);
+
+	*dest += add;
+	ret= *dest;
+
+	thr_ret = pthread_mutex_unlock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Pop without executing: the mutex was already unlocked above. */
+	pthread_cleanup_pop (0);
+
+	return(ret);
+}
+
gint32 InterlockedIncrement(volatile gint32 *dest)
{
gint32 ret;
return(ret);
}
+/* Atomically increments '*dest' and returns the incremented value,
+ * serialized on the shared 'spin' mutex (no-atomic-asm fallback). */
+gint64 InterlockedIncrement64(volatile gint64 *dest)
+{
+	gint64 ret;
+	int thr_ret;
+
+	/* One-time initialization of the shared spin mutex. */
+	mono_once(&spin_once, spin_init);
+
+	/* Release the mutex if this thread is cancelled while holding it. */
+	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
+			      (void *)&spin);
+	thr_ret = pthread_mutex_lock(&spin);
+	g_assert (thr_ret == 0);
+
+	(*dest)++;
+	ret= *dest;
+
+	thr_ret = pthread_mutex_unlock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Pop without executing: the mutex was already unlocked above. */
+	pthread_cleanup_pop (0);
+
+	return(ret);
+}
+
gint32 InterlockedDecrement(volatile gint32 *dest)
{
gint32 ret;
return(ret);
}
+/* Atomically decrements '*dest' and returns the decremented value,
+ * serialized on the shared 'spin' mutex (no-atomic-asm fallback). */
+gint64 InterlockedDecrement64(volatile gint64 *dest)
+{
+	gint64 ret;
+	int thr_ret;
+
+	/* One-time initialization of the shared spin mutex. */
+	mono_once(&spin_once, spin_init);
+
+	/* Release the mutex if this thread is cancelled while holding it. */
+	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
+			      (void *)&spin);
+	thr_ret = pthread_mutex_lock(&spin);
+	g_assert (thr_ret == 0);
+
+	(*dest)--;
+	ret= *dest;
+
+	thr_ret = pthread_mutex_unlock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Pop without executing: the mutex was already unlocked above. */
+	pthread_cleanup_pop (0);
+
+	return(ret);
+}
+
gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
gint32 ret;
return(ret);
}
+/* Atomically stores 'exch' into '*dest' and returns the previous value,
+ * serialized on the shared 'spin' mutex (no-atomic-asm fallback). */
+gint64 InterlockedExchange64(volatile gint64 *dest, gint64 exch)
+{
+	gint64 ret;
+	int thr_ret;
+
+	/* One-time initialization of the shared spin mutex. */
+	mono_once(&spin_once, spin_init);
+
+	/* Release the mutex if this thread is cancelled while holding it. */
+	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
+			      (void *)&spin);
+	thr_ret = pthread_mutex_lock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Read the old value before overwriting it. */
+	ret=*dest;
+	*dest=exch;
+
+	thr_ret = pthread_mutex_unlock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Pop without executing: the mutex was already unlocked above. */
+	pthread_cleanup_pop (0);
+
+	return(ret);
+}
+
gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
gpointer ret;
return(ret);
}
+/* Atomically adds 'add' to '*dest' and returns the value *before* the
+ * addition (fetch-and-add), serialized on the shared 'spin' mutex. */
+gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add)
+{
+	gint64 ret;
+	int thr_ret;
+
+	/* One-time initialization of the shared spin mutex. */
+	mono_once(&spin_once, spin_init);
+
+	/* Release the mutex if this thread is cancelled while holding it. */
+	pthread_cleanup_push ((void(*)(void *))pthread_mutex_unlock,
+			      (void *)&spin);
+	thr_ret = pthread_mutex_lock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Capture the pre-add value: this is the fetch-and-add contract. */
+	ret= *dest;
+	*dest+=add;
+
+	thr_ret = pthread_mutex_unlock(&spin);
+	g_assert (thr_ret == 0);
+
+	/* Pop without executing: the mutex was already unlocked above. */
+	pthread_cleanup_pop (0);
+
+	return(ret);
+}
+
#define NEED_64BIT_CMPXCHG_FALLBACK
#endif
return __sync_val_compare_and_swap (dest, comp, exch);
}
+/* Atomic 32-bit add; returns the updated value of '*dest'. */
+static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
+{
+	gint32 result = __sync_add_and_fetch (dest, add);
+	return result;
+}
+
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
return __sync_add_and_fetch (val, 1);
return __sync_val_compare_and_swap (dest, comp, exch);
}
+/* Atomic 64-bit add; returns the updated value of '*dest'. */
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
+{
+	gint64 result = __sync_add_and_fetch (dest, add);
+	return result;
+}
+
+/* Atomic 64-bit increment; returns the incremented value. */
+static inline gint64 InterlockedIncrement64(volatile gint64 *val)
+{
+	gint64 incremented = __sync_add_and_fetch (val, 1);
+	return incremented;
+}
+
+/* Atomic 64-bit decrement; returns the decremented value. */
+static inline gint64 InterlockedDecrement64(volatile gint64 *val)
+{
+	gint64 decremented = __sync_sub_and_fetch (val, 1);
+	return decremented;
+}
+
+/* Atomic 64-bit fetch-and-add; returns the value *before* the add. */
+static inline gint64 InterlockedExchangeAdd64(volatile gint64 *val, gint64 add)
+{
+	gint64 old_value = __sync_fetch_and_add (val, add);
+	return old_value;
+}
+
+#else
+
+/* Implement 64-bit cmpxchg by hand or emulate it. */
+extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
+
+/* Implement all other 64-bit atomics in terms of a specialized CAS
+ * in this case, since chances are that the other 64-bit atomic
+ * intrinsics are broken too.
+ */
+
+/* 64-bit fetch-and-add emulated with the CAS primitive: returns the
+ * value of '*dest' before the addition. */
+static inline gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add)
+{
+	gint64 prev;
+
+	/* Retry until the CAS installs prev + add over an unchanged prev;
+	 * the CAS reports the previous value, so success == prev. */
+	do {
+		prev = *dest;
+	} while (InterlockedCompareExchange64 (dest, prev + add, prev) != prev);
+
+	return prev;
+}
+
+/* Atomic 64-bit increment emulated with the CAS primitive; returns the
+ * incremented value. */
+static inline gint64 InterlockedIncrement64(volatile gint64 *val)
+{
+	gint64 get, set;
+	do {
+		get = *val;
+		set = get + 1;
+		/* The CAS returns the *previous* value of '*val', which
+		 * equals 'get' exactly when the swap succeeded.  The old
+		 * comparison against 'set' could never be true on a
+		 * successful swap, so the loop spun forever, incrementing
+		 * repeatedly even without contention. */
+	} while (InterlockedCompareExchange64 (val, set, get) != get);
+	return set;
+}
+
+/* Atomic 64-bit decrement emulated with the CAS primitive; returns the
+ * decremented value. */
+static inline gint64 InterlockedDecrement64(volatile gint64 *val)
+{
+	gint64 get, set;
+	do {
+		get = *val;
+		set = get - 1;
+		/* Success is signalled by the CAS returning the previous
+		 * value 'get'; comparing against 'set' (as before) made
+		 * every successful swap look like a failure, so the loop
+		 * never terminated and kept decrementing. */
+	} while (InterlockedCompareExchange64 (val, set, get) != get);
+	return set;
+}
+
+/* Atomic 64-bit add emulated with the CAS primitive; returns the value
+ * of '*dest' after the addition. */
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
+{
+	gint64 get, set;
+	do {
+		get = *dest;
+		set = get + add;
+		/* The CAS reports the previous value, so a successful swap
+		 * returns 'get'.  The old test against 'set' retried after
+		 * every *successful* swap, applying the addend over and
+		 * over instead of exactly once. */
+	} while (InterlockedCompareExchange64 (dest, set, get) != get);
+	return set;
+}
+
+/* Atomic 64-bit read via a CAS that can never modify the value:
+ * comparing 0 and swapping in 0 either rewrites an existing 0 (no
+ * change) or fails and simply reports the current contents. */
+static inline gint64 InterlockedRead64(volatile gint64 *src)
+{
+	gint64 current = InterlockedCompareExchange64 (src, 0, 0);
+	return current;
+}
+
#endif
+/* We always implement this in terms of a 64-bit cmpxchg since
+ * GCC doesn't have an intrinsic to model it anyway. */
+/* Atomic 64-bit exchange built on the CAS primitive: stores 'new_val'
+ * into '*val' and returns the value that was replaced. */
+static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
+{
+	gint64 prev;
+
+	/* Keep retrying until no other thread raced in between reading
+	 * 'prev' and the swap; the CAS returning 'prev' means success. */
+	do {
+		prev = *val;
+	} while (InterlockedCompareExchange64 (val, new_val, prev) != prev);
+
+	return prev;
+}
+
+/* Atomic 64-bit store of 'val' into '*dst'; the previous value that the
+ * exchange returns is deliberately discarded. */
+static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
+{
+	/* Nothing useful from GCC at all, so fall back to CAS. */
+	InterlockedExchange64 (dst, val);
+}
+
#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
G_GNUC_UNUSED
#define WAPI_NO_ATOMIC_ASM
extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
+extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
+extern gint32 InterlockedAdd(volatile gint32 *dest, gint32 add);
+extern gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
+extern gint64 InterlockedIncrement64(volatile gint64 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
+extern gint64 InterlockedDecrement64(volatile gint64 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
+extern gint64 InterlockedExchange64(volatile gint64 *dest, gint64 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
-
-#endif
-
-#if defined (WAPI_NO_ATOMIC_ASM) || defined (BROKEN_64BIT_ATOMICS_INTRINSIC)
-
-extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
+extern gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add);
#endif