X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Futils%2Fatomic.h;h=306800cfe1d0cd508a56aafb341a815de949233c;hb=8c36d04c87713b7251de9c591f551788efa4db7a;hp=b07265954c83cdfca6885a5fa467d3645912e72a;hpb=ec96b2dc2289e00a9c30a8b39b14c3f62ab484ed;p=mono.git

diff --git a/mono/utils/atomic.h b/mono/utils/atomic.h
index b07265954c8..306800cfe1d 100755
--- a/mono/utils/atomic.h
+++ b/mono/utils/atomic.h
@@ -21,7 +21,11 @@
 /* On Windows, we always use the functions provided by the Windows API. */
 #if defined(__WIN32__) || defined(_WIN32)
 
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
 #include <windows.h>
+#include 
 
 /* mingw is missing InterlockedCompareExchange64 () from winbase.h */
 #if HAVE_DECL_INTERLOCKEDCOMPAREEXCHANGE64==0
@@ -31,7 +35,145 @@ static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64
 }
 #endif
 
-/* Prefer GCC atomic ops if the target supports it (see configure.in). */
+/* mingw is missing InterlockedExchange64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDEXCHANGE64==0
+static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
+{
+	gint64 old_val;
+	do {
+		old_val = *val;
+	} while (InterlockedCompareExchange64 (val, new_val, old_val) != old_val);
+	return old_val;
+}
+#endif
+
+/* mingw is missing InterlockedIncrement64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDINCREMENT64==0
+static inline gint64 InterlockedIncrement64(volatile gint64 *val)
+{
+	return __sync_add_and_fetch (val, 1);
+}
+#endif
+
+/* mingw is missing InterlockedDecrement64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDDECREMENT64==0
+static inline gint64 InterlockedDecrement64(volatile gint64 *val)
+{
+	return __sync_sub_and_fetch (val, 1);
+}
+#endif
+
+/* mingw is missing InterlockedAdd () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDADD==0
+static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
+{
+	return __sync_add_and_fetch (dest, add);
+}
+#endif
+
+/* mingw is missing InterlockedAdd64 () from winbase.h */
+#if HAVE_DECL_INTERLOCKEDADD64==0
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
+{
+	return __sync_add_and_fetch (dest, add);
+}
+#endif
+
+#if defined(_MSC_VER) && !defined(InterlockedAdd)
+/* MSVC before 2013 only defines InterlockedAdd* for the Itanium architecture */
+static inline gint32 InterlockedAdd(volatile gint32 *dest, gint32 add)
+{
+	return InterlockedExchangeAdd (dest, add) + add;
+}
+#endif
+
+#if defined(_MSC_VER) && !defined(InterlockedAdd64)
+#if defined(InterlockedExchangeAdd64)
+/* This may be defined only on amd64 */
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
+{
+	return InterlockedExchangeAdd64 (dest, add) + add;
+}
+#else
+static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
+{
+	gint64 prev_value;
+
+	do {
+		prev_value = *dest;
+	} while (prev_value != InterlockedCompareExchange64(dest, prev_value + add, prev_value));
+
+	return prev_value + add;
+}
+#endif
+#endif
+
+#ifdef HOST_WIN32
+#define TO_INTERLOCKED_ARGP(ptr) ((volatile LONG*)(ptr))
+#else
+#define TO_INTERLOCKED_ARGP(ptr) (ptr)
+#endif
+
+/* And now for some dirty hacks... The Windows API doesn't
+ * provide any useful primitives for this (other than getting
+ * into architecture-specific madness), so use CAS. */
+
+static inline gint32 InterlockedRead(volatile gint32 *src)
+{
+	return InterlockedCompareExchange (TO_INTERLOCKED_ARGP (src), 0, 0);
+}
+
+static inline gint64 InterlockedRead64(volatile gint64 *src)
+{
+	return InterlockedCompareExchange64 (src, 0, 0);
+}
+
+static inline gpointer InterlockedReadPointer(volatile gpointer *src)
+{
+	return InterlockedCompareExchangePointer (src, NULL, NULL);
+}
+
+static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
+{
+	InterlockedExchange (TO_INTERLOCKED_ARGP (dst), val);
+}
+
+static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
+{
+	InterlockedExchange64 (dst, val);
+}
+
+static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
+{
+	InterlockedExchangePointer (dst, val);
+}
+
+/* We can't even use CAS for these, so write them out
+ * explicitly according to x86(_64) semantics... */
+
+static inline gint8 InterlockedRead8(volatile gint8 *src)
+{
+	return *src;
+}
+
+static inline gint16 InterlockedRead16(volatile gint16 *src)
+{
+	return *src;
+}
+
+static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
+{
+	*dst = val;
+	mono_memory_barrier ();
+}
+
+static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
+{
+	*dst = val;
+	mono_memory_barrier ();
+}
+
+/* Prefer GCC atomic ops if the target supports it (see configure.ac). */
 #elif defined(USE_GCC_ATOMIC_OPS)
 
 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
@@ -57,7 +199,7 @@ static inline gint32 InterlockedIncrement(volatile gint32 *val)
 
 static inline gint32 InterlockedDecrement(volatile gint32 *val)
 {
-	return __sync_add_and_fetch (val, -1);
+	return __sync_sub_and_fetch (val, 1);
 }
 
 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
@@ -84,7 +226,50 @@ static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
 	return __sync_fetch_and_add (val, add);
 }
 
-#if defined (TARGET_OSX)
+static inline gint8 InterlockedRead8(volatile gint8 *src)
+{
+	/* Kind of a hack, but GCC doesn't give us anything better, and it's
+	 * certainly not as bad as using a CAS loop. */
+	return __sync_fetch_and_add (src, 0);
+}
+
+static inline gint16 InterlockedRead16(volatile gint16 *src)
+{
+	return __sync_fetch_and_add (src, 0);
+}
+
+static inline gint32 InterlockedRead(volatile gint32 *src)
+{
+	return __sync_fetch_and_add (src, 0);
+}
+
+static inline void InterlockedWrite8(volatile gint8 *dst, gint8 val)
+{
+	/* Nothing useful from GCC at all, so fall back to CAS. */
+	gint8 old_val;
+	do {
+		old_val = *dst;
+	} while (__sync_val_compare_and_swap (dst, old_val, val) != old_val);
+}
+
+static inline void InterlockedWrite16(volatile gint16 *dst, gint16 val)
+{
+	gint16 old_val;
+	do {
+		old_val = *dst;
+	} while (__sync_val_compare_and_swap (dst, old_val, val) != old_val);
+}
+
+static inline void InterlockedWrite(volatile gint32 *dst, gint32 val)
+{
+	/* Nothing useful from GCC at all, so fall back to CAS. */
+	gint32 old_val;
+	do {
+		old_val = *dst;
+	} while (__sync_val_compare_and_swap (dst, old_val, val) != old_val);
+}
+
+#if defined (TARGET_OSX) || defined (__arm__) || (defined (__mips__) && !defined (__mips64)) || (defined (__powerpc__) && !defined (__powerpc64__)) || (defined (__sparc__) && !defined (__arch64__))
 #define BROKEN_64BIT_ATOMICS_INTRINSIC 1
 #endif
 
@@ -115,6 +300,12 @@ static inline gint64 InterlockedExchangeAdd64(volatile gint64 *val, gint64 add)
 	return __sync_fetch_and_add (val, add);
 }
 
+static inline gint64 InterlockedRead64(volatile gint64 *src)
+{
+	/* Kind of a hack, but GCC doesn't give us anything better. */
+	return __sync_fetch_and_add (src, 0);
+}
+
 #else
 
 /* Implement 64-bit cmpxchg by hand or emulate it. */
@@ -140,7 +331,7 @@ static inline gint64 InterlockedIncrement64(volatile gint64 *val)
 	do {
 		get = *val;
 		set = get + 1;
-	} while (InterlockedCompareExchange64 (val, set, get) != set);
+	} while (InterlockedCompareExchange64 (val, set, get) != get);
 	return set;
 }
 
@@ -150,7 +341,7 @@ static inline gint64 InterlockedDecrement64(volatile gint64 *val)
 	do {
 		get = *val;
 		set = get - 1;
-	} while (InterlockedCompareExchange64 (val, set, get) != set);
+	} while (InterlockedCompareExchange64 (val, set, get) != get);
 	return set;
 }
 
@@ -160,7 +351,7 @@ static inline gint64 InterlockedAdd64(volatile gint64 *dest, gint64 add)
 	do {
 		get = *dest;
 		set = get + add;
-	} while (InterlockedCompareExchange64 (dest, set, get) != set);
+	} while (InterlockedCompareExchange64 (dest, set, get) != get);
 	return set;
 }
 
@@ -171,6 +362,16 @@ static inline gint64 InterlockedRead64(volatile gint64 *src)
 
 #endif
 
+static inline gpointer InterlockedReadPointer(volatile gpointer *src)
+{
+	return InterlockedCompareExchangePointer (src, NULL, NULL);
+}
+
+static inline void InterlockedWritePointer(volatile gpointer *dst, gpointer val)
+{
+	InterlockedExchangePointer (dst, val);
+}
+
 /* We always implement this in terms of a 64-bit cmpxchg since
  * GCC doesn't have an intrinsic to model it anyway. */
 static inline gint64 InterlockedExchange64(volatile gint64 *val, gint64 new_val)
@@ -188,166 +389,6 @@ static inline void InterlockedWrite64(volatile gint64 *dst, gint64 val)
 	InterlockedExchange64 (dst, val);
 }
 
-#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
-
-G_GNUC_UNUSED
-static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
-{
-	register volatile gint32 *dest asm("g1") = _dest;
-	register gint32 comp asm("o4") = _comp;
-	register gint32 exch asm("o5") = _exch;
-
-	__asm__ __volatile__(
-		/* cas [%%g1], %%o4, %%o5 */
-		".word 0xdbe0500c"
-		: "=r" (exch)
-		: "0" (exch), "r" (dest), "r" (comp)
-		: "memory");
-
-	return exch;
-}
-
-G_GNUC_UNUSED
-static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
-{
-	register volatile gpointer *dest asm("g1") = _dest;
-	register gpointer comp asm("o4") = _comp;
-	register gpointer exch asm("o5") = _exch;
-
-	__asm__ __volatile__(
-#ifdef SPARCV9
-		/* casx [%%g1], %%o4, %%o5 */
-		".word 0xdbf0500c"
-#else
-		/* cas [%%g1], %%o4, %%o5 */
-		".word 0xdbe0500c"
-#endif
-		: "=r" (exch)
-		: "0" (exch), "r" (dest), "r" (comp)
-		: "memory");
-
-	return exch;
-}
-
-G_GNUC_UNUSED
-static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
-{
-	register volatile gint32 *dest asm("g1") = _dest;
-	register gint32 tmp asm("o4");
-	register gint32 ret asm("o5");
-
-	__asm__ __volatile__(
-		"1: ld [%%g1], %%o4\n\t"
-		" add %%o4, 1, %%o5\n\t"
-		/* cas [%%g1], %%o4, %%o5 */
-		" .word 0xdbe0500c\n\t"
-		" cmp %%o4, %%o5\n\t"
-		" bne 1b\n\t"
-		" add %%o5, 1, %%o5"
-		: "=&r" (tmp), "=&r" (ret)
-		: "r" (dest)
-		: "memory", "cc");
-
-	return ret;
-}
-
-G_GNUC_UNUSED
-static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
-{
-	register volatile gint32 *dest asm("g1") = _dest;
-	register gint32 tmp asm("o4");
-	register gint32 ret asm("o5");
-
-	__asm__ __volatile__(
-		"1: ld [%%g1], %%o4\n\t"
-		" sub %%o4, 1, %%o5\n\t"
-		/* cas [%%g1], %%o4, %%o5 */
-		" .word 0xdbe0500c\n\t"
-		" cmp %%o4, %%o5\n\t"
-		" bne 1b\n\t"
-		" sub %%o5, 1, %%o5"
-		: "=&r" (tmp), "=&r" (ret)
-		: "r" (dest)
-		: "memory", "cc");
-
-	return ret;
-}
-
-G_GNUC_UNUSED
-static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
-{
-	register volatile gint32 *dest asm("g1") = _dest;
-	register gint32 tmp asm("o4");
-	register gint32 ret asm("o5");
-
-	__asm__ __volatile__(
-		"1: ld [%%g1], %%o4\n\t"
-		" mov %3, %%o5\n\t"
-		/* cas [%%g1], %%o4, %%o5 */
-		" .word 0xdbe0500c\n\t"
-		" cmp %%o4, %%o5\n\t"
-		" bne 1b\n\t"
-		" nop"
-		: "=&r" (tmp), "=&r" (ret)
-		: "r" (dest), "r" (exch)
-		: "memory", "cc");
-
-	return ret;
-}
-
-G_GNUC_UNUSED
-static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
-{
-	register volatile gpointer *dest asm("g1") = _dest;
-	register gpointer tmp asm("o4");
-	register gpointer ret asm("o5");
-
-	__asm__ __volatile__(
-#ifdef SPARCV9
-		"1: ldx [%%g1], %%o4\n\t"
-#else
-		"1: ld [%%g1], %%o4\n\t"
-#endif
-		" mov %3, %%o5\n\t"
-#ifdef SPARCV9
-		/* casx [%%g1], %%o4, %%o5 */
-		" .word 0xdbf0500c\n\t"
-#else
-		/* cas [%%g1], %%o4, %%o5 */
-		" .word 0xdbe0500c\n\t"
-#endif
-		" cmp %%o4, %%o5\n\t"
-		" bne 1b\n\t"
-		" nop"
-		: "=&r" (tmp), "=&r" (ret)
-		: "r" (dest), "r" (exch)
-		: "memory", "cc");
-
-	return ret;
-}
-
-G_GNUC_UNUSED
-static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
-{
-	register volatile gint32 *dest asm("g1") = _dest;
-	register gint32 tmp asm("o4");
-	register gint32 ret asm("o5");
-
-	__asm__ __volatile__(
-		"1: ld [%%g1], %%o4\n\t"
-		" add %%o4, %3, %%o5\n\t"
-		/* cas [%%g1], %%o4, %%o5 */
-		" .word 0xdbe0500c\n\t"
-		" cmp %%o4, %%o5\n\t"
-		" bne 1b\n\t"
-		" add %%o5, %3, %%o5"
-		: "=&r" (tmp), "=&r" (ret)
-		: "r" (dest), "r" (add)
-		: "memory", "cc");
-
-	return ret;
-}
-
 #elif defined(__ia64__)
 
 #ifdef __INTEL_COMPILER
@@ -482,6 +523,16 @@ extern gint64 InterlockedExchange64(volatile gint64 *dest, gint64 exch);
 extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
 extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
 extern gint64 InterlockedExchangeAdd64(volatile gint64 *dest, gint64 add);
+extern gint8 InterlockedRead8(volatile gint8 *src);
+extern gint16 InterlockedRead16(volatile gint16 *src);
+extern gint32 InterlockedRead(volatile gint32 *src);
+extern gint64 InterlockedRead64(volatile gint64 *src);
+extern gpointer InterlockedReadPointer(volatile gpointer *src);
+extern void InterlockedWrite8(volatile gint8 *dst, gint8 val);
+extern void InterlockedWrite16(volatile gint16 *dst, gint16 val);
+extern void InterlockedWrite(volatile gint32 *dst, gint32 val);
+extern void InterlockedWrite64(volatile gint64 *dst, gint64 val);
+extern void InterlockedWritePointer(volatile gpointer *dst, gpointer val);
 
 #endif
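
For context, the snippet below sketches how the primitives this patch adds and fixes are typically called. It is illustrative only and not part of the commit: the counter_* and total_* helper names are invented for the example, while the Interlocked* calls, the gint32/gint64/gboolean glib types, and the mono/utils/atomic.h path are taken from the diff above.

/* Usage sketch (illustrative, not part of the patch): a tiny
 * thread-safe counter built on the primitives declared above. */
#include <glib.h>
#include <mono/utils/atomic.h>

static volatile gint32 counter;
static volatile gint64 total_bytes;

/* Bump the 32-bit counter atomically and return the new value. */
static gint32
counter_increment (void)
{
	return InterlockedIncrement (&counter);
}

/* Read the counter without torn or stale values. */
static gint32
counter_read (void)
{
	return InterlockedRead (&counter);
}

/* Reset the counter to zero only if it still holds `expected';
 * InterlockedCompareExchange returns the previous value, so the
 * reset happened exactly when that value equals `expected'. */
static gboolean
counter_reset_if (gint32 expected)
{
	return InterlockedCompareExchange (&counter, 0, expected) == expected;
}

/* Accumulate into a 64-bit total; on targets where the header has to
 * emulate 64-bit atomics, this goes through a CAS retry loop like the
 * ones shown in the diff. */
static gint64
total_add (gint64 bytes)
{
	return InterlockedAdd64 (&total_bytes, bytes);
}

The retry condition in such emulation loops is also what the "!= set" to "!= get" hunks above correct: InterlockedCompareExchange64 returns the value it found in memory, so the loop must retry when that value differs from the value that was read (get), not from the value it tried to store (set).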