* src/threads/atomic.hpp: Reorganized namespace declarations.
(CAS_32_functor, CAS_64_functor, CAS_chooser, CAS): Helper templates for letting
the compiler figure out which version of compare_and_swap to use.
* src/threads/atomic.cpp: generic_compare_and_swap now lives in namespace Atomic.
* src/native/vm/sun_misc_Unsafe.cpp: Adapted usage of compare_and_swap.
* src/vm/jit/alpha/md-atomic.hpp: Adapted to new namespace.
* src/vm/jit/arm/md-atomic.hpp: Likewise.
* src/vm/jit/i386/md-atomic.hpp: Likewise.
* src/vm/jit/m68k/md-atomic.hpp: Likewise.
* src/vm/jit/mips/md-atomic.hpp: Likewise.
* src/vm/jit/powerpc/md-atomic.hpp: Likewise.
* src/vm/jit/powerpc64/md-atomic.hpp: Likewise.
* src/vm/jit/s390/md-atomic.hpp: Likewise.
* src/vm/jit/sparc64/md-atomic.hpp: Likewise.
* src/vm/jit/x86_64/md-atomic.hpp: Likewise.
*/
JNIEXPORT jboolean JNICALL Java_sun_misc_Unsafe_compareAndSwapObject(JNIEnv *env, jobject _this, jobject o, jlong offset, jobject expected, jobject x)
{
- volatile void **p;
+ void **p;
void *result;
/* XXX Use LLNI */
- p = (volatile void **) (((uint8_t *) o) + offset);
+ p = (void **) (((uint8_t *) o) + offset);
- result = Atomic::compare_and_swap(p, expected, x);
+ result = Atomic::compare_and_swap(p, (void *) expected, (void *) x);
if (result == expected)
return true;
p = (uint32_t *) (((uint8_t *) o) + offset);
- result = Atomic::compare_and_swap(p, expected, x);
+ result = Atomic::compare_and_swap(p, (uint32_t) expected, (uint32_t) x);
if (result == (uint32_t) expected)
return true;
p = (uint64_t *) (((uint8_t *) o) + offset);
- result = Atomic::compare_and_swap(p, expected, x);
+ result = Atomic::compare_and_swap(p, (uint64_t) expected, (uint64_t) x);
if (result == (uint64_t) expected)
return true;
// Global mutex for generic atomic instructions.
static Mutex lock;
+namespace Atomic {
/**
* A generic atomic compare and swap for 32-bit integer values. This
*
* @return value of the memory location before the store
*/
-uint32_t Atomic::generic_compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+uint32_t generic_compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t result;
*
* @return value of the memory location before the store
*/
-uint64_t Atomic::generic_compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+uint64_t generic_compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
uint64_t result;
*
* @return value of the memory location before the store
*/
-void* Atomic::generic_compare_and_swap(volatile void** p, void* oldval, void* newval)
+void* generic_compare_and_swap(volatile void** p, void* oldval, void* newval)
{
void* result;
* A generic memory barrier. This function is using a mutex to
* provide atomicity.
*/
-void Atomic::generic_memory_barrier(void)
+void generic_memory_barrier(void)
{
lock.lock();
lock.unlock();
}
+}
// Legacy C interface.
extern "C" {
-uint32_t Atomic_compare_and_swap_32(volatile uint32_t *p, uint32_t oldval, uint32_t newval) { return Atomic::compare_and_swap(p, oldval, newval); }
-uint64_t Atomic_compare_and_swap_64(volatile uint64_t *p, uint64_t oldval, uint64_t newval) { return Atomic::compare_and_swap(p, oldval, newval); }
-void* Atomic_compare_and_swap_ptr(volatile void** p, void* oldval, void* newval) { return Atomic::compare_and_swap(p, oldval, newval); }
+uint32_t Atomic_compare_and_swap_32(uint32_t *p, uint32_t oldval, uint32_t newval) { return Atomic::compare_and_swap(p, oldval, newval); }
+uint64_t Atomic_compare_and_swap_64(uint64_t *p, uint64_t oldval, uint64_t newval) { return Atomic::compare_and_swap(p, oldval, newval); }
+void* Atomic_compare_and_swap_ptr(void** p, void* oldval, void* newval) { return Atomic::compare_and_swap(p, oldval, newval); }
void Atomic_memory_barrier(void) { Atomic::memory_barrier(); }
void Atomic_write_memory_barrier(void) { Atomic::write_memory_barrier(); }
void Atomic_instruction_barrier(void) { Atomic::instruction_barrier(); }
#ifdef __cplusplus
-class Atomic {
-public:
+namespace Atomic_md {
+ // Machine dependent functions.
+ uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval);
+ uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval);
+
+ void memory_barrier(void);
+ void write_memory_barrier(void);
+ void instruction_barrier(void);
+}
+
+namespace Atomic {
+
// Generic functions.
- static uint32_t generic_compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval);
- static uint64_t generic_compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval);
- static void* generic_compare_and_swap(volatile void** p, void* oldval, void* newval);
- static void generic_memory_barrier(void);
+ uint32_t generic_compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval);
+ uint64_t generic_compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval);
+ void* generic_compare_and_swap(volatile void** p, void* oldval, void* newval);
+ void generic_memory_barrier(void);
- // Machine dependent functions.
- static uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval);
- static uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval);
- static void* compare_and_swap(volatile void** p, void* oldval, void* newval);
- static void memory_barrier(void);
- static void write_memory_barrier(void);
- static void instruction_barrier(void);
-};
+}
// Include machine dependent implementation.
#include "md-atomic.hpp"
+namespace Atomic {
+
+ struct CAS_32_functor {
+ typedef uint32_t value_type;
+ static value_type compare_and_swap(value_type *p, value_type o, value_type n) {
+ return Atomic_md::compare_and_swap(p, o, n);
+ }
+ };
+
+ struct CAS_64_functor {
+ typedef uint64_t value_type;
+ static value_type compare_and_swap(value_type *p, value_type o, value_type n) {
+ return Atomic_md::compare_and_swap(p, o, n);
+ }
+ };
+
+ template<int N> class CAS_chooser;
+ template<> class CAS_chooser<4> {
+ public:
+ typedef CAS_32_functor the_type;
+ };
+ template<> class CAS_chooser<8> {
+ public:
+ typedef CAS_64_functor the_type;
+ };
+
+ template<class T> class CAS {
+ public:
+ typedef typename CAS_chooser<sizeof(T)>::the_type S;
+ static T compare_and_swap(T *p, T o, T n) {
+ return (T) S::compare_and_swap((typename S::value_type*) p,
+ (typename S::value_type) o,
+ (typename S::value_type) n);
+ }
+ };
+
+ template<class T> T compare_and_swap(T *p, T o, T n) {
+ return CAS<T>::compare_and_swap(p, o, n);
+ }
+
+ inline void memory_barrier(void) { Atomic_md::memory_barrier(); }
+ inline void write_memory_barrier(void) { Atomic_md::write_memory_barrier(); }
+ inline void instruction_barrier(void) { Atomic_md::instruction_barrier(); }
+}
+
#else
// Legacy C interface.
-uint32_t Atomic_compare_and_swap_32(volatile uint32_t *p, uint32_t oldval, uint32_t newval);
-uint64_t Atomic_compare_and_swap_64(volatile uint64_t *p, uint64_t oldval, uint64_t newval);
-void* Atomic_compare_and_swap_ptr(volatile void** p, void* oldval, void* newval);
+uint32_t Atomic_compare_and_swap_32(uint32_t *p, uint32_t oldval, uint32_t newval);
+uint64_t Atomic_compare_and_swap_64(uint64_t *p, uint64_t oldval, uint64_t newval);
+void* Atomic_compare_and_swap_ptr(void** p, void* oldval, void* newval);
void Atomic_memory_barrier(void);
void Atomic_write_memory_barrier(void);
void Atomic_instruction_barrier(void);
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t temp;
uint32_t result;
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
uint64_t temp;
uint64_t result;
}
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint64_t*) p, (uint64_t) oldval, (uint64_t) newval);
-}
-
-
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("mb" : : : "memory");
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("wmb" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
__asm__ __volatile__ ("mb" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t result;
uint32_t temp;
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
- return generic_compare_and_swap(p, oldval, newval);
-}
-
-
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint32_t*) p, (uint32_t) oldval, (uint32_t) newval);
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
}
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t result;
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
#warning Should we use cmpxchg8b or a generic version?
- return generic_compare_and_swap(p, oldval, newval);
-}
-
-
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint32_t*) p, (uint32_t) oldval, (uint32_t) newval);
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
}
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("lock; add $0, 0(%%esp)" : : : "memory" );
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
// We need the "memory" constraint here because compare_and_swap does not
// have it.
__asm__ __volatile__ ("" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
- return generic_compare_and_swap(p, oldval, newval);
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
}
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
- return generic_compare_and_swap(p, oldval, newval);
-}
-
-
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return generic_compare_and_swap(p, oldval, newval);
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
}
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
-	generic_memory_barrier();
+	Atomic::generic_memory_barrier();
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t result;
uint32_t temp;
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
#if SIZEOF_VOID_P == 8
uint64_t result;
return result;
#else
- return generic_compare_and_swap(p, oldval, newval);
-#endif
-}
-
-
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
-#if SIZEOF_VOID_P == 8
- return (void*) compare_and_swap((volatile uint64_t*) p, (uint64_t) oldval, (uint64_t) newval);
-#else
- return (void*) compare_and_swap((volatile uint32_t*) p, (uint32_t) oldval, (uint32_t) newval);
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
#endif
}
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t temp;
uint32_t result;
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
return Atomic::generic_compare_and_swap(p, oldval, newval);
}
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint32_t*) p, (uint32_t) oldval, (uint32_t) newval);
-}
-
-
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("sync" : : : "memory");
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
__asm__ __volatile__ ("isync" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t temp;
uint32_t result;
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
uint64_t temp;
uint64_t result;
}
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint64_t*) p, (uint64_t) oldval, (uint64_t) newval);
-}
-
-
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("sync" : : : "memory");
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* An instruction memory barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
__asm__ __volatile__ ("isync" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
__asm__ __volatile__ (
"cs %0,%3,0(%2)\n"
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
- return generic_compare_and_swap(p, oldval, newval);
-}
-
-
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint32_t*) p, (uint32_t) oldval, (uint32_t) newval);
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
}
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("bcr 15,0" : : : "memory" );
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
memory_barrier();
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
memory_barrier();
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
#if 0
// This one should be correct.
return result;
#else
- return generic_compare_and_swap(p, oldval, newval);
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
#endif
}
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
uint64_t result;
}
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint64_t*) p, (uint64_t) oldval, (uint64_t) newval);
-}
-
-
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("membar 0x0F" : : : "memory" );
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("wmb" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
__asm__ __volatile__ ("mb" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP
#include "threads/atomic.hpp"
+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline uint32_t Atomic::compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
uint32_t result;
*
* @return value of the memory location before the store
*/
-inline uint64_t Atomic::compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
uint64_t result;
}
-/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
- */
-inline void* Atomic::compare_and_swap(volatile void** p, void* oldval, void* newval)
-{
- return (void*) compare_and_swap((volatile uint64_t*) p, (uint64_t) oldval, (uint64_t) newval);
-}
-
-
/**
* A memory barrier.
*/
-inline void Atomic::memory_barrier(void)
+inline void memory_barrier(void)
{
__asm__ __volatile__ ("mfence" : : : "memory");
}
/**
* A write memory barrier.
*/
-inline void Atomic::write_memory_barrier(void)
+inline void write_memory_barrier(void)
{
__asm__ __volatile__ ("" : : : "memory");
}
/**
* An instruction barrier.
*/
-inline void Atomic::instruction_barrier(void)
+inline void instruction_barrier(void)
{
// We need the "memory" constraint here because compare_and_swap does not
// have it.
__asm__ __volatile__ ("" : : : "memory");
}
+}
+
#endif // _MD_ATOMIC_HPP