#include "threads/atomic.hpp"
/* ARM Linux "kuser" memory barrier helper: the kernel maps a helper routine
 * at the fixed address 0xffff0fa0; calling it performs whatever data memory
 * barrier the running CPU requires. This is the recommended way to get a
 * memory barrier on ARM Linux without assuming a particular architecture
 * revision. See for example:
 * http://icedtea.classpath.org/hg/icedtea6/file/7c7835fceadc/ports/hotspot/src/os_cpu/linux_zero/vm/orderAccess_linux_zero.inline.hpp#l29
 */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)

+namespace Atomic_md {
/**
* An atomic compare and swap for 32-bit integer values.
*
* @return value of the memory location before the store
*/
-inline static uint32_t Atomic_compare_and_swap_32(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
+inline uint32_t compare_and_swap(volatile uint32_t *p, uint32_t oldval, uint32_t newval)
{
- uint32_t result;
- uint32_t temp;
-
- /* TODO: improve this one! */
- __asm__ __volatile__ (
- "1:\t"
- "ldr %0,[%2]\n\t"
- "cmp %0,%4\n\t"
- "bne 2f\n\t"
- "swp %1,%3,[%2]\n\t"
- "cmp %1,%0\n\t"
- "swpne %0,%1,[%2]\n\t"
- "bne 1b\n\t"
- "2:"
- : "=&r" (result), "=&r" (temp)
- : "r" (p), "r" (newval), "r" (oldval)
- : "cc", "memory"
- );
-
- return result;
+ return __sync_val_compare_and_swap(p, oldval, newval);
}
*
* @return value of the memory location before the store
*/
-inline static uint64_t Atomic_compare_and_swap_64(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
+inline uint64_t compare_and_swap(volatile uint64_t *p, uint64_t oldval, uint64_t newval)
{
- log_println("Atomic_compare_and_swap_64: Use generic version.");
+ return Atomic::generic_compare_and_swap(p, oldval, newval);
}
/**
- * An atomic compare and swap for pointer values.
- *
- * @param p Pointer to memory address.
- * @param oldval Old value to be expected.
- * @param newval New value to be stored.
- *
- * @return value of the memory location before the store
+ * A memory barrier.
*/
-inline static void* Atomic_compare_and_swap_ptr(volatile void** p, void* oldval, void* newval)
+inline void memory_barrier(void)
{
- return (void*) Atomic_compare_and_swap_32((volatile uint32_t*) p, (uint32_t) oldval, (uint32_t) newval);
+ __kernel_dmb();
}
/**
- * A memory barrier.
+ * A write memory barrier.
*/
-inline static void Atomic_memory_barrier(void)
+inline void write_memory_barrier(void)
{
- __asm__ __volatile__ ("" : : : "memory");
+ __kernel_dmb();
}
-#define STORE_ORDER_BARRIER() __asm__ __volatile__ ("" : : : "memory");
-#define MEMORY_BARRIER_AFTER_ATOMIC() __asm__ __volatile__ ("" : : : "memory");
+/**
+ * An instruction barrier.
+ */
+inline void instruction_barrier(void)
+{
+ __asm__ __volatile__ ("" : : : "memory");
+}
+
+}
#endif // _MD_ATOMIC_HPP