[runtime] Revert this change, as it strips the volatile semantics from the load.
author     João Matos <joao@tritao.eu>
           Fri, 17 Apr 2015 16:35:56 +0000 (17:35 +0100)
committer  João Matos <joao@tritao.eu>
           Fri, 17 Apr 2015 16:35:56 +0000 (17:35 +0100)
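
The revert below restores the single-line volatile read. The deleted code dereferenced through a pointer cast back to plain Anchor *, and in C it is the type of the lvalue used for the access, not the pointer it was derived from, that decides whether a load is volatile; the cast therefore let the compiler cache or hoist the read out of the retry loop. A minimal sketch of the difference, using a hypothetical Word union as a stand-in for Mono's real Anchor type:

/*
 * Sketch only; "Word" is a hypothetical stand-in for the Anchor
 * union defined in lock-free-alloc.h.
 */
typedef union { unsigned int value; } Word;

Word shared;

Word read_stale (void)
{
        volatile Word *p = (volatile Word *) &shared;
        return *(Word *) p;     /* cast strips volatile: an ordinary load
                                   the compiler may cache or hoist */
}

Word read_fresh (void)
{
        return *(volatile Word *) &shared;  /* volatile lvalue: the
                                               compiler must issue a
                                               fresh load each time */
}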
mono/utils/lock-free-alloc.c

index 5bf43ce877e1ebdf8aac34358827bf920aff0f32..3db9b45a92582ff2d897a3b574d70079d73eb72f 100644
@@ -366,10 +366,7 @@ alloc_from_active_or_partial (MonoLockFreeAllocator *heap)
 
        do {
                unsigned int next;
-               volatile Anchor* value;
-               value = (volatile Anchor *)&desc->anchor.value;
-               old_anchor = *(Anchor *)value;
-               new_anchor = old_anchor;
+               new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
                if (old_anchor.data.state == STATE_EMPTY) {
                        /* We must free it because we own it. */
                        desc_retire (desc);
@@ -474,10 +471,7 @@ mono_lock_free_free (gpointer ptr, size_t block_size)
        sb = desc->sb;
 
        do {
-               volatile Anchor* value;
-               value = (volatile Anchor *)&desc->anchor.value;
-               old_anchor = *(Anchor *)value;
-               new_anchor = old_anchor;
+               new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
                *(unsigned int*)ptr = old_anchor.data.avail;
                new_anchor.data.avail = ((char*)ptr - (char*)sb) / desc->slot_size;
                g_assert (new_anchor.data.avail < LOCK_FREE_ALLOC_SB_USABLE_SIZE (block_size) / desc->slot_size);
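
Both hunks follow the same lock-free retry pattern: snapshot the anchor, build a modified copy, and publish it with a compare-and-swap, looping until the CAS succeeds. The volatile load is what guarantees each retry observes the current anchor rather than a compiler-cached snapshot. A hedged sketch of that pattern, with invented names (Anchor32, retry_update) and GCC's __sync_val_compare_and_swap standing in for Mono's interlocked primitive:

#include <stdint.h>

/* Hypothetical layout; the real Anchor packs more fields. */
typedef union {
        struct { uint16_t avail; uint16_t count; } data;
        uint32_t value;
} Anchor32;

static Anchor32 anchor;  /* shared; mutated concurrently by other threads */

static void
retry_update (uint16_t new_avail)
{
        Anchor32 old_anchor, new_anchor;

        do {
                /* Fresh volatile load on every attempt -- the same idiom
                   the revert restores.  Without it the compiler could
                   hoist the read out of the loop and spin forever on a
                   stale value. */
                new_anchor = old_anchor = *(volatile Anchor32 *) &anchor;
                new_anchor.data.avail = new_avail;
                new_anchor.data.count++;
        } while (__sync_val_compare_and_swap (&anchor.value,
                                              old_anchor.value,
                                              new_anchor.value)
                 != old_anchor.value);
}

With C11 <stdatomic.h> the explicit volatile cast becomes unnecessary, since atomic_load always performs a fresh read; the cast here is the pre-C11 idiom this Mono code relies on.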