/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Memory model documented at http://www-106.ibm.com/developerworks/    */
/* eserver/articles/archguide.html and (clearer)                        */
/* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */
/* There appears to be no implicit ordering between any kind of         */
/* independent memory references.                                       */
/* Architecture enforces some ordering based on control dependence.     */
/* I don't know if that could help.                                     */
/* Data-dependent loads are always ordered.                             */
/* Based on the above references, eieio is intended for use on          */
/* uncached memory, which we don't support.  It does not order loads    */
/* from cached memory.                                                  */
/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to   */
/* track some of this down and correcting my misunderstandings. -HB     */
/* Earl Chew subsequently contributed further fixes & additions.        */
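
/*
 * Illustration only (not part of this header): because independent
 * memory references are unordered on this architecture, two plain
 * stores can be observed out of order by another CPU.  A hypothetical
 * sketch with ordinary AO_t variables x and y, both initially 0:
 *
 *   x = 1;       // store 1
 *   y = 1;       // store 2; another CPU may observe y==1 while x==0
 *
 * The barriers and acquire/release primitives below exist to rule
 * such reorderings out where they matter.
 */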
#include "../all_aligned_atomic_load_store.h"

#include "../test_and_set_t_is_ao_t.h"
        /* There seems to be no byte equivalent of lwarx, so this       */
        /* may really be what we want, at least in the 32-bit case.     */
AO_INLINE void
AO_nop_full(void)
{
  __asm__ __volatile__("sync" : : : "memory");
}

#define AO_HAVE_nop_full
/* lwsync apparently works for everything but a StoreLoad barrier.     */
AO_INLINE void
AO_lwsync(void)
{
#ifdef __NO_LWSYNC__
  __asm__ __volatile__("sync" : : : "memory");
#else
  __asm__ __volatile__("lwsync" : : : "memory");
#endif
}
#define AO_nop_write() AO_lwsync()
#define AO_HAVE_nop_write

#define AO_nop_read() AO_lwsync()
#define AO_HAVE_nop_read
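
/*
 * Illustration only: since lwsync does not order an earlier store
 * against a later load, Dekker-style mutual exclusion needs the full
 * "sync" of AO_nop_full().  A minimal sketch, assuming flag0 and flag1
 * are AO_t variables and AO_load/AO_store come from the generic header
 * included above:
 *
 *   AO_store(&flag0, 1);
 *   AO_nop_full();           // StoreLoad: must be sync, not lwsync
 *   if (AO_load(&flag1) == 0) {
 *     // ... critical section ...
 *   }
 */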
/* We explicitly specify load_acquire, since it is important, and can  */
/* be implemented relatively cheaply.  It could be implemented         */
/* with an ordinary load followed by a lwsync.  But the general wisdom */
/* seems to be that a data dependent branch followed by an isync is    */
/* cheaper.  And the documentation is fairly explicit that this also   */
/* has acquire semantics.                                              */
/* ppc64 uses ld not lwz.                                              */
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
  AO_t result;

#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
  __asm__ __volatile__ (
    "ld%U1%X1 %0,%1\n"      /* load the value                        */
    "cmpw %0,%0\n"          /* create a data-dependent condition     */
    "bne- 1f\n"             /* branch never taken, but dependent     */
    "1: isync\n"            /* acquire: discard speculative loads    */
    : "=r" (result)
    : "m"(*addr) : "memory", "cr0");
#else
  /* FIXME: We should get gcc to allocate one of the condition  */
  /* registers.  I always got "impossible constraint" when I    */
  /* tried the "y" constraint.                                  */
  __asm__ __volatile__ (
    "lwz%U1%X1 %0,%1\n"     /* load the value                        */
    "cmpw %0,%0\n"          /* create a data-dependent condition     */
    "bne- 1f\n"             /* branch never taken, but dependent     */
    "1: isync\n"            /* acquire: discard speculative loads    */
    : "=r" (result)
    : "m"(*addr) : "memory", "cc");
#endif
  return result;
}
#define AO_HAVE_load_acquire
/* We explicitly specify store_release, since it relies        */
/* on the fact that lwsync is also a LoadStore barrier.        */
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t value)
{
  AO_lwsync();
  *addr = value;
}

#define AO_HAVE_store_release
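
/*
 * Illustration only: a minimal release/acquire handoff built from the
 * two primitives above.  "data" and "ready" are assumed AO_t variables,
 * both initially 0:
 *
 *   // producer
 *   data = 42;
 *   AO_store_release(&ready, 1);      // lwsync orders the data store
 *
 *   // consumer
 *   while (AO_load_acquire(&ready) == 0) {}
 *   // the dependent branch plus isync guarantees data==42 is visible
 */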
/* This is similar to the code in the garbage collector.  Deleting     */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction.                          */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* Completely untested.  And we should be using smaller objects anyway. */
  unsigned long oldval;
  unsigned long temp = 1; /* locked value */

  __asm__ __volatile__(
               "1:ldarx %0,0,%1\n"   /* load and reserve               */
               "cmpdi %0, 0\n"       /* if load is                     */
               "bne 2f\n"            /*   non-zero, return already set */
               "stdcx. %2,0,%1\n"    /* else store conditional         */
               "bne- 1b\n"           /* retry if lost reservation      */
               "2:\n"                /* oldval is zero if we set       */
              : "=&r"(oldval)
              : "r"(addr), "r"(temp)
              : "memory", "cr0");
#else
  int oldval;
  int temp = 1; /* locked value */

  __asm__ __volatile__(
               "1:lwarx %0,0,%1\n"   /* load and reserve               */
               "cmpwi %0, 0\n"       /* if load is                     */
               "bne 2f\n"            /*   non-zero, return already set */
               "stwcx. %2,0,%1\n"    /* else store conditional         */
               "bne- 1b\n"           /* retry if lost reservation      */
               "2:\n"                /* oldval is zero if we set       */
              : "=&r"(oldval)
              : "r"(addr), "r"(temp)
              : "memory", "cr0");
#endif
  return (AO_TS_VAL_t)oldval;
}

#define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr) {
  AO_TS_VAL_t result = AO_test_and_set(addr);
  AO_lwsync();
  return result;
}

#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr) {
  AO_lwsync();
  return AO_test_and_set(addr);
}

#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
  AO_TS_VAL_t result;
  AO_lwsync();
  result = AO_test_and_set(addr);
  AO_lwsync();
  return result;
}

#define AO_HAVE_test_and_set_full
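
/*
 * Illustration only: the _acquire/_release variants are what a simple
 * spinlock wants.  A minimal sketch, assuming the AO_TS_INITIALIZER,
 * AO_TS_SET, and AO_CLEAR definitions from the generic atomic_ops.h:
 *
 *   volatile AO_TS_t lock = AO_TS_INITIALIZER;
 *
 *   while (AO_test_and_set_acquire(&lock) == AO_TS_SET) {}  // spin
 *   // ... critical section ...
 *   AO_CLEAR(&lock);                                        // release
 */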
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
  AO_t oldval;
  int result = 0;
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* FIXME: Completely untested.  */
  __asm__ __volatile__(
               "1:ldarx %0,0,%2\n"   /* load and reserve            */
               "cmpd %0, %4\n"       /* if load is not equal to     */
               "bne 2f\n"            /*   old, fail                 */
               "stdcx. %3,0,%2\n"    /* else store conditional      */
               "bne- 1b\n"           /* retry if lost reservation   */
               "li %1,1\n"           /* result = 1;                 */
               "2:\n"
              : "=&r"(oldval), "=&r"(result)
              : "r"(addr), "r"(new_val), "r"(old), "1"(result)
              : "memory", "cr0");
#else
  __asm__ __volatile__(
               "1:lwarx %0,0,%2\n"   /* load and reserve            */
               "cmpw %0, %4\n"       /* if load is not equal to     */
               "bne 2f\n"            /*   old, fail                 */
               "stwcx. %3,0,%2\n"    /* else store conditional      */
               "bne- 1b\n"           /* retry if lost reservation   */
               "li %1,1\n"           /* result = 1;                 */
               "2:\n"
              : "=&r"(oldval), "=&r"(result)
              : "r"(addr), "r"(new_val), "r"(old), "1"(result)
              : "memory", "cr0");
#endif
  return result;
}

#define AO_HAVE_compare_and_swap
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) {
  int result = AO_compare_and_swap(addr, old, new_val);
  AO_lwsync();
  return result;
}

#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) {
  AO_lwsync();
  return AO_compare_and_swap(addr, old, new_val);
}

#define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
  int result;
  AO_lwsync();
  result = AO_compare_and_swap(addr, old, new_val);
  AO_lwsync();
  return result;
}

#define AO_HAVE_compare_and_swap_full
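
/*
 * Illustration only: a typical retry loop built on the primitive
 * above.  atomic_max is a hypothetical helper, not part of this
 * header; it publishes v only if it exceeds the current value:
 *
 *   void atomic_max(volatile AO_t *p, AO_t v) {
 *     AO_t cur;
 *     do {
 *       cur = AO_load(p);
 *       if (cur >= v) return;        // nothing to do
 *     } while (!AO_compare_and_swap_full(p, cur, v));  // lost a race: retry
 *   }
 */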
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
  AO_t oldval;
  AO_t newval;
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* FIXME: Completely untested.  */
  __asm__ __volatile__(
               "1:ldarx %0,0,%2\n"   /* load and reserve            */
               "add %1,%0,%3\n"      /* increment                   */
               "stdcx. %1,0,%2\n"    /* store conditional           */
               "bne- 1b\n"           /* retry if lost reservation   */
              : "=&r"(oldval), "=&r"(newval)
              : "r"(addr), "r"(incr)
              : "memory", "cr0");
#else
  __asm__ __volatile__(
               "1:lwarx %0,0,%2\n"   /* load and reserve            */
               "add %1,%0,%3\n"      /* increment                   */
               "stwcx. %1,0,%2\n"    /* store conditional           */
               "bne- 1b\n"           /* retry if lost reservation   */
              : "=&r"(oldval), "=&r"(newval)
              : "r"(addr), "r"(incr)
              : "memory", "cr0");
#endif
  return oldval;
}

#define AO_HAVE_fetch_and_add
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) {
  AO_t result = AO_fetch_and_add(addr, incr);
  AO_lwsync();
  return result;
}

#define AO_HAVE_fetch_and_add_acquire
AO_INLINE AO_t
AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) {
  AO_lwsync();
  return AO_fetch_and_add(addr, incr);
}

#define AO_HAVE_fetch_and_add_release
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) {
  AO_t result;
  AO_lwsync();
  result = AO_fetch_and_add(addr, incr);
  AO_lwsync();
  return result;
}

#define AO_HAVE_fetch_and_add_full
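
/*
 * Illustration only: a shared event counter is the canonical use.
 * "hits" is an assumed AO_t variable; the plain variant suffices when
 * the count is not used to order other memory accesses:
 *
 *   volatile AO_t hits;
 *   ...
 *   (void)AO_fetch_and_add(&hits, 1);   // returns the old value
 */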
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
  /* Empty: AO_t is the natural 64-bit word.   */
#else
# include "../ao_t_is_int.h"
#endif