/*
 * mono/io-layer/atomic.h
 * s390 support from Neale Ferguson <Neale.Ferguson@SoftwareAG-USA.com>.
 */
/*
 * atomic.h:  Atomic operations
 *
 * Author:
 *	Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
9
10 #ifndef _WAPI_ATOMIC_H_
11 #define _WAPI_ATOMIC_H_
12
13 #include <glib.h>
14
15 #include "mono/io-layer/wapi.h"
16
17 #ifdef __i386__
18 #define WAPI_ATOMIC_ASM
19
/*
 * NB: The *Pointer() functions here assume that
 * sizeof(pointer)==sizeof(gint32)
 *
 * NB2: These asm functions assume 486+ (some of the opcodes don't
 * exist on 386).  If this becomes an issue, we can get configure to
 * fall back to the non-atomic C versions of these calls.
 */
28
29 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
30                                                 gint32 exch, gint32 comp)
31 {
32         gint32 old;
33
34         __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
35                               : "=m" (*dest), "=a" (old)
36                               : "r" (exch), "m" (*dest), "a" (comp));   
37         return(old);
38 }
39
40 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
41 {
42         gpointer old;
43
44         __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
45                               : "=m" (*dest), "=a" (old)
46                               : "r" (exch), "m" (*dest), "a" (comp));   
47         return(old);
48 }
49
50 static inline gint32 InterlockedIncrement(volatile gint32 *val)
51 {
52         gint32 tmp;
53         
54         __asm__ __volatile__ ("lock; xaddl %0, %1"
55                               : "=r" (tmp), "=m" (*val)
56                               : "0" (1), "m" (*val));
57
58         return(tmp+1);
59 }
60
61 static inline gint32 InterlockedDecrement(volatile gint32 *val)
62 {
63         gint32 tmp;
64         
65         __asm__ __volatile__ ("lock; xaddl %0, %1"
66                               : "=r" (tmp), "=m" (*val)
67                               : "0" (-1), "m" (*val));
68
69         return(tmp-1);
70 }
71
/*
 * See
 * http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
 * for the reasons for using cmpxchg and a loop here.
 *
 * That URL is no longer valid; at the time of writing a copy was
 * still retrievable from the Google cache.
 */
80 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
81 {
82         gint32 ret;
83         
84         __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
85                               : "=m" (*val), "=a" (ret)
86                               : "r" (new_val), "m" (*val), "a" (*val));
87
88         return(ret);
89 }
90
91 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
92                                                   gpointer new_val)
93 {
94         gpointer ret;
95         
96         __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
97                               : "=m" (*val), "=a" (ret)
98                               : "r" (new_val), "m" (*val), "a" (*val));
99
100         return(ret);
101 }
102
103 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
104 {
105         gint32 ret;
106         
107         __asm__ __volatile__ ("lock; xaddl %0, %1"
108                               : "=r" (ret), "=m" (*val)
109                               : "0" (add), "m" (*val));
110         
111         return(ret);
112 }
113
114 #elif defined(sparc) || defined (__sparc__)
115 #define WAPI_ATOMIC_ASM
116
/*
 * BEGIN_SPIN: acquire a one-byte spinlock.  ldstub atomically loads
 * the lock byte into "tmp" and stores 0xFF into it; we loop until the
 * value read back was 0, i.e. until this CPU was the one that set it.
 * "tmp" must be an int lvalue; "lock" an unsigned char lvalue.
 */
#define BEGIN_SPIN(tmp,lock) \
__asm__ __volatile__("1:        ldstub [%1],%0\n\t"  \
                             "          cmp %0, 0\n\t" \
                             "          bne 1b\n\t" \
                             "          nop" \
                             : "=&r" (tmp) \
                             : "r" (&lock) \
                             : "memory"); 

/*
 * END_SPIN: release the spinlock by storing %g0 (hardwired zero) into
 * the lock byte.  The "memory" clobber stops the compiler from
 * sinking protected stores past the release.
 */
#define END_SPIN(lock) \
__asm__ __volatile__("stb       %%g0, [%0]"  \
                      : /* no outputs */ \
                      : "r" (&lock)\
                      : "memory");
131
132
133 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
134 {
135         static unsigned char lock;
136         int tmp;
137         gint32 old;
138
139         BEGIN_SPIN(tmp,lock)
140
141         old = *dest;
142         if (old==comp) {
143                 *dest=exch;
144         }
145
146         END_SPIN(lock)
147
148         return(old);
149 }
150
151 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
152 {
153         static unsigned char lock;
154         int tmp;
155         gpointer old;
156
157         BEGIN_SPIN(tmp,lock)
158
159         old = *dest;
160         if (old==comp) {
161                 *dest=exch;
162         }
163
164         END_SPIN(lock)
165
166         return(old);
167 }
168
169 static inline gint32 InterlockedIncrement(volatile gint32 *dest)
170 {
171         static unsigned char lock;
172         int tmp;
173         gint32 ret;
174
175         BEGIN_SPIN(tmp,lock)
176
177         *dest++;
178         ret = *dest;
179
180         END_SPIN(lock)
181
182         return(ret);
183 }
184
185 static inline gint32 InterlockedDecrement(volatile gint32 *dest)
186 {
187         static unsigned char lock;
188         int tmp;
189         gint32 ret;
190
191         BEGIN_SPIN(tmp,lock)
192
193         *dest--;
194         ret = *dest;
195
196         END_SPIN(lock)
197
198         return(ret);
199 }
200
201 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
202 {
203         static unsigned char lock;
204         int tmp;
205         gint32 ret;
206
207         BEGIN_SPIN(tmp,lock)
208
209         ret = *dest;
210         *dest = exch;
211
212         END_SPIN(lock)
213
214         return(ret);
215 }
216
217 static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
218 {
219         static unsigned char lock;
220         int tmp;
221         gpointer ret;
222
223         BEGIN_SPIN(tmp,lock)
224
225         ret = *dest;
226         *dest = exch;
227
228         END_SPIN(lock)
229
230         return(ret);
231 }
232
233 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
234 {
235         static unsigned char lock;
236         int tmp;
237         gint32 ret;
238
239         BEGIN_SPIN(tmp,lock)
240
241         ret = *dest;
242         *dest += add;
243
244         END_SPIN(lock)
245
246         return(ret);
247 }
248
249 #elif __s390__
250
251 #define WAPI_ATOMIC_ASM
252
253 static inline gint32 
254 InterlockedCompareExchange(volatile gint32 *dest,
255                            gint32 exch, gint32 comp)
256 {
257         gint32 old;
258
259         __asm__ __volatile__ ("\tL\t%1,%0\n"
260                               "\tCS\t%3,%2,%0\n"
261                               : "=m" (*dest), "=r" (old)
262                               : "r" (exch), "r" (comp)
263                               : "cc");  
264         return(old);
265 }
266
267 #define InterlockedCompareExchangePointer InterlockedCompareExchange
268
269 static inline gint32 
270 InterlockedIncrement(volatile gint32 *val)
271 {
272         gint32 tmp;
273         
274         __asm__ __volatile__ ("0:\tL\t%0,%1\n"
275                               "\tLR\t1,%0\n"
276                               "\tAHI\t1,1\n"
277                               "0:\tCS\t%0,1,%1\n"
278                               "\tJNZ\t0b"
279                               : "=r" (tmp), "+m" (*val)
280                               : : "1", "cc");
281
282         return(tmp+1);
283 }
284
285 static inline gint32 
286 InterlockedDecrement(volatile gint32 *val)
287 {
288         gint32 tmp;
289         
290         __asm__ __volatile__ ("0:\tL\t%0,%1\n"
291                               "\tLR\t1,%0\n"
292                               "\tAHI\t1,-1\n"
293                               "0:\tCS\t%0,1,%1\n"
294                               "\tJNZ\t0b"
295                               : "=r" (tmp), "+m" (*val)
296                               : : "1", "cc");
297
298         return(tmp-1);
299 }
300
301
302 static inline gint32 
303 InterlockedExchange(volatile gint32 *val, gint32 new_val)
304 {
305         gint32 ret;
306         
307         __asm__ __volatile__ ("0:\tL\t%1,%0\n"
308                               "\tCS\t%1,%2,%0\n"
309                               "\tJNZ\t0b"
310                               : "+m" (*val), "=r" (ret)
311                               : "r" (new_val)
312                               : "cc");
313
314         return(ret);
315 }
316
317 #define InterlockedExchangePointer InterlockedExchange
318
319 static inline gint32 
320 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
321 {
322         gint32 ret;
323
324         __asm__ __volatile__ ("0:\tL\t%0,%1\n"
325                               "\tLR\t1,%0\n"
326                               "\tAR\t1,%2\n"
327                               "0:\tCS\t%0,1,%1\n"
328                               "\tJNZ\t0b"
329                               : "=r" (ret), "+m" (*val)
330                               : "r" (add) 
331                               : "1", "cc");
332         
333         return(ret);
334 }
335
336 #else
337
338 extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
339 extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
340 extern gint32 InterlockedIncrement(volatile gint32 *dest);
341 extern gint32 InterlockedDecrement(volatile gint32 *dest);
342 extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
343 extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
344 extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
345 #endif
346
347 #endif /* _WAPI_ATOMIC_H_ */