/*
 * atomic.h:  Atomic operations
 *
 * Author:
 *      Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */

#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_

#include <glib.h>

#include "mono/io-layer/wapi.h"

#ifdef __i386__
#define WAPI_ATOMIC_ASM

/*
 * NB: The *Pointer() functions here assume that
 * sizeof(pointer)==sizeof(gint32)
 *
 * NB2: These asm functions assume 486+ (some of the opcodes don't
 * exist on 386).  If this becomes an issue, we can get configure to
 * fall back to the non-atomic C versions of these calls.
 */

static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
                                                gint32 exch, gint32 comp)
{
        gint32 old;

        __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                              : "=m" (*dest), "=a" (old)
                              : "r" (exch), "m" (*dest), "a" (comp));
        return(old);
}
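
/*
 * Usage sketch (illustrative only, not part of the io-layer API): callers
 * normally retry InterlockedCompareExchange in a loop until the swap takes
 * effect.  The hypothetical helper below atomically raises a shared gint32
 * to the maximum of its current value and a new sample:
 *
 *      static void update_max (volatile gint32 *max, gint32 sample)
 *      {
 *              gint32 old;
 *
 *              do {
 *                      old = *max;
 *                      if (sample <= old) {
 *                              return;
 *                      }
 *              } while (InterlockedCompareExchange (max, sample, old) != old);
 *      }
 */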

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
        gpointer old;

        __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                              : "=m" (*dest), "=a" (old)
                              : "r" (exch), "m" (*dest), "a" (comp));
        return(old);
}

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
        gint32 tmp;

        __asm__ __volatile__ ("lock; xaddl %0, %1"
                              : "=r" (tmp), "=m" (*val)
                              : "0" (1), "m" (*val));

        return(tmp+1);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
        gint32 tmp;

        __asm__ __volatile__ ("lock; xaddl %0, %1"
                              : "=r" (tmp), "=m" (*val)
                              : "0" (-1), "m" (*val));

        return(tmp-1);
}

/*
 * See
 * http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
 * for the reasons for using cmpxchg and a loop here.
 *
 * That url is no longer valid, but it's still in the google cache at the
 * moment: http://www.google.com/search?q=cache:http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
 *
 * For the time being, http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
 * might work.  Bet it will change soon enough though.
 */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
        gint32 ret;

        __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
                              : "=m" (*val), "=a" (ret)
                              : "r" (new_val), "m" (*val), "a" (*val));

        return(ret);
}
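
/*
 * Usage sketch (illustrative only; my_lock, my_lock_acquire and
 * my_lock_release are hypothetical names, not part of this header):
 * the atomic exchange above is enough to build a simple test-and-set
 * spin lock, spinning while the previous value was non-zero:
 *
 *      static volatile gint32 my_lock = 0;
 *
 *      static void my_lock_acquire (void)
 *      {
 *              while (InterlockedExchange (&my_lock, 1) != 0) {
 *                      ;
 *              }
 *      }
 *
 *      static void my_lock_release (void)
 *      {
 *              InterlockedExchange (&my_lock, 0);
 *      }
 */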

static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
                                                  gpointer new_val)
{
        gpointer ret;

        __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
                              : "=m" (*val), "=a" (ret)
                              : "r" (new_val), "m" (*val), "a" (*val));

        return(ret);
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
        gint32 ret;

        __asm__ __volatile__ ("lock; xaddl %0, %1"
                              : "=r" (ret), "=m" (*val)
                              : "0" (add), "m" (*val));

        return(ret);
}

#elif defined(sparc) || defined (__sparc__)
#define WAPI_ATOMIC_ASM

/*
 * The SPARC versions are not lock-free: each operation spins on a
 * per-function byte lock, acquired with ldstub (atomic test-and-set)
 * and released by storing zero back.
 */
#define BEGIN_SPIN(tmp,lock) \
__asm__ __volatile__("1:        ldstub [%1],%0\n\t"  \
                             "          cmp %0, 0\n\t" \
                             "          bne 1b\n\t" \
                             "          nop" \
                             : "=&r" (tmp) \
                             : "r" (&lock) \
                             : "memory");

#define END_SPIN(lock) \
__asm__ __volatile__("stb       %%g0, [%0]"  \
                      : /* no outputs */ \
                      : "r" (&lock)\
                      : "memory");


static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
        static unsigned char lock;
        int tmp;
        gint32 old;

        BEGIN_SPIN(tmp,lock)

        old = *dest;
        if (old==comp) {
                *dest=exch;
        }

        END_SPIN(lock)

        return(old);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
        static unsigned char lock;
        int tmp;
        gpointer old;

        BEGIN_SPIN(tmp,lock)

        old = *dest;
        if (old==comp) {
                *dest=exch;
        }

        END_SPIN(lock)

        return(old);
}

static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
        static unsigned char lock;
        int tmp;
        gint32 ret;

        BEGIN_SPIN(tmp,lock)

        (*dest)++;
        ret = *dest;

        END_SPIN(lock)

        return(ret);
}

static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
        static unsigned char lock;
        int tmp;
        gint32 ret;

        BEGIN_SPIN(tmp,lock)

        (*dest)--;
        ret = *dest;

        END_SPIN(lock)

        return(ret);
}

static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
        static unsigned char lock;
        int tmp;
        gint32 ret;

        BEGIN_SPIN(tmp,lock)

        ret = *dest;
        *dest = exch;

        END_SPIN(lock)

        return(ret);
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
        static unsigned char lock;
        int tmp;
        gpointer ret;

        BEGIN_SPIN(tmp,lock)

        ret = *dest;
        *dest = exch;

        END_SPIN(lock)

        return(ret);
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
        static unsigned char lock;
        int tmp;
        gint32 ret;

        BEGIN_SPIN(tmp,lock)

        ret = *dest;
        *dest += add;

        END_SPIN(lock)

        return(ret);
}

#elif defined(__s390__)

#define WAPI_ATOMIC_ASM

static inline gint32 
InterlockedCompareExchange(volatile gint32 *dest,
                           gint32 exch, gint32 comp)
{
        gint32 old;

        __asm__ __volatile__ ("\tLR\t%1,%3\n"
                              "\tCS\t%1,%2,%0\n"
                              : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "cc");
        return(old);
}

#define InterlockedCompareExchangePointer InterlockedCompareExchange

static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
        gint32 tmp;

        __asm__ __volatile__ ("0:\tL\t%0,%1\n"
                              "\tLR\t1,%0\n"
                              "\tAHI\t1,1\n"
                              "\tCS\t%0,1,%1\n"
                              "\tJNZ\t0b"
                              : "=&r" (tmp), "+m" (*val)
                              : : "1", "cc");

        return(tmp+1);
}

static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
        gint32 tmp;

        __asm__ __volatile__ ("0:\tL\t%0,%1\n"
                              "\tLR\t1,%0\n"
                              "\tAHI\t1,-1\n"
                              "\tCS\t%0,1,%1\n"
                              "\tJNZ\t0b"
                              : "=&r" (tmp), "+m" (*val)
                              : : "1", "cc");

        return(tmp-1);
}


static inline gint32 
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
        gint32 ret;

        __asm__ __volatile__ ("0:\tL\t%1,%0\n"
                              "\tCS\t%1,%2,%0\n"
                              "\tJNZ\t0b"
                              : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "cc");

        return(ret);
}

#define InterlockedExchangePointer InterlockedExchange

static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
        gint32 ret;

        __asm__ __volatile__ ("0:\tL\t%0,%1\n"
                              "\tLR\t1,%0\n"
                              "\tAR\t1,%2\n"
                              "\tCS\t%0,1,%1\n"
                              "\tJNZ\t0b"
                              : "=&r" (ret), "+m" (*val)
                              : "r" (add)
                              : "1", "cc");

        return(ret);
}

#elif defined(__ppc__)
#define WAPI_ATOMIC_ASM

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
        gint32 tmp;

        __asm__ __volatile__ ("1:\tlwarx  %0, 0, %2\n\t"
                              "addi   %0, %0, 1\n\t"
                              "stwcx. %0, 0, %2\n\t"
                              "bne-   1b"
                              : "=r" (tmp) : "0" (tmp), "r" (val)
                              : "cc", "memory");
        return(tmp);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
        gint32 tmp;

        __asm__ __volatile__ ("1:\tlwarx  %0, 0, %2\n\t"
                              "addi   %0, %0, -1\n\t"
                              "stwcx. %0, 0, %2\n\t"
                              "bne-   1b"
                              : "=r" (tmp) : "0" (tmp), "r" (val)
                              : "cc", "memory");
        return(tmp);
}

#define InterlockedCompareExchangePointer InterlockedCompareExchange

static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
                                                gint32 exch, gint32 comp) {
        gint32 tmp = 0;

        __asm__ __volatile__ ("1:\tlwarx   %0, 0, %1\n\t"
                             "cmpw    %2, %3\n\t"
                             "bne-    2f\n\t"
                             "stwcx.  %4, 0, %1\n\t"
                             "bne-    1b\n"
                             "2:"
                             : "=r" (tmp)
                             : "r" (dest), "0" (tmp), "r" (comp), "r" (exch)
                             : "cc", "memory");
        return(tmp);
}

static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
        gint32 tmp;

        __asm__ __volatile__ ("1:\tlwarx  %0, 0, %1\n\t"
                              "stwcx. %2, 0, %1\n\t"
                              "bne    1b"
                              : "=&r" (tmp) : "r" (dest), "r" (exch)
                              : "cc", "memory");
        return(tmp);
}
#define InterlockedExchangePointer InterlockedExchange

static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
        gint32 tmp;

        __asm__ __volatile__ ("1:\tlwarx  %0, 0, %2\n\t"
                              "add    %1, %3, %4\n\t"
                              "stwcx. %1, 0, %2\n\t"
                              "bne    1b"
                              : "=r" (tmp), "=r" (add)
                              : "r" (dest), "0" (tmp), "1" (add)
                              : "cc", "memory");
        return(tmp);
}

#elif defined(__arm__)
#define WAPI_ATOMIC_ASM

static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
        int a, b;

        __asm__ __volatile__ (    "0:\n\t"
                                  "ldr %1, [%2]\n\t"
                                  "cmp %1, %4\n\t"
                                  "movne %0, %1\n\t"
                                  "bne 1f\n\t"
                                  "swp %0, %3, [%2]\n\t"
                                  "cmp %0, %1\n\t"
                                  "swpne %3, %0, [%2]\n\t"
                                  "bne 0b\n\t"
                                  "1:"
                                  : "=&r" (a), "=&r" (b)
                                  : "r" (dest), "r" (exch), "r" (comp)
                                  : "cc", "memory");

        return a;
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
        gpointer a, b;

        __asm__ __volatile__ (    "0:\n\t"
                                  "ldr %1, [%2]\n\t"
                                  "cmp %1, %4\n\t"
                                  "movne %0, %1\n\t"
                                  "bne 1f\n\t"
                                  "swpeq %0, %3, [%2]\n\t"
                                  "cmp %0, %1\n\t"
                                  "swpne %3, %0, [%2]\n\t"
                                  "bne 0b\n\t"
                                  "1:"
                                  : "=&r" (a), "=&r" (b)
                                  : "r" (dest), "r" (exch), "r" (comp)
                                  : "cc", "memory");

        return a;
}

static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
        int a, b, c;

        __asm__ __volatile__ (  "0:\n\t"
                                "ldr %0, [%3]\n\t"
                                "add %1, %0, %4\n\t"
                                "swp %2, %1, [%3]\n\t"
                                "cmp %0, %2\n\t"
                                "swpne %1, %2, [%3]\n\t"
                                "bne 0b"
                                : "=&r" (a), "=&r" (b), "=&r" (c)
                                : "r" (dest), "r" (1)
                                : "cc", "memory");

        return b;
}

static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
        int a, b, c;

        __asm__ __volatile__ (  "0:\n\t"
                                "ldr %0, [%3]\n\t"
                                "add %1, %0, %4\n\t"
                                "swp %2, %1, [%3]\n\t"
                                "cmp %0, %2\n\t"
                                "swpne %1, %2, [%3]\n\t"
                                "bne 0b"
                                : "=&r" (a), "=&r" (b), "=&r" (c)
                                : "r" (dest), "r" (-1)
                                : "cc", "memory");

        return b;
}

static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
        int a;

        __asm__ __volatile__ (  "swp %0, %2, [%1]"
                                : "=&r" (a)
                                : "r" (dest), "r" (exch));

        return a;
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
        gpointer a;

        __asm__ __volatile__ (  "swp %0, %2, [%1]"
                                : "=&r" (a)
                                : "r" (dest), "r" (exch));

        return a;
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
        int a, b, c;

        __asm__ __volatile__ (  "0:\n\t"
                                "ldr %0, [%3]\n\t"
                                "add %1, %0, %4\n\t"
                                "swp %2, %1, [%3]\n\t"
                                "cmp %0, %2\n\t"
                                "swpne %1, %2, [%3]\n\t"
                                "bne 0b"
                                : "=&r" (a), "=&r" (b), "=&r" (c)
                                : "r" (dest), "r" (add)
                                : "cc", "memory");

        return a;
}

#else

extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
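
/*
 * The declarations above are satisfied by out-of-line definitions elsewhere
 * in the io-layer.  A minimal portable sketch (an illustration under that
 * assumption, not necessarily how the real fall-back is written; atomic_lock
 * is a hypothetical name) could serialise every operation with a single
 * pthread mutex:
 *
 *      #include <pthread.h>
 *
 *      static pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *      gint32 InterlockedExchangeAdd (volatile gint32 *dest, gint32 add)
 *      {
 *              gint32 ret;
 *
 *              pthread_mutex_lock (&atomic_lock);
 *              ret = *dest;
 *              *dest += add;
 *              pthread_mutex_unlock (&atomic_lock);
 *
 *              return(ret);
 *      }
 */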
#endif

#endif /* _WAPI_ATOMIC_H_ */