0b44c59cf3b47f3ba1b3b7da5702775badf7a687
[mono.git] / mono / io-layer / atomic.h
1 /*
2  * atomic.h:  Atomic operations
3  *
4  * Author:
5  *      Dick Porter (dick@ximian.com)
6  *
7  * (C) 2002 Ximian, Inc.
8  */
9
10 #ifndef _WAPI_ATOMIC_H_
11 #define _WAPI_ATOMIC_H_
12
13 #include <glib.h>
14
15 #include "mono/io-layer/wapi.h"
16
17 #if defined(__i386__) || defined(__x86_64__)
18 #define WAPI_ATOMIC_ASM
19
20 /*
21  * NB: The *Pointer() functions here assume that
22  * sizeof(pointer)==sizeof(gint32)
23  *
24  * NB2: These asm functions assume 486+ (some of the opcodes dont
25  * exist on 386).  If this becomes an issue, we can get configure to
26  * fall back to the non-atomic C versions of these calls.
27  */
28
29 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
30                                                 gint32 exch, gint32 comp)
31 {
32         gint32 old;
33
34         __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
35                               : "=m" (*dest), "=a" (old)
36                               : "r" (exch), "m" (*dest), "a" (comp));   
37         return(old);
38 }
39
40 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
41 {
42         gpointer old;
43
44         __asm__ __volatile__ ("lock; "
45 #ifdef __x86_64__
46                               "cmpxchgq"
47 #else
48                               "cmpxchgl"
49 #endif
50                               " %2, %0"
51                               : "=m" (*dest), "=a" (old)
52                               : "r" (exch), "m" (*dest), "a" (comp));   
53
54         return(old);
55 }
56
57 static inline gint32 InterlockedIncrement(volatile gint32 *val)
58 {
59         gint32 tmp;
60         
61         __asm__ __volatile__ ("lock; xaddl %0, %1"
62                               : "=r" (tmp), "=m" (*val)
63                               : "0" (1), "m" (*val));
64
65         return(tmp+1);
66 }
67
68 static inline gint32 InterlockedDecrement(volatile gint32 *val)
69 {
70         gint32 tmp;
71         
72         __asm__ __volatile__ ("lock; xaddl %0, %1"
73                               : "=r" (tmp), "=m" (*val)
74                               : "0" (-1), "m" (*val));
75
76         return(tmp-1);
77 }
78
79 /*
80  * See
81  * http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
82  * for the reasons for using cmpxchg and a loop here.
83  *
84  * That url is no longer valid, but it's still in the google cache at the
85  * moment: http://www.google.com/search?q=cache:http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
86  *
87  * For the time being, http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
88  * might work.  Bet it will change soon enough though.
89  */
90 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
91 {
92         gint32 ret;
93         
94         __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
95                               : "=m" (*val), "=a" (ret)
96                               : "r" (new_val), "m" (*val), "a" (*val));
97
98         return(ret);
99 }
100
101 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
102                                                   gpointer new_val)
103 {
104         gpointer ret;
105         
106         __asm__ __volatile__ ("1:; lock; "
107 #ifdef __x86_64__
108                               "cmpxchgq"
109 #else
110                               "cmpxchgl"
111 #endif
112                               " %2, %0; jne 1b"
113                               : "=m" (*val), "=a" (ret)
114                               : "r" (new_val), "m" (*val), "a" (*val));
115
116         return(ret);
117 }
118
119 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
120 {
121         gint32 ret;
122         
123         __asm__ __volatile__ ("lock; xaddl %0, %1"
124                               : "=r" (ret), "=m" (*val)
125                               : "0" (add), "m" (*val));
126         
127         return(ret);
128 }
129
130 #elif defined(sparc) || defined (__sparc__)
131 #define WAPI_ATOMIC_ASM
132
133 #ifdef __GNUC__
/*
 * BEGIN_SPIN(tmp,lock): acquire the byte spin lock 'lock'.
 * ldstub atomically reads the lock byte into tmp and sets the byte to
 * 0xff; the loop retries while the old value was non-zero (i.e. some
 * other thread already holds the lock).  The "memory" clobber makes
 * this a compiler barrier on entry to the critical section.
 */
#define BEGIN_SPIN(tmp,lock) \
__asm__ __volatile__("1:        ldstub [%1],%0\n\t"  \
                             "          cmp %0, 0\n\t" \
                             "          bne 1b\n\t" \
                             "          nop" \
                             : "=&r" (tmp) \
                             : "r" (&lock) \
                             : "memory"); 
142
/*
 * END_SPIN(lock): release the spin lock by storing zero (%g0) into
 * the lock byte.  "memory" clobber = compiler barrier on exit.
 */
#define END_SPIN(lock) \
__asm__ __volatile__("stb       %%g0, [%0]"  \
                      : /* no outputs */ \
                      : "r" (&lock)\
                      : "memory");
148 #else
/*
 * Non-GCC fallback: spin on the lock byte using separate asm()
 * statements.  NOTE(review): this relies on compiler-specific
 * register conventions (%i0 as the first argument, %l0 as a free
 * local) and on the asm statements not being reordered or optimised
 * away -- confirm against the non-GNU SPARC compiler actually used.
 */
static inline void begin_spin(volatile unsigned char *lock)
{
        asm("1: ldstub [%i0], %l0");
        asm("cmp %l0,0");
        asm("bne 1b");
        asm("nop");
}
/* 'tmp' is unused in this variant; the asm uses %l0 directly. */
#define BEGIN_SPIN(tmp,lock) begin_spin(&lock);
/* Release: a plain byte store of zero. */
#define END_SPIN(lock) ((lock) = 0);
158 #endif
159
160 extern volatile unsigned char _wapi_sparc_lock;
161
162 G_GNUC_UNUSED 
163 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
164 {
165         int tmp;
166         gint32 old;
167
168         BEGIN_SPIN(tmp,_wapi_sparc_lock)
169
170         old = *dest;
171         if (old==comp) {
172                 *dest=exch;
173         }
174
175         END_SPIN(_wapi_sparc_lock)
176
177         return(old);
178 }
179
180 G_GNUC_UNUSED 
181 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
182 {
183         int tmp;
184         gpointer old;
185
186         BEGIN_SPIN(tmp,_wapi_sparc_lock)
187
188         old = *dest;
189         if (old==comp) {
190                 *dest=exch;
191         }
192
193         END_SPIN(_wapi_sparc_lock)
194
195         return(old);
196 }
197
198 G_GNUC_UNUSED 
199 static inline gint32 InterlockedIncrement(volatile gint32 *dest)
200 {
201         int tmp;
202         gint32 ret;
203
204         BEGIN_SPIN(tmp,_wapi_sparc_lock)
205
206         (*dest)++;
207         ret = *dest;
208
209         END_SPIN(_wapi_sparc_lock)
210
211         return(ret);
212 }
213
214 G_GNUC_UNUSED 
215 static inline gint32 InterlockedDecrement(volatile gint32 *dest)
216 {
217         int tmp;
218         gint32 ret;
219
220         BEGIN_SPIN(tmp,_wapi_sparc_lock)
221
222         (*dest)--;
223         ret = *dest;
224
225         END_SPIN(_wapi_sparc_lock)
226
227         return(ret);
228 }
229
230 G_GNUC_UNUSED
231 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
232 {
233         int tmp;
234         gint32 ret;
235
236         BEGIN_SPIN(tmp,_wapi_sparc_lock)
237
238         ret = *dest;
239         *dest = exch;
240
241         END_SPIN(_wapi_sparc_lock)
242
243         return(ret);
244 }
245
246 G_GNUC_UNUSED
247 static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
248 {
249         int tmp;
250         gpointer ret;
251
252         BEGIN_SPIN(tmp,_wapi_sparc_lock)
253
254         ret = *dest;
255         *dest = exch;
256
257         END_SPIN(_wapi_sparc_lock)
258
259         return(ret);
260 }
261
262 G_GNUC_UNUSED
263 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
264 {
265         int tmp;
266         gint32 ret;
267
268         BEGIN_SPIN(tmp,_wapi_sparc_lock)
269
270         ret = *dest;
271         *dest += add;
272
273         END_SPIN(_wapi_sparc_lock)
274
275         return(ret);
276 }
277
278 #elif __s390__
279
280 #define WAPI_ATOMIC_ASM
281
/*
 * InterlockedCompareExchange (s390): CS-based compare-and-swap.
 * Loads *dest into 'old', compares it with comp, and if equal tries a
 * Compare-and-Swap; CS failure (the value changed underneath) restarts
 * the loop.  Returns the last value read from *dest (Win32 semantics).
 * Clobbers hard register r1 (used as the CS address register) and the
 * condition code.
 * NOTE(review): 'old' is declared "+r" but is written by the L before
 * any read, so its uninitialised input value is never used.
 */
static inline gint32 
InterlockedCompareExchange(volatile gint32 *dest,
			   gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCR\t%1,%3\n"
			      "\tJNE\t1f\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b\n"
			      "1:\n"
			      : "+m" (*dest), "+r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");
	return(old);
}
300
301 #ifndef __s390x__
302 #  define InterlockedCompareExchangePointer InterlockedCompareExchange
303 # else
/*
 * InterlockedCompareExchangePointer (s390x): 64-bit variant of the
 * compare-and-swap above, using LG/CGR/CSG.  Returns the last value
 * read from *dest.  Clobbers hard register r1 and the condition code.
 * (On 31-bit s390 the 32-bit function is #define'd to this name
 * instead, since pointers are word-sized there.)
 */
static inline gpointer 
InterlockedCompareExchangePointer(volatile gpointer *dest, 
				  gpointer exch, 
				  gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tLG\t%1,%0\n"
			      "\tCGR\t%1,%3\n"
			      "\tJNE\t1f\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      "\tJNZ\t0b\n"
			      "1:\n"
			      : "+m" (*dest), "+r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");

	return(old);
}
324 # endif
325
326
/*
 * InterlockedIncrement (s390): CS retry loop.
 * Loads *val into tmp, computes tmp+1 in r1, and uses CS to store it;
 * on contention the loop reloads and retries.  The final LR copies
 * the stored (new) value into tmp, so the NEW value is returned
 * (Win32 semantics).  Clobbers hard registers r1 and r2 plus the
 * condition code.
 */
static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
344
/*
 * InterlockedDecrement (s390): identical structure to
 * InterlockedIncrement above but adds -1.  Returns the NEW value
 * (Win32 semantics).  Clobbers hard registers r1 and r2 plus the
 * condition code.
 */
static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,-1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
362
363
/*
 * InterlockedExchange (s390): CS retry loop.
 * Loads the current value of *val into ret, then CS swaps new_val in;
 * if *val changed in between, CS fails (reloading ret) and the loop
 * retries.  Returns the previous value of *val.  Clobbers hard
 * register r1 and the condition code.
 * NOTE(review): 'ret' is "+r" but the asm loads it (L) before any
 * use, so the uninitialised input value is harmless.
 */
static inline gint32 
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "+r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
379
380 # ifndef __s390x__
381 #  define InterlockedExchangePointer InterlockedExchange
382 # else
383 static inline gpointer
384 InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
385 {
386         gpointer ret;
387         
388         __asm__ __volatile__ ("\tLA\t1,%1\n"
389                               "0:\tLG\t%1,%0\n"
390                               "\tCSG\t%1,%2,0(1)\n"
391                               "\tJNZ\t0b"
392                               : "+m" (*val), "+r" (ret)
393                               : "r" (new_val)
394                               : "1", "cc");
395
396         return(ret);
397 }
398 # endif
399
/*
 * InterlockedExchangeAdd (s390): CS retry loop computing *val + add
 * in r1.  Returns the PREVIOUS value of *val (the value loaded by L
 * before the successful CS), per Win32 semantics.  Clobbers hard
 * registers r1 and r2 plus the condition code.
 */
static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAR\t1,%2\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b"
			      : "=r" (ret), "+m" (*val)
			      : "r" (add) 
			      : "1", "2", "cc");

	return(ret);
}
417
418 #elif defined(__ppc__) || defined (__powerpc__)
419 #define WAPI_ATOMIC_ASM
420
/*
 * InterlockedIncrement (ppc): lwarx/stwcx. reservation loop.
 * tmp receives the value loaded before the increment, so the NEW
 * value returned is tmp + 1 (Win32 semantics).
 * NOTE(review): operand %1 is declared as an input ("r" (tmp)) but is
 * written by addi/stwcx. -- this works only if the allocator picks a
 * safe register; it should arguably be an earlyclobber output.
 * Confirm before relying on it with newer compilers.
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, 1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (tmp): "r" (tmp), "r" (val): "cc", "memory");
	return tmp + 1;
}
433
/*
 * InterlockedDecrement (ppc): same lwarx/stwcx. loop as
 * InterlockedIncrement but adds -1; returns the NEW value (tmp - 1).
 * NOTE(review): shares the dubious write-to-input-operand %1 pattern
 * described on InterlockedIncrement -- confirm with the compilers in
 * use.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, -1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (tmp) : "r" (tmp), "r" (val): "cc", "memory");
	return tmp - 1;
}
446
447 #define InterlockedCompareExchangePointer InterlockedCompareExchange
448
/*
 * InterlockedCompareExchange (ppc): lwarx/stwcx. compare-and-swap.
 * Loads *dest into tmp (%2 is tied to %0 via the "0" constraint),
 * compares with comp; on mismatch falls through to 2: and returns the
 * loaded value; on match stwcx. attempts the store and the loop
 * retries if the reservation was lost.  Returns the value observed in
 * *dest (Win32 semantics).
 * NOTE(review): %0 is not earlyclobber ("=r", not "=&r"), so the
 * allocator could in principle assign it the same register as %1
 * (dest) -- confirm this cannot misfire.  Also note the file-level
 * assumption that InterlockedCompareExchangePointer aliases this
 * function, i.e. sizeof(gpointer)==sizeof(gint32) here.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			     "lwarx   %0, 0, %1\n\t"
			     "cmpw    %2, %3\n\t" 
			     "bne-    2f\n\t"
			     "stwcx.  %4, 0, %1\n\t"
			     "bne-    1b\n"
			     "2:"
			     : "=r" (tmp)
			     : "r" (dest), "0" (tmp) ,"r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
464
/*
 * InterlockedExchange (ppc): lwarx/stwcx. swap loop; returns the
 * previous value of *dest.
 * NOTE(review): the final branch is "bne" without the "-" static
 * not-taken hint used elsewhere in this file -- harmless, but
 * inconsistent.
 */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "stwcx. %3, 0, %2\n\t"
			      "bne    1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
476 #define InterlockedExchangePointer InterlockedExchange
477
/*
 * InterlockedExchangeAdd (ppc): lwarx/stwcx. fetch-and-add.
 * %0 (tied to %3) receives the loaded value; %1 (tied to %4, 'add')
 * receives old + add and is stored by stwcx.  Returns the PREVIOUS
 * value of *dest (Win32 semantics).
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "add    %1, %3, %4\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne    1b"
			      : "=r" (tmp), "=r" (add)
			      : "r" (dest), "0" (tmp), "1" (add) : "cc", "memory");
	return(tmp);
}
491
492 #elif defined(__arm__)
493 #define WAPI_ATOMIC_ASM
494
495 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
496 {
497         int a, b;
498
499         __asm__ __volatile__ (    "0:\n\t"
500                                   "ldr %1, [%2]\n\t"
501                                   "cmp %1, %4\n\t"
502                                   "bne 1f\n\t"
503                                   "swp %0, %3, [%2]\n\t"
504                                   "cmp %0, %1\n\t"
505                                   "swpne %3, %0, [%2]\n\t"
506                                   "bne 0b\n\t"
507                                   "1:"
508                                   : "=&r" (a), "=&r" (b)
509                                   : "r" (dest), "r" (exch), "r" (comp)
510                                   : "cc", "memory");
511
512         return a;
513 }
514
515 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
516 {
517         gpointer a, b;
518
519         __asm__ __volatile__ (    "0:\n\t"
520                                   "ldr %1, [%2]\n\t"
521                                   "cmp %1, %4\n\t"
522                                   "bne 1f\n\t"
523                                   "swpeq %0, %3, [%2]\n\t"
524                                   "cmp %0, %1\n\t"
525                                   "swpne %3, %0, [%2]\n\t"
526                                   "bne 0b\n\t"
527                                   "1:"
528                                   : "=&r" (a), "=&r" (b)
529                                   : "r" (dest), "r" (exch), "r" (comp)
530                                   : "cc", "memory");
531
532         return a;
533 }
534
/*
 * InterlockedIncrement (arm): swp-based read-modify-write.
 * Loads *dest into a, computes b = a + 1, swaps b in (old value lands
 * in c); if c differs from a, another thread got in between, so b is
 * swapped back out... actually the intervening value c is restored
 * -- NOTE(review): the restore writes c back over b, briefly exposing
 * intermediate values to concurrent readers; confirm acceptable for
 * the supported (pre-SMP) ARM targets, where swp itself is also
 * deprecated.  Returns b, the NEW value (Win32 semantics).
 */
static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
	int a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (1)
				: "cc", "memory");

	return b;
}
552
/*
 * InterlockedDecrement (arm): identical structure to
 * InterlockedIncrement above, adding -1 instead of 1.  Returns b, the
 * NEW value (Win32 semantics).  Same swp-era caveats apply.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
	int a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (-1)
				: "cc", "memory");

	return b;
}
570
/*
 * InterlockedExchange (arm): a single atomic swp.
 * Stores exch into *dest and returns the previous value.
 * NOTE(review): unlike the other ARM functions here there is no
 * "memory" clobber, so this is not a compiler barrier -- confirm
 * callers do not rely on ordering around it.
 */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	int a;

	__asm__ __volatile__ (  "swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
}
581
/*
 * InterlockedExchangePointer (arm): pointer-sized single swp (pointers
 * are 32-bit here).  Returns the previous value of *dest.
 * NOTE(review): no "memory" clobber, so not a compiler barrier -- see
 * InterlockedExchange above.
 */
static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
	gpointer a;

	__asm__ __volatile__ (  "swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
}
592
/*
 * InterlockedExchangeAdd (arm): same swp/restore/retry structure as
 * InterlockedIncrement, adding 'add'.  Returns a, the PREVIOUS value
 * of *dest (Win32 ExchangeAdd semantics), unlike Increment/Decrement
 * which return the new value.  Same swp-era caveats apply.
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	int a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (add)
				: "cc", "memory");

	return a;
}
610
611 #else
612
/* Fallback for architectures with no inline-asm implementation above:
 * out-of-line versions with the same Win32 semantics, defined
 * elsewhere in the io-layer (presumably serialised with a mutex --
 * confirm against the corresponding .c file). */
extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
620
621 #if defined(__hpux) && !defined(__GNUC__)
622 #define WAPI_ATOMIC_ASM
623 #endif
624
625 #endif
626
627 #endif /* _WAPI_ATOMIC_H_ */