[runtime] Fix the mingw build by defining InterlockedCompareExchange64 () which is...
[mono.git] / mono / utils / atomic.h
1 /*
2  * atomic.h:  Atomic operations
3  *
4  * Author:
5  *      Dick Porter (dick@ximian.com)
6  *
7  * (C) 2002 Ximian, Inc.
8  * Copyright 2012 Xamarin Inc
9  */
10
11 #ifndef _WAPI_ATOMIC_H_
12 #define _WAPI_ATOMIC_H_
13
14 #if defined(__NetBSD__)
15 #include <sys/param.h>
16
17 #if __NetBSD_Version__ > 499004000
18 #include <sys/atomic.h>
19 #define HAVE_ATOMIC_OPS
20 #endif
21
22 #endif
23
24 #include "config.h"
25 #include <glib.h>
26
27 /* On Windows, we always use the functions provided by the Windows API. */
28 #if defined(__WIN32__) || defined(_WIN32)
29
30 #include <windows.h>
31 #define HAS_64BITS_ATOMICS 1
32
33 /* mingw is missing InterlockedCompareExchange64 () from winbase.h */
34 #ifdef __MINGW32__
35 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
36 {
37         return __sync_val_compare_and_swap (dest, comp, exch);
38 }
39 #endif
40
41 /* Prefer GCC atomic ops if the target supports it (see configure.in). */
42 #elif defined(USE_GCC_ATOMIC_OPS)
43
44 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
45                                                 gint32 exch, gint32 comp)
46 {
47         return __sync_val_compare_and_swap (dest, comp, exch);
48 }
49
50 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
51 {
52         return __sync_val_compare_and_swap (dest, comp, exch);
53 }
54
55 static inline gint32 InterlockedIncrement(volatile gint32 *val)
56 {
57         return __sync_add_and_fetch (val, 1);
58 }
59
60 static inline gint32 InterlockedDecrement(volatile gint32 *val)
61 {
62         return __sync_add_and_fetch (val, -1);
63 }
64
65 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
66 {
67         gint32 old_val;
68         do {
69                 old_val = *val;
70         } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
71         return old_val;
72 }
73
74 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
75                                                   gpointer new_val)
76 {
77         gpointer old_val;
78         do {
79                 old_val = *val;
80         } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
81         return old_val;
82 }
83
84 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
85 {
86         return __sync_fetch_and_add (val, add);
87 }
88
89 /*All Apple targets have broken compilers*/
90 #if defined (TARGET_MACH)
91 #define BROKEN_64BIT_ATOMICS_INTRINSIC 1
92 #endif
93
94
95 #if !defined (BROKEN_64BIT_ATOMICS_INTRINSIC)
96 #define HAS_64BITS_ATOMICS 1
97
98 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
99 {
100         return __sync_val_compare_and_swap (dest, comp, exch);
101 }
102
103 #endif
104
105
106 #elif defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)
107
/* CAS on a 32-bit value; returns the previous contents of *dest
 * (Win32 InterlockedCompareExchange semantics). */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
       gint32 exch, gint32 comp)
{
       /* NetBSD atomic_cas_32(ptr, expected, new) returns the old value. */
       return atomic_cas_32((uint32_t*)dest, comp, exch);
}
113
/* Pointer-sized CAS; returns the previous contents of *dest. */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
       return atomic_cas_ptr(dest, comp, exch);
}
118
/* Atomic increment; atomic_inc_32_nv returns the NEW value, which matches
 * Win32 InterlockedIncrement. */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
       return atomic_inc_32_nv((uint32_t*)val);
}
123
/* Atomic decrement; atomic_dec_32_nv returns the NEW value, which matches
 * Win32 InterlockedDecrement. */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
       return atomic_dec_32_nv((uint32_t*)val);
}
128
/* Atomic swap; atomic_swap_32 returns the value that was replaced. */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
       return atomic_swap_32((uint32_t*)val, new_val);
}
133
/* Atomic pointer swap; returns the pointer that was replaced. */
static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
               gpointer new_val)
{
       return atomic_swap_ptr(val, new_val);
}
139
/* Atomically add `add` to *val and return the PREVIOUS value.
 * atomic_add_32_nv returns the new value, so subtract `add` back out. */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
       return atomic_add_32_nv((uint32_t*)val, add) - add;
}
144
145 #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
146
/* 32-bit CAS via the SPARC `cas` instruction, hand-encoded as .word because
 * older assemblers don't know the mnemonic.  The instruction requires its
 * operands in fixed registers (%g1 = address, %o4 = comparand, %o5 = new
 * value); on return %o5 holds the previous memory contents, which is what
 * Win32 InterlockedCompareExchange returns. */
G_GNUC_UNUSED 
static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 comp asm("o4") = _comp;
       register gint32 exch asm("o5") = _exch;

       __asm__ __volatile__(
               /* cas [%%g1], %%o4, %%o5 */
               ".word 0xdbe0500c"
               : "=r" (exch)
               : "0" (exch), "r" (dest), "r" (comp)
               : "memory");

       return exch;
}
163
/* Pointer-sized CAS: `casx` (64-bit) on SPARC V9, plain `cas` (32-bit) on
 * older SPARCs where pointers are 32 bits.  Returns the previous contents
 * of *_dest.  Opcodes are hand-encoded, see InterlockedCompareExchange. */
G_GNUC_UNUSED 
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
{
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer comp asm("o4") = _comp;
       register gpointer exch asm("o5") = _exch;

       __asm__ __volatile__(
#ifdef SPARCV9
               /* casx [%%g1], %%o4, %%o5 */
               ".word 0xdbf0500c"
#else
               /* cas [%%g1], %%o4, %%o5 */
               ".word 0xdbe0500c"
#endif
               : "=r" (exch)
               : "0" (exch), "r" (dest), "r" (comp)
               : "memory");

       return exch;
}
185
/* Atomic increment via a cas retry loop.  After a successful cas %o5 holds
 * the OLD value; the branch delay slot (executed on both paths) adds 1, so
 * the NEW value is returned — Win32 InterlockedIncrement semantics. */
G_GNUC_UNUSED 
static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
               "1:     ld      [%%g1], %%o4\n\t"
               "       add     %%o4, 1, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        add    %%o5, 1, %%o5"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest)
               : "memory", "cc");

        return ret;
}
207
/* Atomic decrement, mirror image of InterlockedIncrement above: the delay
 * slot subtracts 1 from the old value left in %o5, so the NEW value is
 * returned (Win32 InterlockedDecrement semantics). */
G_GNUC_UNUSED 
static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
               "1:     ld      [%%g1], %%o4\n\t"
               "       sub     %%o4, 1, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        sub    %%o5, 1, %%o5"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest)
               : "memory", "cc");

        return ret;
}
229
/* Atomic 32-bit swap via a cas retry loop.  After a successful cas %o5
 * holds the value that was replaced; the delay slot is a nop, so the OLD
 * value is returned (Win32 InterlockedExchange semantics). */
G_GNUC_UNUSED
static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
               "1:     ld      [%%g1], %%o4\n\t"
               "       mov     %3, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        nop"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest), "r" (exch)
               : "memory", "cc");

        return ret;
}
251
/* Atomic pointer swap: ldx/casx on SPARC V9 (64-bit pointers), ld/cas on
 * older SPARCs (32-bit pointers).  Returns the pointer that was replaced. */
G_GNUC_UNUSED
static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
{
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer tmp asm("o4");
       register gpointer ret asm("o5");

       __asm__ __volatile__(
#ifdef SPARCV9
               "1:     ldx     [%%g1], %%o4\n\t"
#else
               "1:     ld      [%%g1], %%o4\n\t"
#endif
               "       mov     %3, %%o5\n\t"
#ifdef SPARCV9
               /*      casx    [%%g1], %%o4, %%o5 */
               "       .word   0xdbf0500c\n\t"
#else
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
#endif
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        nop"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest), "r" (exch)
               : "memory", "cc");

        return ret;
}
282
283 G_GNUC_UNUSED
284 static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
285 {
286        register volatile gint32 *dest asm("g1") = _dest;
287        register gint32 tmp asm("o4");
288        register gint32 ret asm("o5");
289
290        __asm__ __volatile__(
291                "1:     ld      [%%g1], %%o4\n\t"
292                "       add     %%o4, %3, %%o5\n\t"
293                /*      cas     [%%g1], %%o4, %%o5 */
294                "       .word   0xdbe0500c\n\t"
295                "       cmp     %%o4, %%o5\n\t"
296                "       bne     1b\n\t"
297                "        add    %%o5, %3, %%o5"
298                : "=&r" (tmp), "=&r" (ret)
299                : "r" (dest), "r" (add)
300                : "memory", "cc");
301
302         return ret;
303 }
304
305 #elif __s390x__
306
/* 32-bit CAS using the s390x CS (compare-and-swap) instruction.  Register 1
 * is loaded with the address of *dest, the comparand is copied into the
 * output register, and CS swaps in `exch` iff memory still equals it.
 * Returns the previous contents of *dest. */
static inline gint32 
InterlockedCompareExchange(volatile gint32 *dest,
                           gint32 exch, gint32 comp)
{
        gint32 old;

        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLR\t%1,%3\n"
                              "\tCS\t%1,%2,0(1)\n"
                              : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");     
        return(old);
}
321
/* Pointer-sized CAS using CSG (64-bit compare-and-swap) — pointers are
 * 64-bit on s390x.  Returns the previous contents of *dest. */
static inline gpointer 
InterlockedCompareExchangePointer(volatile gpointer *dest, 
                                  gpointer exch, 
                                  gpointer comp)
{
        gpointer old;

        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLGR\t%1,%3\n"
                              "\tCSG\t%1,%2,0(1)\n"
                              : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");

        return(old);
}
338
/* Atomic increment: load *val, add 1 in register 1, CS back; retry (JNZ)
 * until no other CPU raced the update.  The final LGFR copies the stored
 * value out, so the NEW value is returned (Win32 semantics). */
static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
        gint32 tmp;
        
        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGHI\t1,1\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b\n"
                              "\tLGFR\t%0,1"
                              : "=r" (tmp), "+m" (*val)
                              : : "1", "2", "cc");

        return(tmp);
}
356
/* Atomic decrement, mirror of InterlockedIncrement above (AGHI 1,-1).
 * Returns the NEW (decremented) value. */
static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
        gint32 tmp;
        
        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGHI\t1,-1\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b\n"
                              "\tLGFR\t%0,1"
                              : "=r" (tmp), "+m" (*val)
                              : : "1", "2", "cc");

        return(tmp);
}
374
/* Atomic 32-bit swap via a CS retry loop; `ret` keeps the value observed
 * before the successful store, so the OLD value is returned. */
static inline gint32 
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
        gint32 ret;
        
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "0:\tL\t%1,%0\n"
                              "\tCS\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
                              : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");

        return(ret);
}
390
/* Atomic pointer swap using LG/CSG (64-bit forms); returns the pointer
 * that was replaced. */
static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
        gpointer ret;
        
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "0:\tLG\t%1,%0\n"
                              "\tCSG\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
                              : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");

        return(ret);
}
406
/* Atomically add `add` to *val via a CS retry loop.  `ret` holds the value
 * loaded before the successful store, so the PREVIOUS value is returned
 * (Win32 InterlockedExchangeAdd semantics). */
static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
        gint32 ret;

        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGR\t1,%2\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b"
                              : "=&r" (ret), "+m" (*val)
                              : "r" (add) 
                              : "1", "2", "cc");
        
        return(ret);
}
424
425 #elif defined(__ia64__)
426
427 #ifdef __INTEL_COMPILER
428 #include <ia64intrin.h>
429 #endif
430
431 static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
432                                                 gint32 exch, gint32 comp)
433 {
434         gint32 old;
435         guint64 real_comp;
436
437 #ifdef __INTEL_COMPILER
438         old = _InterlockedCompareExchange (dest, exch, comp);
439 #else
440         /* cmpxchg4 zero extends the value read from memory */
441         real_comp = (guint64)(guint32)comp;
442         asm volatile ("mov ar.ccv = %2 ;;\n\t"
443                                   "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
444                                   : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
445 #endif
446
447         return(old);
448 }
449
/* Pointer-sized (64-bit) CAS via cmpxchg8.acq or the icc intrinsic;
 * returns the previous contents of *dest. */
static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
                                                gpointer exch, gpointer comp)
{
        gpointer old;

#ifdef __INTEL_COMPILER
        old = _InterlockedCompareExchangePointer (dest, exch, comp);
#else
        asm volatile ("mov ar.ccv = %2 ;;\n\t"
                                  "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
                                  : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
#endif

        return(old);
}
465
/* Atomic increment built on the CAS above; returns the NEW value
 * (Win32 InterlockedIncrement semantics). */
static inline gint32 InterlockedIncrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
        return _InterlockedIncrement (val);
#else
        gint32 old;

        /* Retry until no other thread modified *val between read and CAS. */
        do {
                old = *val;
        } while (InterlockedCompareExchange (val, old + 1, old) != old);

        return old + 1;
#endif
}
480
/* Atomic decrement built on the CAS above; returns the NEW value
 * (Win32 InterlockedDecrement semantics). */
static inline gint32 InterlockedDecrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
        return _InterlockedDecrement (val);
#else
        gint32 old;

        do {
                old = *val;
        } while (InterlockedCompareExchange (val, old - 1, old) != old);

        return old - 1;
#endif
}
495
/* Atomic 32-bit swap via a CAS loop; returns the value that was replaced. */
static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
{
#ifdef __INTEL_COMPILER
        return _InterlockedExchange (dest, new_val);
#else
        gint32 res;

        do {
                res = *dest;
        } while (InterlockedCompareExchange (dest, new_val, res) != res);

        return res;
#endif
}
510
/* Atomic pointer swap; the icc path uses the 64-bit exchange intrinsic
 * (pointers are 64-bit on ia64), the GCC path a CAS loop.  Returns the
 * pointer that was replaced. */
static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
{
#ifdef __INTEL_COMPILER
        return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
#else
        gpointer res;

        do {
                res = *dest;
        } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);

        return res;
#endif
}
525
526 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
527 {
528         gint32 old;
529
530 #ifdef __INTEL_COMPILER
531         old = _InterlockedExchangeAdd (val, add);
532 #else
533         do {
534                 old = *val;
535         } while (InterlockedCompareExchange (val, old + add, old) != old);
536
537         return old;
538 #endif
539 }
540
541 #else
542
/* No inline atomic implementation for this architecture: flag it so the
 * runtime falls back to the out-of-line (presumably lock-based — defined in
 * the corresponding .c file, not visible here) versions declared below. */
#define WAPI_NO_ATOMIC_ASM

extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
552
553 #endif
554
555 #ifndef HAS_64BITS_ATOMICS
556 extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
557 #endif
558
559 #endif /* _WAPI_ATOMIC_H_ */