/*
 * Fix the OSX and Windows build.
 * [mono.git] mono/utils/atomic.h
 */
1 /*
2  * atomic.h:  Atomic operations
3  *
4  * Author:
5  *      Dick Porter (dick@ximian.com)
6  *
7  * (C) 2002 Ximian, Inc.
8  * Copyright 2012 Xamarin Inc
9  */
10
11 #ifndef _WAPI_ATOMIC_H_
12 #define _WAPI_ATOMIC_H_
13
14 #if defined(__NetBSD__)
15 #include <sys/param.h>
16
17 #if __NetBSD_Version__ > 499004000
18 #include <sys/atomic.h>
19 #define HAVE_ATOMIC_OPS
20 #endif
21
22 #endif
23
24 #include "config.h"
25 #include <glib.h>
26
27 #ifdef ENABLE_EXTENSION_MODULE
28 #include "../../../mono-extensions/mono/utils/atomic.h"
29 #endif
30
31 /* On Windows, we always use the functions provided by the Windows API. */
32 #if defined(__WIN32__) || defined(_WIN32)
33
34 #include <windows.h>
35 #define HAS_64BITS_ATOMICS 1
36
37 /* mingw is missing InterlockedCompareExchange64 () from winbase.h */
38 #ifdef __MINGW32__
39 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
40 {
41         return __sync_val_compare_and_swap (dest, comp, exch);
42 }
43 #endif
44
45 /* Prefer GCC atomic ops if the target supports it (see configure.in). */
46 #elif defined(USE_GCC_ATOMIC_OPS)
47
48 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
49                                                 gint32 exch, gint32 comp)
50 {
51         return __sync_val_compare_and_swap (dest, comp, exch);
52 }
53
54 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
55 {
56         return __sync_val_compare_and_swap (dest, comp, exch);
57 }
58
59 static inline gint32 InterlockedIncrement(volatile gint32 *val)
60 {
61         return __sync_add_and_fetch (val, 1);
62 }
63
64 static inline gint32 InterlockedDecrement(volatile gint32 *val)
65 {
66         return __sync_add_and_fetch (val, -1);
67 }
68
69 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
70 {
71         gint32 old_val;
72         do {
73                 old_val = *val;
74         } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
75         return old_val;
76 }
77
78 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
79                                                   gpointer new_val)
80 {
81         gpointer old_val;
82         do {
83                 old_val = *val;
84         } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
85         return old_val;
86 }
87
88 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
89 {
90         return __sync_fetch_and_add (val, add);
91 }
92
93 #if defined (TARGET_OSX)
94 #define BROKEN_64BIT_ATOMICS_INTRINSIC 1
95 #endif
96
97
98 #if !defined (BROKEN_64BIT_ATOMICS_INTRINSIC)
99 #define HAS_64BITS_ATOMICS 1
100
101 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
102 {
103         return __sync_val_compare_and_swap (dest, comp, exch);
104 }
105
106 #endif
107
108
#elif defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)

/*
 * NetBSD with <sys/atomic.h> available (see the __NetBSD_Version__
 * probe at the top of this header).  Each wrapper maps a Win32-style
 * Interlocked* name onto the corresponding libc atomic primitive.
 */

static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
       gint32 exch, gint32 comp)
{
       /* atomic_cas_32() returns the previous value, matching Win32. */
       return atomic_cas_32((uint32_t*)dest, comp, exch);
}

static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
       return atomic_cas_ptr(dest, comp, exch);
}

static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
       /* The *_nv variants return the new (post-operation) value. */
       return atomic_inc_32_nv((uint32_t*)val);
}

static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
       return atomic_dec_32_nv((uint32_t*)val);
}

static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
       /* atomic_swap_32() returns the value that was replaced. */
       return atomic_swap_32((uint32_t*)val, new_val);
}

static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
               gpointer new_val)
{
       return atomic_swap_ptr(val, new_val);
}

static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
       /*
        * atomic_add_32_nv() returns the post-add value; subtracting the
        * addend recovers Win32's "value before the add" result.
        */
       return atomic_add_32_nv((uint32_t*)val, add) - add;
}
147
#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)

/*
 * SPARC implementations built on the V8+/V9 compare-and-swap
 * instruction.  Older assemblers do not understand the cas/casx
 * mnemonics, so the instructions are emitted as raw opcodes with
 * ".word"; the intended mnemonic appears in a comment beside each one.
 * Operands are pinned to fixed registers (%g1, %o4, %o5) so they match
 * the hard-coded opcode encodings.
 */

G_GNUC_UNUSED 
static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 comp asm("o4") = _comp;
       register gint32 exch asm("o5") = _exch;

       __asm__ __volatile__(
               /* cas [%%g1], %%o4, %%o5 */
               ".word 0xdbe0500c"
               : "=r" (exch)
               : "0" (exch), "r" (dest), "r" (comp)
               : "memory");

       /* cas leaves the previous memory value in %o5 (i.e. exch). */
       return exch;
}

G_GNUC_UNUSED 
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
{
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer comp asm("o4") = _comp;
       register gpointer exch asm("o5") = _exch;

       __asm__ __volatile__(
#ifdef SPARCV9
               /* casx [%%g1], %%o4, %%o5 */
               ".word 0xdbf0500c"
#else
               /* cas [%%g1], %%o4, %%o5 */
               ".word 0xdbe0500c"
#endif
               : "=r" (exch)
               : "0" (exch), "r" (dest), "r" (comp)
               : "memory");

       return exch;
}

G_GNUC_UNUSED 
static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       /*
        * Load/increment/CAS retry loop; the add in the branch delay
        * slot turns the old value left in %o5 into the incremented
        * value to return (Win32 returns the post-increment value).
        */
       __asm__ __volatile__(
               "1:     ld      [%%g1], %%o4\n\t"
               "       add     %%o4, 1, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        add    %%o5, 1, %%o5"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest)
               : "memory", "cc");

        return ret;
}

G_GNUC_UNUSED 
static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       /* Same retry-loop shape as InterlockedIncrement, subtracting. */
       __asm__ __volatile__(
               "1:     ld      [%%g1], %%o4\n\t"
               "       sub     %%o4, 1, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        sub    %%o5, 1, %%o5"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest)
               : "memory", "cc");

        return ret;
}

G_GNUC_UNUSED
static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       /* CAS retry loop; on exit %o5 holds the value that was replaced. */
       __asm__ __volatile__(
               "1:     ld      [%%g1], %%o4\n\t"
               "       mov     %3, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        nop"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest), "r" (exch)
               : "memory", "cc");

        return ret;
}

G_GNUC_UNUSED
static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
{
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer tmp asm("o4");
       register gpointer ret asm("o5");

       /* Pointer-sized exchange: ldx/casx on V9, ld/cas on 32-bit. */
       __asm__ __volatile__(
#ifdef SPARCV9
               "1:     ldx     [%%g1], %%o4\n\t"
#else
               "1:     ld      [%%g1], %%o4\n\t"
#endif
               "       mov     %3, %%o5\n\t"
#ifdef SPARCV9
               /*      casx    [%%g1], %%o4, %%o5 */
               "       .word   0xdbf0500c\n\t"
#else
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
#endif
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        nop"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest), "r" (exch)
               : "memory", "cc");

        return ret;
}

G_GNUC_UNUSED
static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       /*
        * NOTE(review): the delay-slot add re-applies the addend, so this
        * appears to return the post-add value, whereas Win32
        * InterlockedExchangeAdd returns the pre-add value — worth
        * confirming against callers before relying on it.
        */
       __asm__ __volatile__(
               "1:     ld      [%%g1], %%o4\n\t"
               "       add     %%o4, %3, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               "        add    %%o5, %3, %%o5"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest), "r" (add)
               : "memory", "cc");

        return ret;
}
307
#elif __s390x__

/*
 * 64-bit System z implementations built on the CS (32-bit) and CSG
 * (64-bit) compare-and-swap instructions.  LA materializes the operand
 * address into a scratch register; loops retry via JNZ until the
 * compare-and-swap succeeds.
 */

static inline gint32 
InterlockedCompareExchange(volatile gint32 *dest,
                           gint32 exch, gint32 comp)
{
        gint32 old;

        /* CS stores exch iff *dest == comp; old receives *dest's prior value. */
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLR\t%1,%3\n"
                              "\tCS\t%1,%2,0(1)\n"
                              : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");     
        return(old);
}

static inline gpointer 
InterlockedCompareExchangePointer(volatile gpointer *dest, 
                                  gpointer exch, 
                                  gpointer comp)
{
        gpointer old;

        /* Pointer-sized variant: CSG operates on 64-bit operands. */
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLGR\t%1,%3\n"
                              "\tCSG\t%1,%2,0(1)\n"
                              : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");

        return(old);
}

static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
        gint32 tmp;
        
        /* Load, add 1, CS; retry on contention.  Returns the new value. */
        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGHI\t1,1\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b\n"
                              "\tLGFR\t%0,1"
                              : "=r" (tmp), "+m" (*val)
                              : : "1", "2", "cc");

        return(tmp);
}

static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
        gint32 tmp;
        
        /* Same retry-loop shape as InterlockedIncrement, adding -1. */
        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGHI\t1,-1\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b\n"
                              "\tLGFR\t%0,1"
                              : "=r" (tmp), "+m" (*val)
                              : : "1", "2", "cc");

        return(tmp);
}

static inline gint32 
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
        gint32 ret;
        
        /* CS retry loop; ret ends up holding the value that was replaced. */
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "0:\tL\t%1,%0\n"
                              "\tCS\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
                              : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");

        return(ret);
}

static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
        gpointer ret;
        
        /* 64-bit exchange via LG/CSG retry loop. */
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "0:\tLG\t%1,%0\n"
                              "\tCSG\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
                              : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");

        return(ret);
}

static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
        gint32 ret;

        /* Load/add/CS retry loop; returns the pre-add value in ret. */
        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGR\t1,%2\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b"
                              : "=&r" (ret), "+m" (*val)
                              : "r" (add) 
                              : "1", "2", "cc");
        
        return(ret);
}
427
#elif defined(__ia64__)

/*
 * Itanium: use the compiler intrinsics under icc, and cmpxchg inline
 * assembly under GCC.  The non-CAS operations are built as retry loops
 * on top of the CAS primitives defined first.
 */

#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif

static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
                                                gint32 exch, gint32 comp)
{
        gint32 old;
        guint64 real_comp;

#ifdef __INTEL_COMPILER
        old = _InterlockedCompareExchange (dest, exch, comp);
#else
        /* cmpxchg4 zero extends the value read from memory */
        real_comp = (guint64)(guint32)comp;
        /* ar.ccv holds the comparand; .acq gives acquire semantics. */
        asm volatile ("mov ar.ccv = %2 ;;\n\t"
                                  "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
                                  : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
#endif

        return(old);
}

static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
                                                gpointer exch, gpointer comp)
{
        gpointer old;

#ifdef __INTEL_COMPILER
        old = _InterlockedCompareExchangePointer (dest, exch, comp);
#else
        /* 64-bit (pointer-sized) variant: cmpxchg8. */
        asm volatile ("mov ar.ccv = %2 ;;\n\t"
                                  "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
                                  : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
#endif

        return(old);
}

static inline gint32 InterlockedIncrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
        return _InterlockedIncrement (val);
#else
        gint32 old;

        /* CAS retry loop; returns the incremented (new) value. */
        do {
                old = *val;
        } while (InterlockedCompareExchange (val, old + 1, old) != old);

        return old + 1;
#endif
}

static inline gint32 InterlockedDecrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
        return _InterlockedDecrement (val);
#else
        gint32 old;

        /* CAS retry loop; returns the decremented (new) value. */
        do {
                old = *val;
        } while (InterlockedCompareExchange (val, old - 1, old) != old);

        return old - 1;
#endif
}

static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
{
#ifdef __INTEL_COMPILER
        return _InterlockedExchange (dest, new_val);
#else
        gint32 res;

        /* CAS retry loop; returns the previous value of *dest. */
        do {
                res = *dest;
        } while (InterlockedCompareExchange (dest, new_val, res) != res);

        return res;
#endif
}

static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
{
#ifdef __INTEL_COMPILER
        return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
#else
        gpointer res;

        do {
                res = *dest;
        } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);

        return res;
#endif
}
528
529 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
530 {
531         gint32 old;
532
533 #ifdef __INTEL_COMPILER
534         old = _InterlockedExchangeAdd (val, add);
535 #else
536         do {
537                 old = *val;
538         } while (InterlockedCompareExchange (val, old + add, old) != old);
539
540         return old;
541 #endif
542 }
543
#else

/*
 * No inline implementation for this platform: advertise that fact and
 * fall back to out-of-line versions implemented elsewhere in the
 * runtime (presumably lock-based — TODO confirm against the .c file).
 */
#define WAPI_NO_ATOMIC_ASM

extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);

#endif

/*
 * Platforms that did not define HAS_64BITS_ATOMICS above get an
 * out-of-line 64-bit compare-and-swap fallback.
 */
#ifndef HAS_64BITS_ATOMICS
extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
#endif
561
562 #endif /* _WAPI_ATOMIC_H_ */