/* mono/utils/atomic.h */
/*
 * atomic.h:  Atomic operations
 *
 * Author:
 *      Dick Porter (dick@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2012 Xamarin Inc
 */

#ifndef _WAPI_ATOMIC_H_
#define _WAPI_ATOMIC_H_

#if defined(__NetBSD__)
#include <sys/param.h>

#if __NetBSD_Version__ > 499004000
#include <sys/atomic.h>
#define HAVE_ATOMIC_OPS
#endif

#endif

#include "config.h"
#include <glib.h>

/* On Windows, we always use the functions provided by the Windows API. */
#if defined(__WIN32__) || defined(_WIN32)

#include <windows.h>
#define HAS_64BITS_ATOMICS 1

/* Prefer GCC atomic ops if the target supports it (see configure.in). */
#elif defined(USE_GCC_ATOMIC_OPS)

36 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
37                                                 gint32 exch, gint32 comp)
38 {
39         return __sync_val_compare_and_swap (dest, comp, exch);
40 }
41
42 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
43 {
44         return __sync_val_compare_and_swap (dest, comp, exch);
45 }
46
47 static inline gint32 InterlockedIncrement(volatile gint32 *val)
48 {
49         return __sync_add_and_fetch (val, 1);
50 }
51
52 static inline gint32 InterlockedDecrement(volatile gint32 *val)
53 {
54         return __sync_add_and_fetch (val, -1);
55 }
56
57 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
58 {
59         gint32 old_val;
60         do {
61                 old_val = *val;
62         } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
63         return old_val;
64 }
65
66 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
67                                                   gpointer new_val)
68 {
69         gpointer old_val;
70         do {
71                 old_val = *val;
72         } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
73         return old_val;
74 }
75
76 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
77 {
78         return __sync_fetch_and_add (val, add);
79 }
80
/* The 64-bit __sync intrinsic is miscompiled on Mach/ARM, so fall back to
 * the external implementation there (see HAS_64BITS_ATOMICS below). */
#if defined (TARGET_ARM) && defined (TARGET_MACH)
#define BROKEN_64BIT_ATOMICS_INTRINSIC 1
#endif


#if !defined (BROKEN_64BIT_ATOMICS_INTRINSIC)
#define HAS_64BITS_ATOMICS 1

89 static inline gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp)
90 {
91         return __sync_val_compare_and_swap (dest, comp, exch);
92 }
93
#endif


#elif defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)

99 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
100        gint32 exch, gint32 comp)
101 {
102        return atomic_cas_32((uint32_t*)dest, comp, exch);
103 }
104
105 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
106 {
107        return atomic_cas_ptr(dest, comp, exch);
108 }
109
110 static inline gint32 InterlockedIncrement(volatile gint32 *val)
111 {
112        return atomic_inc_32_nv((uint32_t*)val);
113 }
114
115 static inline gint32 InterlockedDecrement(volatile gint32 *val)
116 {
117        return atomic_dec_32_nv((uint32_t*)val);
118 }
119
120 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
121 {
122        return atomic_swap_32((uint32_t*)val, new_val);
123 }
124
125 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
126                gpointer new_val)
127 {
128        return atomic_swap_ptr(val, new_val);
129 }
130
131 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
132 {
133        return atomic_add_32_nv((uint32_t*)val, add) - add;
134 }
135
#elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)

/*
 * Atomic compare-and-swap: if *_dest == _comp, store _exch into *_dest.
 * Returns the value *_dest held before the operation.
 * The instruction is emitted as a raw .word, presumably because older
 * assemblers reject the `cas' mnemonic — TODO confirm.
 */
G_GNUC_UNUSED 
static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
{
       /* Operands are pinned to the registers hard-coded in the
        * .word-encoded instruction below. */
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 comp asm("o4") = _comp;
       register gint32 exch asm("o5") = _exch;

       __asm__ __volatile__(
               /* cas [%%g1], %%o4, %%o5 */
               ".word 0xdbe0500c"
               : "=r" (exch)
               : "0" (exch), "r" (dest), "r" (comp)
               : "memory");

       /* cas leaves the old memory value in %o5. */
       return exch;
}

/*
 * Pointer-sized compare-and-swap; see InterlockedCompareExchange.
 * On SPARC V9 pointers are 64-bit, so the 64-bit `casx' encoding is used.
 */
G_GNUC_UNUSED 
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
{
       /* Operands pinned to the registers encoded in the .word below. */
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer comp asm("o4") = _comp;
       register gpointer exch asm("o5") = _exch;

       __asm__ __volatile__(
#ifdef SPARCV9
               /* casx [%%g1], %%o4, %%o5 */
               ".word 0xdbf0500c"
#else
               /* cas [%%g1], %%o4, %%o5 */
               ".word 0xdbe0500c"
#endif
               : "=r" (exch)
               : "0" (exch), "r" (dest), "r" (comp)
               : "memory");

       /* cas/casx leave the old memory value in %o5. */
       return exch;
}

/*
 * Atomically increment *_dest; returns the NEW (incremented) value,
 * matching the Win32 InterlockedIncrement contract.
 */
G_GNUC_UNUSED 
static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
               /* Load the current value, compute +1, then cas it in;
                * retry if another CPU changed *_dest in between. */
               "1:     ld      [%%g1], %%o4\n\t"
               "       add     %%o4, 1, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               /* Branch delay slot: on success %o5 holds the old value,
                * so add 1 to produce the new value as the result. */
               "        add    %%o5, 1, %%o5"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest)
               : "memory", "cc");

        return ret;
}

/*
 * Atomically decrement *_dest; returns the NEW (decremented) value,
 * matching the Win32 InterlockedDecrement contract.
 */
G_GNUC_UNUSED 
static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
               /* Load, compute -1, cas; retry on contention. */
               "1:     ld      [%%g1], %%o4\n\t"
               "       sub     %%o4, 1, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               /* Delay slot: turn the old value left in %o5 into old-1. */
               "        sub    %%o5, 1, %%o5"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest)
               : "memory", "cc");

        return ret;
}

/*
 * Atomically store exch into *_dest; returns the value previously held.
 */
G_GNUC_UNUSED
static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
               /* Load the current value, then cas the new one in;
                * retry until no other CPU raced us. */
               "1:     ld      [%%g1], %%o4\n\t"
               "       mov     %3, %%o5\n\t"
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               /* Delay slot: %o5 already holds the old value on success. */
               "        nop"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest), "r" (exch)
               : "memory", "cc");

        return ret;
}

/*
 * Atomically store exch into *_dest; returns the pointer previously held.
 * Uses 64-bit loads and casx on SPARC V9 where pointers are 64-bit.
 */
G_GNUC_UNUSED
static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
{
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer tmp asm("o4");
       register gpointer ret asm("o5");

       __asm__ __volatile__(
#ifdef SPARCV9
               "1:     ldx     [%%g1], %%o4\n\t"
#else
               "1:     ld      [%%g1], %%o4\n\t"
#endif
               "       mov     %3, %%o5\n\t"
#ifdef SPARCV9
               /*      casx    [%%g1], %%o4, %%o5 */
               "       .word   0xdbf0500c\n\t"
#else
               /*      cas     [%%g1], %%o4, %%o5 */
               "       .word   0xdbe0500c\n\t"
#endif
               "       cmp     %%o4, %%o5\n\t"
               "       bne     1b\n\t"
               /* Delay slot: %o5 already holds the old pointer on success. */
               "        nop"
               : "=&r" (tmp), "=&r" (ret)
               : "r" (dest), "r" (exch)
               : "memory", "cc");

        return ret;
}

274 G_GNUC_UNUSED
275 static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
276 {
277        register volatile gint32 *dest asm("g1") = _dest;
278        register gint32 tmp asm("o4");
279        register gint32 ret asm("o5");
280
281        __asm__ __volatile__(
282                "1:     ld      [%%g1], %%o4\n\t"
283                "       add     %%o4, %3, %%o5\n\t"
284                /*      cas     [%%g1], %%o4, %%o5 */
285                "       .word   0xdbe0500c\n\t"
286                "       cmp     %%o4, %%o5\n\t"
287                "       bne     1b\n\t"
288                "        add    %%o5, %3, %%o5"
289                : "=&r" (tmp), "=&r" (ret)
290                : "r" (dest), "r" (add)
291                : "memory", "cc");
292
293         return ret;
294 }
295
#elif defined(__s390x__)

/*
 * Atomic compare-and-swap via the z/Architecture CS instruction:
 * if *dest == comp, store exch into *dest.  Returns the previous value.
 */
static inline gint32 
InterlockedCompareExchange(volatile gint32 *dest,
                           gint32 exch, gint32 comp)
{
        gint32 old;

        /* LA 1,%0: address of *dest into r1; LR %1,%3: comp into `old';
         * CS then compares `old' with memory and swaps in exch on match,
         * otherwise loads the current memory value into `old'. */
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLR\t%1,%3\n"
                              "\tCS\t%1,%2,0(1)\n"
                              : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");
        return(old);
}

/*
 * Pointer-sized (64-bit) compare-and-swap via CSG.
 * Returns the pointer *dest held before the operation.
 */
static inline gpointer 
InterlockedCompareExchangePointer(volatile gpointer *dest, 
                                  gpointer exch, 
                                  gpointer comp)
{
        gpointer old;

        /* Same pattern as the 32-bit version, with 64-bit LGR/CSG. */
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "\tLGR\t%1,%3\n"
                              "\tCSG\t%1,%2,0(1)\n"
                              : "+m" (*dest), "=&r" (old)
                              : "r" (exch), "r" (comp)
                              : "1", "cc");

        return(old);
}

/*
 * Atomically increment *val; returns the NEW (incremented) value.
 * CS retry loop: reload and retry if another CPU changed *val.
 */
static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
        gint32 tmp;
        
        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGHI\t1,1\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b\n"
                              "\tLGFR\t%0,1"
                              : "=r" (tmp), "+m" (*val)
                              : : "1", "2", "cc");

        return(tmp);
}

/*
 * Atomically decrement *val; returns the NEW (decremented) value.
 * CS retry loop, mirror image of InterlockedIncrement.
 */
static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
        gint32 tmp;
        
        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGHI\t1,-1\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b\n"
                              "\tLGFR\t%0,1"
                              : "=r" (tmp), "+m" (*val)
                              : : "1", "2", "cc");

        return(tmp);
}

/*
 * Atomically store new_val into *val; returns the previous value.
 * Load then CS, retrying until no other CPU intervenes.
 */
static inline gint32 
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
        gint32 ret;
        
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "0:\tL\t%1,%0\n"
                              "\tCS\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
                              : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");

        return(ret);
}

/*
 * Atomically store new_val into *val; returns the previous pointer.
 * 64-bit LG/CSG variant of InterlockedExchange.
 */
static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
        gpointer ret;
        
        __asm__ __volatile__ ("\tLA\t1,%0\n"
                              "0:\tLG\t%1,%0\n"
                              "\tCSG\t%1,%2,0(1)\n"
                              "\tJNZ\t0b"
                              : "+m" (*val), "=&r" (ret)
                              : "r" (new_val)
                              : "1", "cc");

        return(ret);
}

/*
 * Atomically add `add' to *val; returns the value held BEFORE the add
 * (`ret' is loaded from memory before AGR, so the old value is returned).
 */
static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
        gint32 ret;

        __asm__ __volatile__ ("\tLA\t2,%1\n"
                              "0:\tLGF\t%0,%1\n"
                              "\tLGFR\t1,%0\n"
                              "\tAGR\t1,%2\n"
                              "\tCS\t%0,1,0(2)\n"
                              "\tJNZ\t0b"
                              : "=&r" (ret), "+m" (*val)
                              : "r" (add) 
                              : "1", "2", "cc");
        
        return(ret);
}

#elif defined(__ia64__)

#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif

/*
 * Atomic compare-and-swap: if *dest == comp, store exch into *dest.
 * Returns the previous value of *dest (acquire semantics via cmpxchg4.acq).
 */
static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
                                                gint32 exch, gint32 comp)
{
        gint32 old;
        guint64 real_comp;

#ifdef __INTEL_COMPILER
        old = _InterlockedCompareExchange (dest, exch, comp);
#else
        /* cmpxchg4 zero extends the value read from memory, so the
         * comparand in ar.ccv must be zero-extended to match. */
        real_comp = (guint64)(guint32)comp;
        asm volatile ("mov ar.ccv = %2 ;;\n\t"
                                  "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
                                  : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
#endif

        return(old);
}

/*
 * Pointer-sized (64-bit) compare-and-swap via cmpxchg8.acq.
 * Returns the pointer *dest held before the operation.
 */
static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
                                                gpointer exch, gpointer comp)
{
        gpointer old;

#ifdef __INTEL_COMPILER
        old = _InterlockedCompareExchangePointer (dest, exch, comp);
#else
        asm volatile ("mov ar.ccv = %2 ;;\n\t"
                                  "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
                                  : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
#endif

        return(old);
}

457 static inline gint32 InterlockedIncrement(gint32 volatile *val)
458 {
459 #ifdef __INTEL_COMPILER
460         return _InterlockedIncrement (val);
461 #else
462         gint32 old;
463
464         do {
465                 old = *val;
466         } while (InterlockedCompareExchange (val, old + 1, old) != old);
467
468         return old + 1;
469 #endif
470 }
471
472 static inline gint32 InterlockedDecrement(gint32 volatile *val)
473 {
474 #ifdef __INTEL_COMPILER
475         return _InterlockedDecrement (val);
476 #else
477         gint32 old;
478
479         do {
480                 old = *val;
481         } while (InterlockedCompareExchange (val, old - 1, old) != old);
482
483         return old - 1;
484 #endif
485 }
486
487 static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
488 {
489 #ifdef __INTEL_COMPILER
490         return _InterlockedExchange (dest, new_val);
491 #else
492         gint32 res;
493
494         do {
495                 res = *dest;
496         } while (InterlockedCompareExchange (dest, new_val, res) != res);
497
498         return res;
499 #endif
500 }
501
502 static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
503 {
504 #ifdef __INTEL_COMPILER
505         return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
506 #else
507         gpointer res;
508
509         do {
510                 res = *dest;
511         } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
512
513         return res;
514 #endif
515 }
516
517 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
518 {
519         gint32 old;
520
521 #ifdef __INTEL_COMPILER
522         old = _InterlockedExchangeAdd (val, add);
523 #else
524         do {
525                 old = *val;
526         } while (InterlockedCompareExchange (val, old + add, old) != old);
527
528         return old;
529 #endif
530 }
531
#else

/* No inline implementation for this architecture: the Interlocked*
 * functions below are defined out-of-line elsewhere in the runtime. */
#define WAPI_NO_ATOMIC_ASM

/* Portable fallback prototypes (Win32 Interlocked* semantics):
 * CompareExchange*/ExchangeAdd return the PREVIOUS value,
 * Increment/Decrement return the NEW value. */
extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);

#endif

/* 64-bit CAS fallback for platforms without a native 64-bit atomic. */
#ifndef HAS_64BITS_ATOMICS
extern gint64 InterlockedCompareExchange64(volatile gint64 *dest, gint64 exch, gint64 comp);
#endif

#endif /* _WAPI_ATOMIC_H_ */