[mono.git] mono/io-layer/atomic.h
1 /*
2  * atomic.h:  Atomic operations
3  *
4  * Author:
5  *      Dick Porter (dick@ximian.com)
6  *
7  * (C) 2002 Ximian, Inc.
8  */
9
10 #ifndef _WAPI_ATOMIC_H_
11 #define _WAPI_ATOMIC_H_
12
13 #if defined(__NetBSD__)
14 #include <sys/param.h>
15
16 #if __NetBSD_Version__ > 499004000
17 #include <sys/atomic.h>
18 #define HAVE_ATOMIC_OPS
19 #endif
20
21 #endif
22
23 #include <glib.h>
24
25 #include "mono/io-layer/wapi.h"
26
27 #if defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)
28
29 #define WAPI_ATOMIC_ASM
30 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
31        gint32 exch, gint32 comp)
32 {
33        return atomic_cas_32((uint32_t*)dest, comp, exch);
34 }
35
36 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
37 {
38        return atomic_cas_ptr(dest, comp, exch);
39 }
40
41 static inline gint32 InterlockedIncrement(volatile gint32 *val)
42 {
43        return atomic_inc_32_nv((uint32_t*)val);
44 }
45
46 static inline gint32 InterlockedDecrement(volatile gint32 *val)
47 {
48        return atomic_dec_32_nv((uint32_t*)val);
49 }
50
51 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
52 {
53        return atomic_swap_32((uint32_t*)val, new_val);
54 }
55
56 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
57                gpointer new_val)
58 {
59        return atomic_swap_ptr(val, new_val);
60 }
61
62 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
63 {
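       /* atomic_add_32_nv() returns the updated value; subtracting add
        * again converts this into the Interlocked* convention of
        * returning the value that was stored before the operation. */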
64        return atomic_add_32_nv((uint32_t*)val, add) - add;
65 }
66
67 #elif defined(__i386__) || defined(__x86_64__)
68 #define WAPI_ATOMIC_ASM
69
70 /*
71  * NB: The *Pointer() functions assume sizeof(gpointer)==sizeof(gint32),
72  * except on x86-64 outside Native Client, which uses 64-bit cmpxchgq.
73  *
74  * NB2: These asm functions assume 486+ (some of the opcodes don't
75  * exist on 386).  If this becomes an issue, we can get configure to
76  * fall back to the non-atomic C versions of these calls.
77  */
78
79 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
80                                                 gint32 exch, gint32 comp)
81 {
82         gint32 old;
83
84         __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
85                               : "=m" (*dest), "=a" (old)
86                               : "r" (exch), "m" (*dest), "a" (comp));   
87         return(old);
88 }
89
90 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
91 {
92         gpointer old;
93
94         __asm__ __volatile__ ("lock; "
95 #if defined(__x86_64__)  && !defined(__native_client__)
96                               "cmpxchgq"
97 #else
98                               "cmpxchgl"
99 #endif
100                               " %2, %0"
101                               : "=m" (*dest), "=a" (old)
102                               : "r" (exch), "m" (*dest), "a" (comp));   
103
104         return(old);
105 }
106
107 static inline gint32 InterlockedIncrement(volatile gint32 *val)
108 {
109         gint32 tmp;
110         
111         __asm__ __volatile__ ("lock; xaddl %0, %1"
112                               : "=r" (tmp), "=m" (*val)
113                               : "0" (1), "m" (*val));
114
115         return(tmp+1);
116 }
117
118 static inline gint32 InterlockedDecrement(volatile gint32 *val)
119 {
120         gint32 tmp;
121         
122         __asm__ __volatile__ ("lock; xaddl %0, %1"
123                               : "=r" (tmp), "=m" (*val)
124                               : "0" (-1), "m" (*val));
125
126         return(tmp-1);
127 }
128
129 /*
130  * See
131  * http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
132  * for the reasons for using cmpxchg and a loop here.
133  *
134  * That URL is no longer valid; the article later moved to
135  * http://msdn.microsoft.com/msdnmag/issues/0700/Win32/, which may also
136  * have changed since.
137  */
140 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
141 {
142         gint32 ret;
143         
144         __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
145                               : "=m" (*val), "=a" (ret)
146                               : "r" (new_val), "m" (*val), "a" (*val));
147
148         return(ret);
149 }
150
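/*
 * Editorial sketch (compiled out, not part of the original header): in
 * portable C the cmpxchg loop above amounts to retrying a compare-and-swap
 * until the value in memory is unchanged between the read and the swap,
 * then returning the value that was replaced.  The ia64 fallback further
 * down in this file uses exactly this pattern; only
 * InterlockedCompareExchange() from this file is assumed.
 */
#if 0
static inline gint32
sketch_interlocked_exchange (volatile gint32 *val, gint32 new_val)
{
        gint32 old;

        do {
                old = *val;
        } while (InterlockedCompareExchange (val, new_val, old) != old);

        return old;
}
#endif
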
151 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
152                                                   gpointer new_val)
153 {
154         gpointer ret;
155         
156         __asm__ __volatile__ ("1:; lock; "
157 #if defined(__x86_64__)  && !defined(__native_client__)
158                               "cmpxchgq"
159 #else
160                               "cmpxchgl"
161 #endif
162                               " %2, %0; jne 1b"
163                               : "=m" (*val), "=a" (ret)
164                               : "r" (new_val), "m" (*val), "a" (*val));
165
166         return(ret);
167 }
168
169 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
170 {
171         gint32 ret;
172         
173         __asm__ __volatile__ ("lock; xaddl %0, %1"
174                               : "=r" (ret), "=m" (*val)
175                               : "0" (add), "m" (*val));
176         
177         return(ret);
178 }
179
180 #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
181 #define WAPI_ATOMIC_ASM
182
183 G_GNUC_UNUSED 
184 static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
185 {
186        register volatile gint32 *dest asm("g1") = _dest;
187        register gint32 comp asm("o4") = _comp;
188        register gint32 exch asm("o5") = _exch;
189
190        __asm__ __volatile__(
191                /* cas [%%g1], %%o4, %%o5 */
192                ".word 0xdbe0500c"
193                : "=r" (exch)
194                : "0" (exch), "r" (dest), "r" (comp)
195                : "memory");
196
197        return exch;
198 }
199
200 G_GNUC_UNUSED 
201 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
202 {
203        register volatile gpointer *dest asm("g1") = _dest;
204        register gpointer comp asm("o4") = _comp;
205        register gpointer exch asm("o5") = _exch;
206
207        __asm__ __volatile__(
208 #ifdef SPARCV9
209                /* casx [%%g1], %%o4, %%o5 */
210                ".word 0xdbf0500c"
211 #else
212                /* cas [%%g1], %%o4, %%o5 */
213                ".word 0xdbe0500c"
214 #endif
215                : "=r" (exch)
216                : "0" (exch), "r" (dest), "r" (comp)
217                : "memory");
218
219        return exch;
220 }
221
222 G_GNUC_UNUSED 
223 static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
224 {
225        register volatile gint32 *dest asm("g1") = _dest;
226        register gint32 tmp asm("o4");
227        register gint32 ret asm("o5");
228
229        __asm__ __volatile__(
230                "1:     ld      [%%g1], %%o4\n\t"
231                "       add     %%o4, 1, %%o5\n\t"
232                /*      cas     [%%g1], %%o4, %%o5 */
233                "       .word   0xdbe0500c\n\t"
234                "       cmp     %%o4, %%o5\n\t"
235                "       bne     1b\n\t"
236                "        add    %%o5, 1, %%o5"
237                : "=&r" (tmp), "=&r" (ret)
238                : "r" (dest)
239                : "memory", "cc");
240
241         return ret;
242 }
243
244 G_GNUC_UNUSED 
245 static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
246 {
247        register volatile gint32 *dest asm("g1") = _dest;
248        register gint32 tmp asm("o4");
249        register gint32 ret asm("o5");
250
251        __asm__ __volatile__(
252                "1:     ld      [%%g1], %%o4\n\t"
253                "       sub     %%o4, 1, %%o5\n\t"
254                /*      cas     [%%g1], %%o4, %%o5 */
255                "       .word   0xdbe0500c\n\t"
256                "       cmp     %%o4, %%o5\n\t"
257                "       bne     1b\n\t"
258                "        sub    %%o5, 1, %%o5"
259                : "=&r" (tmp), "=&r" (ret)
260                : "r" (dest)
261                : "memory", "cc");
262
263         return ret;
264 }
265
266 G_GNUC_UNUSED
267 static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
268 {
269        register volatile gint32 *dest asm("g1") = _dest;
270        register gint32 tmp asm("o4");
271        register gint32 ret asm("o5");
272
273        __asm__ __volatile__(
274                "1:     ld      [%%g1], %%o4\n\t"
275                "       mov     %3, %%o5\n\t"
276                /*      cas     [%%g1], %%o4, %%o5 */
277                "       .word   0xdbe0500c\n\t"
278                "       cmp     %%o4, %%o5\n\t"
279                "       bne     1b\n\t"
280                "        nop"
281                : "=&r" (tmp), "=&r" (ret)
282                : "r" (dest), "r" (exch)
283                : "memory", "cc");
284
285         return ret;
286 }
287
288 G_GNUC_UNUSED
289 static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
290 {
291        register volatile gpointer *dest asm("g1") = _dest;
292        register gpointer tmp asm("o4");
293        register gpointer ret asm("o5");
294
295        __asm__ __volatile__(
296 #ifdef SPARCV9
297                "1:     ldx     [%%g1], %%o4\n\t"
298 #else
299                "1:     ld      [%%g1], %%o4\n\t"
300 #endif
301                "       mov     %3, %%o5\n\t"
302 #ifdef SPARCV9
303                /*      casx    [%%g1], %%o4, %%o5 */
304                "       .word   0xdbf0500c\n\t"
305 #else
306                /*      cas     [%%g1], %%o4, %%o5 */
307                "       .word   0xdbe0500c\n\t"
308 #endif
309                "       cmp     %%o4, %%o5\n\t"
310                "       bne     1b\n\t"
311                "        nop"
312                : "=&r" (tmp), "=&r" (ret)
313                : "r" (dest), "r" (exch)
314                : "memory", "cc");
315
316         return ret;
317 }
318
319 G_GNUC_UNUSED
320 static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
321 {
322        register volatile gint32 *dest asm("g1") = _dest;
323        register gint32 tmp asm("o4");
324        register gint32 ret asm("o5");
325
326        __asm__ __volatile__(
327                "1:     ld      [%%g1], %%o4\n\t"
328                "       add     %%o4, %3, %%o5\n\t"
329                /*      cas     [%%g1], %%o4, %%o5 */
330                "       .word   0xdbe0500c\n\t"
331                "       cmp     %%o4, %%o5\n\t"
332                "       bne     1b\n\t"
333                "        add    %%o5, %3, %%o5"
334                : "=&r" (tmp), "=&r" (ret)
335                : "r" (dest), "r" (add)
336                : "memory", "cc");
337
338         return ret;
339 }
340
341 #elif __s390__
342
343 #define WAPI_ATOMIC_ASM
344
345 static inline gint32 
346 InterlockedCompareExchange(volatile gint32 *dest,
347                            gint32 exch, gint32 comp)
348 {
349         gint32 old;
350
351         __asm__ __volatile__ ("\tLA\t1,%0\n"
352                               "\tLR\t%1,%3\n"
353                               "\tCS\t%1,%2,0(1)\n"
354                               : "+m" (*dest), "=&r" (old)
355                               : "r" (exch), "r" (comp)
356                               : "1", "cc");     
357         return(old);
358 }
359
360 #ifndef __s390x__
361 static inline gpointer
362 InterlockedCompareExchangePointer(volatile gpointer *dest,
363                            gpointer exch, gpointer comp)
364 {
365         gpointer old;
366
367         __asm__ __volatile__ ("\tLA\t1,%0\n"
368                               "\tLR\t%1,%3\n"
369                               "\tCS\t%1,%2,0(1)\n"
370                               : "+m" (*dest), "=&r" (old)
371                               : "r" (exch), "r" (comp)
372                               : "1", "cc");     
373         return(old);
374 }
375 # else
376 static inline gpointer 
377 InterlockedCompareExchangePointer(volatile gpointer *dest, 
378                                   gpointer exch, 
379                                   gpointer comp)
380 {
381         gpointer old;
382
383         __asm__ __volatile__ ("\tLA\t1,%0\n"
384                               "\tLGR\t%1,%3\n"
385                               "\tCSG\t%1,%2,0(1)\n"
386                               : "+m" (*dest), "=&r" (old)
387                               : "r" (exch), "r" (comp)
388                               : "1", "cc");
389
390         return(old);
391 }
392 # endif
393
394 # ifndef __s390x__
395 static inline gint32 
396 InterlockedIncrement(volatile gint32 *val)
397 {
398         gint32 tmp;
399         
400         __asm__ __volatile__ ("\tLA\t2,%1\n"
401                               "0:\tL\t%0,%1\n"
402                               "\tLR\t1,%0\n"
403                               "\tAHI\t1,1\n"
404                               "\tCS\t%0,1,0(2)\n"
405                               "\tJNZ\t0b\n"
406                               "\tLR\t%0,1"
407                               : "=r" (tmp), "+m" (*val)
408                               : : "1", "2", "cc");
409
410         return(tmp);
411 }
412 # else
413 static inline gint32 
414 InterlockedIncrement(volatile gint32 *val)
415 {
416         gint32 tmp;
417         
418         __asm__ __volatile__ ("\tLA\t2,%1\n"
419                               "0:\tLGF\t%0,%1\n"
420                               "\tLGFR\t1,%0\n"
421                               "\tAGHI\t1,1\n"
422                               "\tCS\t%0,1,0(2)\n"
423                               "\tJNZ\t0b\n"
424                               "\tLGFR\t%0,1"
425                               : "=r" (tmp), "+m" (*val)
426                               : : "1", "2", "cc");
427
428         return(tmp);
429 }
430 # endif
431
432 # ifndef __s390x__
433 static inline gint32 
434 InterlockedDecrement(volatile gint32 *val)
435 {
436         gint32 tmp;
437         
438         __asm__ __volatile__ ("\tLA\t2,%1\n"
439                               "0:\tL\t%0,%1\n"
440                               "\tLR\t1,%0\n"
441                               "\tAHI\t1,-1\n"
442                               "\tCS\t%0,1,0(2)\n"
443                               "\tJNZ\t0b\n"
444                               "\tLR\t%0,1"
445                               : "=r" (tmp), "+m" (*val)
446                               : : "1", "2", "cc");
447
448         return(tmp);
449 }
450 # else
451 static inline gint32 
452 InterlockedDecrement(volatile gint32 *val)
453 {
454         gint32 tmp;
455         
456         __asm__ __volatile__ ("\tLA\t2,%1\n"
457                               "0:\tLGF\t%0,%1\n"
458                               "\tLGFR\t1,%0\n"
459                               "\tAGHI\t1,-1\n"
460                               "\tCS\t%0,1,0(2)\n"
461                               "\tJNZ\t0b\n"
462                               "\tLGFR\t%0,1"
463                               : "=r" (tmp), "+m" (*val)
464                               : : "1", "2", "cc");
465
466         return(tmp);
467 }
468 # endif
469
470 static inline gint32 
471 InterlockedExchange(volatile gint32 *val, gint32 new_val)
472 {
473         gint32 ret;
474         
475         __asm__ __volatile__ ("\tLA\t1,%0\n"
476                               "0:\tL\t%1,%0\n"
477                               "\tCS\t%1,%2,0(1)\n"
478                               "\tJNZ\t0b"
479                               : "+m" (*val), "=&r" (ret)
480                               : "r" (new_val)
481                               : "1", "cc");
482
483         return(ret);
484 }
485
486 # ifndef __s390x__
487 static inline gpointer 
488 InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
489 {
490         gpointer ret;
491         
492         __asm__ __volatile__ ("\tLA\t1,%0\n"
493                               "0:\tL\t%1,%0\n"
494                               "\tCS\t%1,%2,0(1)\n"
495                               "\tJNZ\t0b"
496                               : "+m" (*val), "=&r" (ret)
497                               : "r" (new_val)
498                               : "1", "cc");
499
500         return(ret);
501 }
502 # else
503 static inline gpointer
504 InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
505 {
506         gpointer ret;
507         
508         __asm__ __volatile__ ("\tLA\t1,%0\n"
509                               "0:\tLG\t%1,%0\n"
510                               "\tCSG\t%1,%2,0(1)\n"
511                               "\tJNZ\t0b"
512                               : "+m" (*val), "=&r" (ret)
513                               : "r" (new_val)
514                               : "1", "cc");
515
516         return(ret);
517 }
518 # endif
519
520 # ifndef __s390x__
521 static inline gint32 
522 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
523 {
524         gint32 ret;
525
526         __asm__ __volatile__ ("\tLA\t2,%1\n"
527                               "0:\tL\t%0,%1\n"
528                               "\tLR\t1,%0\n"
529                               "\tAR\t1,%2\n"
530                               "\tCS\t%0,1,0(2)\n"
531                               "\tJNZ\t0b"
532                               : "=&r" (ret), "+m" (*val)
533                               : "r" (add) 
534                               : "1", "2", "cc");
535         
536         return(ret);
537 }
538 # else
539 static inline gint32 
540 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
541 {
542         gint32 ret;
543
544         __asm__ __volatile__ ("\tLA\t2,%1\n"
545                               "0:\tLGF\t%0,%1\n"
546                               "\tLGFR\t1,%0\n"
547                               "\tAGR\t1,%2\n"
548                               "\tCS\t%0,1,0(2)\n"
549                               "\tJNZ\t0b"
550                               : "=&r" (ret), "+m" (*val)
551                               : "r" (add) 
552                               : "1", "2", "cc");
553         
554         return(ret);
555 }
556 # endif
557
558 #elif defined(__mono_ppc__)
559 #define WAPI_ATOMIC_ASM
560
561 #ifdef G_COMPILER_CODEWARRIOR
562 static inline gint32 InterlockedIncrement(volatile register gint32 *val)
563 {
565         register gint32 result = 0;
566         register gint32 tmp;
567
568         asm
569         {
570                 @1:
571                         lwarx   tmp, 0, val
572                         addi    result, tmp, 1
573                         stwcx.  result, 0, val
574                         bne-    @1
575         }
576  
577         return result;
578 }
579
580 static inline gint32 InterlockedDecrement(register volatile gint32 *val)
581 {
582         register gint32 result = 0;
583         register gint32 tmp;
584
585         asm
586         {
587                 @1:
588                         lwarx   tmp, 0, val
589                         addi    result, tmp, -1
590                         stwcx.  result, 0, val
591                         bne-    @1
592         }
593
594         return result;
595 }
596 #define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
597
598 static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
599 {
600         register gint32 tmp = 0;
601
602         asm
603         {
604                 @1:
605                         lwarx   tmp, 0, dest
606                         cmpw    tmp, comp
607                         bne-    @2
608                         stwcx.  exch, 0, dest
609                         bne-    @1
610                 @2:
611         }
612
613         return tmp;
614 }
615 static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
616 {
617         register gint32 tmp = 0;
618
619         asm
620         {
621                 @1:
622                         lwarx   tmp, 0, dest
623                         stwcx.  exch, 0, dest
624                         bne-    @1
625         }
626
627         return tmp;
628 }
629 #define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
630 #else
631
632 #if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
633 #define LDREGX "ldarx"
634 #define STREGCXD "stdcx."
635 #define CMPREG "cmpd"
636 #else
637 #define LDREGX "lwarx"
638 #define STREGCXD "stwcx."
639 #define CMPREG "cmpw"
640 #endif
641
642 static inline gint32 InterlockedIncrement(volatile gint32 *val)
643 {
644         gint32 result = 0, tmp;
645
646         __asm__ __volatile__ ("\n1:\n\t"
647                               "lwarx  %0, 0, %2\n\t"
648                               "addi   %1, %0, 1\n\t"
649                               "stwcx. %1, 0, %2\n\t"
650                               "bne-   1b"
651                               : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
652         return result + 1;
653 }
654
655 static inline gint32 InterlockedDecrement(volatile gint32 *val)
656 {
657         gint32 result = 0, tmp;
658
659         __asm__ __volatile__ ("\n1:\n\t"
660                               "lwarx  %0, 0, %2\n\t"
661                               "addi   %1, %0, -1\n\t"
662                               "stwcx. %1, 0, %2\n\t"
663                               "bne-   1b"
664                               : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
665         return result - 1;
666 }
667
668 static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
669                                                 gpointer exch, gpointer comp)
670 {
671         gpointer tmp = NULL;
672
673         __asm__ __volatile__ ("\n1:\n\t"
674                              LDREGX " %0, 0, %1\n\t"
675                              CMPREG " %0, %2\n\t" 
676                              "bne-    2f\n\t"
677                              STREGCXD " %3, 0, %1\n\t"
678                              "bne-    1b\n"
679                              "2:"
680                              : "=&r" (tmp)
681                              : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
682         return(tmp);
683 }
684
685 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
686                                                 gint32 exch, gint32 comp) {
687         gint32 tmp = 0;
688
689         __asm__ __volatile__ ("\n1:\n\t"
690                              "lwarx   %0, 0, %1\n\t"
691                              "cmpw    %0, %2\n\t" 
692                              "bne-    2f\n\t"
693                              "stwcx.  %3, 0, %1\n\t"
694                              "bne-    1b\n"
695                              "2:"
696                              : "=&r" (tmp)
697                              : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
698         return(tmp);
699 }
700
701 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
702 {
703         gint32 tmp = 0;
704
705         __asm__ __volatile__ ("\n1:\n\t"
706                               "lwarx  %0, 0, %2\n\t"
707                               "stwcx. %3, 0, %2\n\t"
708                               "bne    1b"
709                               : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
710         return(tmp);
711 }
712
713 static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
714 {
715         gpointer tmp = NULL;
716
717         __asm__ __volatile__ ("\n1:\n\t"
718                               LDREGX " %0, 0, %2\n\t"
719                               STREGCXD " %3, 0, %2\n\t"
720                               "bne    1b"
721                               : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
722         return(tmp);
723 }
724
725 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
726 {
727         gint32 result, tmp;
728         __asm__ __volatile__ ("\n1:\n\t"
729                               "lwarx  %0, 0, %2\n\t"
730                               "add    %1, %0, %3\n\t"
731                               "stwcx. %1, 0, %2\n\t"
732                               "bne    1b"
733                               : "=&r" (result), "=&r" (tmp)
734                               : "r" (dest), "r" (add) : "cc", "memory");
735         return(result);
736 }
737
738 #undef LDREGX
739 #undef STREGCXD
740 #undef CMPREG
741
742 #endif /* !G_COMPILER_CODEWARRIOR */
743
744 #elif defined(__arm__)
745 #define WAPI_ATOMIC_ASM
746
747 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
748 {
749 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
750         gint32 ret, tmp;
751         __asm__ __volatile__ (  "1:\n"
752                                 "mov    %0, #0\n"
753                                 "ldrex %1, [%2]\n"
754                                 "teq    %1, %3\n"
755                                 "it eq\n"
756                                 "strexeq %0, %4, [%2]\n"
757                                 "teq %0, #0\n"
758                                 "bne 1b\n"
759                                 : "=&r" (tmp), "=&r" (ret)
760                                 : "r" (dest), "r" (comp), "r" (exch)
761                                 : "memory", "cc");
762
763         return ret;
764 #else
765         gint32 a, b;
766
767         __asm__ __volatile__ (    "0:\n\t"
768                                   "ldr %1, [%2]\n\t"
769                                   "cmp %1, %4\n\t"
770                                   "mov %0, %1\n\t"
771                                   "bne 1f\n\t"
772                                   "swp %0, %3, [%2]\n\t"
773                                   "cmp %0, %1\n\t"
774                                   "swpne %3, %0, [%2]\n\t"
775                                   "bne 0b\n\t"
776                                   "1:"
777                                   : "=&r" (a), "=&r" (b)
778                                   : "r" (dest), "r" (exch), "r" (comp)
779                                   : "cc", "memory");
780
781         return a;
782 #endif
783 }
784
785 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
786 {
787 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
788         gpointer ret, tmp;
789         __asm__ __volatile__ (  "1:\n"
790                                 "mov    %0, #0\n"
791                                 "ldrex %1, [%2]\n"
792                                 "teq    %1, %3\n"
793                                 "it eq\n"
794                                 "strexeq %0, %4, [%2]\n"
795                                 "teq %0, #0\n"
796                                 "bne 1b\n"
797                                 : "=&r" (tmp), "=&r" (ret)
798                                 : "r" (dest), "r" (comp), "r" (exch)
799                                 : "memory", "cc");
800
801         return ret;
802 #else
803         gpointer a, b;
804
805         __asm__ __volatile__ (    "0:\n\t"
806                                   "ldr %1, [%2]\n\t"
807                                   "cmp %1, %4\n\t"
808                                   "mov %0, %1\n\t"
809                                   "bne 1f\n\t"
810                                   "swpeq %0, %3, [%2]\n\t"
811                                   "cmp %0, %1\n\t"
812                                   "swpne %3, %0, [%2]\n\t"
813                                   "bne 0b\n\t"
814                                   "1:"
815                                   : "=&r" (a), "=&r" (b)
816                                   : "r" (dest), "r" (exch), "r" (comp)
817                                   : "cc", "memory");
818
819         return a;
820 #endif
821 }
822
823 static inline gint32 InterlockedIncrement(volatile gint32 *dest)
824 {
825 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
826         gint32 ret, flag;
827         __asm__ __volatile__ (  "1:\n"
828                                 "ldrex %0, [%2]\n"
829                                 "add %0, %0, %3\n"
830                                 "strex %1, %0, [%2]\n"
831                                 "teq %1, #0\n"
832                                 "bne 1b\n"
833                                 : "=&r" (ret), "=&r" (flag)
834                                 : "r" (dest), "r" (1)
835                                 : "memory", "cc");
836
837         return ret;
838 #else
839         gint32 a, b, c;
840
841         __asm__ __volatile__ (  "0:\n\t"
842                                 "ldr %0, [%3]\n\t"
843                                 "add %1, %0, %4\n\t"
844                                 "swp %2, %1, [%3]\n\t"
845                                 "cmp %0, %2\n\t"
846                                 "swpne %1, %2, [%3]\n\t"
847                                 "bne 0b"
848                                 : "=&r" (a), "=&r" (b), "=&r" (c)
849                                 : "r" (dest), "r" (1)
850                                 : "cc", "memory");
851
852         return b;
853 #endif
854 }
855
856 static inline gint32 InterlockedDecrement(volatile gint32 *dest)
857 {
858 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
859         gint32 ret, flag;
860         __asm__ __volatile__ (  "1:\n"
861                                 "ldrex %0, [%2]\n"
862                                 "sub %0, %0, %3\n"
863                                 "strex %1, %0, [%2]\n"
864                                 "teq %1, #0\n"
865                                 "bne 1b\n"
866                                 : "=&r" (ret), "=&r" (flag)
867                                 : "r" (dest), "r" (1)
868                                 : "memory", "cc");
869
870         return ret;
871 #else
872         gint32 a, b, c;
873
874         __asm__ __volatile__ (  "0:\n\t"
875                                 "ldr %0, [%3]\n\t"
876                                 "add %1, %0, %4\n\t"
877                                 "swp %2, %1, [%3]\n\t"
878                                 "cmp %0, %2\n\t"
879                                 "swpne %1, %2, [%3]\n\t"
880                                 "bne 0b"
881                                 : "=&r" (a), "=&r" (b), "=&r" (c)
882                                 : "r" (dest), "r" (-1)
883                                 : "cc", "memory");
884
885         return b;
886 #endif
887 }
888
889 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
890 {
891 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
892         gint32 ret, flag;
893         __asm__ __volatile__ (
894                               "1:\n"
895                               "ldrex %0, [%3]\n"
896                               "strex %1, %2, [%3]\n"
897                               "teq %1, #0\n"
898                               "bne 1b\n"
899                               : "=&r" (ret), "=&r" (flag)
900                               : "r" (exch), "r" (dest)
901                               : "memory", "cc");
902         return ret;
903 #else
904         gint32 a;
905
906         __asm__ __volatile__ (  "swp %0, %2, [%1]"
907                                 : "=&r" (a)
908                                 : "r" (dest), "r" (exch));
909
910         return a;
911 #endif
912 }
913
914 static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
915 {
916 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
917         gpointer ret, flag;
918         __asm__ __volatile__ (
919                               "1:\n"
920                               "ldrex %0, [%3]\n"
921                               "strex %1, %2, [%3]\n"
922                               "teq %1, #0\n"
923                               "bne 1b\n"
924                               : "=&r" (ret), "=&r" (flag)
925                               : "r" (exch), "r" (dest)
926                               : "memory", "cc");
927         return ret;
928 #else
929         gpointer a;
930
931         __asm__ __volatile__ (  "swp %0, %2, [%1]"
932                                 : "=&r" (a)
933                                 : "r" (dest), "r" (exch));
934
935         return a;
936 #endif
937 }
938
939 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
940 {
941 #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
942         gint32 ret, tmp, flag;
943         __asm__ __volatile__ (  "1:\n"
944                                 "ldrex %0, [%3]\n"
945                                 "add %1, %0, %4\n"
946                                 "strex %2, %1, [%3]\n"
947                                 "teq %2, #0\n"
948                                 "bne 1b\n"
949                                 : "=&r" (ret), "=&r" (tmp), "=&r" (flag)
950                                 : "r" (dest), "r" (add)
951                                 : "memory", "cc");
952
953         return ret;
954 #else
955         int a, b, c;
956
957         __asm__ __volatile__ (  "0:\n\t"
958                                 "ldr %0, [%3]\n\t"
959                                 "add %1, %0, %4\n\t"
960                                 "swp %2, %1, [%3]\n\t"
961                                 "cmp %0, %2\n\t"
962                                 "swpne %1, %2, [%3]\n\t"
963                                 "bne 0b"
964                                 : "=&r" (a), "=&r" (b), "=&r" (c)
965                                 : "r" (dest), "r" (add)
966                                 : "cc", "memory");
967
968         return a;
969 #endif
970 }
971
972 #elif defined(__ia64__)
973 #define WAPI_ATOMIC_ASM
974
975 #ifdef __INTEL_COMPILER
976 #include <ia64intrin.h>
977 #endif
978
979 static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
980                                                 gint32 exch, gint32 comp)
981 {
982         gint32 old;
983         guint64 real_comp;
984
985 #ifdef __INTEL_COMPILER
986         old = _InterlockedCompareExchange (dest, exch, comp);
987 #else
988         /* cmpxchg4 zero extends the value read from memory */
989         real_comp = (guint64)(guint32)comp;
990         asm volatile ("mov ar.ccv = %2 ;;\n\t"
991                                   "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
992                                   : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
993 #endif
994
995         return(old);
996 }
997
998 static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
999                                                 gpointer exch, gpointer comp)
1000 {
1001         gpointer old;
1002
1003 #ifdef __INTEL_COMPILER
1004         old = _InterlockedCompareExchangePointer (dest, exch, comp);
1005 #else
1006         asm volatile ("mov ar.ccv = %2 ;;\n\t"
1007                                   "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
1008                                   : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
1009 #endif
1010
1011         return(old);
1012 }
1013
1014 static inline gint32 InterlockedIncrement(gint32 volatile *val)
1015 {
1016 #ifdef __INTEL_COMPILER
1017         return _InterlockedIncrement (val);
1018 #else
1019         gint32 old;
1020
1021         do {
1022                 old = *val;
1023         } while (InterlockedCompareExchange (val, old + 1, old) != old);
1024
1025         return old + 1;
1026 #endif
1027 }
1028
1029 static inline gint32 InterlockedDecrement(gint32 volatile *val)
1030 {
1031 #ifdef __INTEL_COMPILER
1032         return _InterlockedDecrement (val);
1033 #else
1034         gint32 old;
1035
1036         do {
1037                 old = *val;
1038         } while (InterlockedCompareExchange (val, old - 1, old) != old);
1039
1040         return old - 1;
1041 #endif
1042 }
1043
1044 static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
1045 {
1046 #ifdef __INTEL_COMPILER
1047         return _InterlockedExchange (dest, new_val);
1048 #else
1049         gint32 res;
1050
1051         do {
1052                 res = *dest;
1053         } while (InterlockedCompareExchange (dest, new_val, res) != res);
1054
1055         return res;
1056 #endif
1057 }
1058
1059 static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
1060 {
1061 #ifdef __INTEL_COMPILER
1062         return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
1063 #else
1064         gpointer res;
1065
1066         do {
1067                 res = *dest;
1068         } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
1069
1070         return res;
1071 #endif
1072 }
1073
1074 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
1075 {
1076         gint32 old;
1077
1078 #ifdef __INTEL_COMPILER
1079         old = _InterlockedExchangeAdd (val, add);
1080 #else
1081         do {
1082                 old = *val;
1083         } while (InterlockedCompareExchange (val, old + add, old) != old);
1084 #endif
1085
1086         return old;
1087 }
1088
1089 #elif defined(__alpha__)
1090 #define WAPI_ATOMIC_ASM
1091
1092 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
1093                                                 gint32 exch, gint32 comp)
1094 {
1095         gint32 old, temp, temp2;
1096         long compq = comp, exchq = exch;
1097
1098         __asm__ __volatile__ (
1099                 "1:     ldl_l %2, %0\n"
1100                 "       mov %2, %1\n"
1101                 "       cmpeq %2, %5, %3\n"
1102                 "       cmovne %3, %4, %2\n"
1103                 "       stl_c %2, %0\n"
1104                 "       beq %2, 1b\n"
1105                 : "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
1106                 : "r" (exchq), "r" (compq), "m" (*dest));
1107         return(old);
1108 }
1109
1110 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
1111 {
1112         gpointer old, temp, temp2;
1113
1114         __asm__ __volatile__ (
1115                 "1:     ldq_l %2, %0\n"
1116                 "       mov %2, %1\n"
1117                 "       cmpeq %2, %5, %3\n"
1118                 "       cmovne %3, %4, %2\n"
1119                 "       stq_c %2, %0\n"
1120                 "       beq %2, 1b\n"
1121                 : "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
1122                 : "r" (exch), "r" (comp), "m" (*dest));
1123         return(old);
1124 }
1125
1126 static inline gint32 InterlockedIncrement(volatile gint32 *val)
1127 {
1128         gint32 temp, cur;
1129         
1130         __asm__ __volatile__ (
1131                 "1:     ldl_l %0, %1\n"
1132                 "       addl %0, %3, %0\n"
1133                 "       mov %0, %2\n"
1134                 "       stl_c %0, %1\n"
1135                 "       beq %0, 1b\n"
1136                 : "=&r" (temp), "=m" (*val), "=r" (cur)
1137                 : "Ir" (1), "m" (*val));
1138         return(cur);
1139 }
1140
1141 static inline gint32 InterlockedDecrement(volatile gint32 *val)
1142 {
1143         gint32 temp, cur;
1144         
1145         __asm__ __volatile__ (
1146                 "1:     ldl_l %0, %1\n"
1147                 "       subl %0, %3, %0\n"
1148                 "       mov %0, %2\n"
1149                 "       stl_c %0, %1\n"
1150                 "       beq %0, 1b\n"
1151                 : "=&r" (temp), "=m" (*val), "=r" (cur)
1152                 : "Ir" (1), "m" (*val));
1153         return(cur);
1154 }
1155
1156 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
1157 {
1158         gint32 ret, temp;
1159
1160         __asm__ __volatile__ (
1161                 "1:     ldl_l %1, %0\n"
1162                 "       mov %3, %2\n"
1163                 "       stl_c %2, %0\n"
1164                 "       beq %2, 1b\n"
1165                 : "=m" (*val), "=&r" (ret), "=&r" (temp)
1166                 : "r" (new_val), "m" (*val));
1167         return(ret);
1168 }
1169
1170 static inline gpointer InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
1171 {
1172         gpointer ret, temp;
1173
1174         __asm__ __volatile__ (
1175                 "1:     ldq_l %1, %0\n"
1176                 "       mov %3, %2\n"
1177                 "       stq_c %2, %0\n"
1178                 "       beq %2, 1b\n"
1179                 : "=m" (*val), "=&r" (ret), "=&r" (temp)
1180                 : "r" (new_val), "m" (*val));
1181         return(ret);
1182 }
1183
1184 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
1185 {
1186         gint32 ret, temp;
1187         
1188         __asm__ __volatile__ (
1189                 "1:     ldl_l   %2, %0\n"
1190                 "       mov     %2, %1\n"
1191                 "       addl    %2, %3, %2\n"
1192                 "       stl_c   %2, %0\n"
1193                 "       beq     %2, 1b\n"
1194                 : "=m" (*val), "=&r" (ret), "=&r" (temp)
1195                 : "r" (add), "m" (*val));
1196         
1197         return(ret);
1198 }
1199
1200 #elif defined(__mips__)
1201 #define WAPI_ATOMIC_ASM
1202
1203 static inline gint32 InterlockedIncrement(volatile gint32 *val)
1204 {
1205         gint32 tmp, result = 0;
1206
1207         __asm__ __volatile__ ("    .set    mips32\n"
1208                               "1:  ll      %0, %2\n"
1209                               "    addu    %1, %0, 1\n"
1210                               "    sc      %1, %2\n"
1211                               "    beqz    %1, 1b\n"
1212                               "    .set    mips0\n"
1213                               : "=&r" (result), "=&r" (tmp), "=m" (*val)
1214                               : "m" (*val));
1215         return result + 1;
1216 }
1217
1218 static inline gint32 InterlockedDecrement(volatile gint32 *val)
1219 {
1220         gint32 tmp, result = 0;
1221
1222         __asm__ __volatile__ ("    .set    mips32\n"
1223                               "1:  ll      %0, %2\n"
1224                               "    subu    %1, %0, 1\n"
1225                               "    sc      %1, %2\n"
1226                               "    beqz    %1, 1b\n"
1227                               "    .set    mips0\n"
1228                               : "=&r" (result), "=&r" (tmp), "=m" (*val)
1229                               : "m" (*val));
1230         return result - 1;
1231 }
1232
1233 #define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
1234
1235 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
1236                                                 gint32 exch, gint32 comp) {
1237         gint32 old, tmp;
1238
1239         __asm__ __volatile__ ("    .set    mips32\n"
1240                               "1:  ll      %0, %2\n"
1241                               "    bne     %0, %5, 2f\n"
1242                               "    move    %1, %4\n"
1243                               "    sc      %1, %2\n"
1244                               "    beqz    %1, 1b\n"
1245                               "2:  .set    mips0\n"
1246                               : "=&r" (old), "=&r" (tmp), "=m" (*dest)
1247                               : "m" (*dest), "r" (exch), "r" (comp));
1248         return(old);
1249 }
1250
1251 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
1252 {
1253         gint32 result, tmp;
1254
1255         __asm__ __volatile__ ("    .set    mips32\n"
1256                               "1:  ll      %0, %2\n"
1257                               "    move    %1, %4\n"
1258                               "    sc      %1, %2\n"
1259                               "    beqz    %1, 1b\n"
1260                               "    .set    mips0\n"
1261                               : "=&r" (result), "=&r" (tmp), "=m" (*dest)
1262                               : "m" (*dest), "r" (exch));
1263         return(result);
1264 }
1265 #define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
1266
1267 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
1268 {
1269         gint32 result, tmp;
1270
1271         __asm__ __volatile__ ("    .set    mips32\n"
1272                               "1:  ll      %0, %2\n"
1273                               "    addu    %1, %0, %4\n"
1274                               "    sc      %1, %2\n"
1275                               "    beqz    %1, 1b\n"
1276                               "    .set    mips0\n"
1277                               : "=&r" (result), "=&r" (tmp), "=m" (*dest)
1278                               : "m" (*dest), "r" (add));
1279         return result;
1280 }
1281
1282 #else
1283
1284 extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
1285 extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
1286 extern gint32 InterlockedIncrement(volatile gint32 *dest);
1287 extern gint32 InterlockedDecrement(volatile gint32 *dest);
1288 extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
1289 extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
1290 extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
1291
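/*
 * Editorial sketch (compiled out, not part of the original header): the
 * extern declarations above are expected to be satisfied by a separate,
 * architecture-independent compilation unit.  Assuming a single global
 * pthread mutex is acceptable, the required semantics (each call returns
 * the value stored before the operation) could be provided like this:
 */
#if 0
#include <pthread.h>

static pthread_mutex_t fallback_mutex = PTHREAD_MUTEX_INITIALIZER;

gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
        gint32 old;

        pthread_mutex_lock (&fallback_mutex);
        old = *dest;
        if (old == comp)
                *dest = exch;
        pthread_mutex_unlock (&fallback_mutex);

        return old;
}

gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
        gint32 old;

        pthread_mutex_lock (&fallback_mutex);
        old = *dest;
        *dest = old + add;
        pthread_mutex_unlock (&fallback_mutex);

        return old;
}

/* The remaining fallbacks follow the same lock/read/modify/unlock shape. */
#endif
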
1292 #if defined(__hppa__)
1293 #define WAPI_ATOMIC_ASM
1294 #endif
1295
1296 #endif
1297
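/*
 * Editorial usage sketch (compiled out, not part of the original header):
 * whichever backend is selected above, callers typically build small
 * compare-and-swap retry loops on top of these primitives.  The
 * hypothetical helper below atomically raises a counter so that it is at
 * least `candidate` and returns the value it last observed.
 */
#if 0
static inline gint32
sketch_interlocked_store_max (volatile gint32 *val, gint32 candidate)
{
        gint32 old;

        do {
                old = *val;
                if (old >= candidate)
                        return old;     /* already large enough, no store needed */
        } while (InterlockedCompareExchange (val, candidate, old) != old);

        return old;     /* the value that was replaced */
}
#endif
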
1298 #endif /* _WAPI_ATOMIC_H_ */