2007-10-20 Zoltan Varga <vargaz@gmail.com>
[mono.git] / mono / io-layer / atomic.h
1 /*
2  * atomic.h:  Atomic operations
3  *
4  * Author:
5  *      Dick Porter (dick@ximian.com)
6  *
7  * (C) 2002 Ximian, Inc.
8  */
9
10 #ifndef _WAPI_ATOMIC_H_
11 #define _WAPI_ATOMIC_H_
12
13 #include <glib.h>
14
15 #include "mono/io-layer/wapi.h"
16
17 #if defined(__i386__) || defined(__x86_64__)
18 #define WAPI_ATOMIC_ASM
19
/*
 * NB: On i386 the *Pointer() functions assume that
 * sizeof(pointer)==sizeof(gint32); on x86_64 the 64-bit (cmpxchgq)
 * forms are used instead, so that assumption applies only to 32-bit.
 *
 * NB2: These asm functions assume 486+ (some of the opcodes dont
 * exist on 386).  If this becomes an issue, we can get configure to
 * fall back to the non-atomic C versions of these calls.
 */
28
/*
 * Atomically: if *dest == comp, store exch into *dest.
 * Returns the value *dest held before the operation (cmpxchg leaves
 * the original memory value in EAX regardless of success).
 * NOTE(review): no "memory" clobber here, so this is not a compiler
 * barrier for unrelated memory — confirm callers do not rely on that.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));	
	return(old);
}
39
/*
 * Pointer-sized compare-and-swap: if *dest == comp, store exch.
 * Returns the prior value of *dest.  Uses cmpxchgq on x86_64 and
 * cmpxchgl on i386, so the operand width matches sizeof(gpointer).
 */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("lock; "
#ifdef __x86_64__
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));	

	return(old);
}
56
/*
 * Atomically increment *val by 1 and return the NEW value.
 * xadd leaves the old value in tmp; tmp+1 is the post-increment value.
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (1), "m" (*val));

	return(tmp+1);
}
67
/*
 * Atomically decrement *val by 1 and return the NEW value.
 * xadd with -1 leaves the old value in tmp; tmp-1 is the result.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (-1), "m" (*val));

	return(tmp-1);
}
78
79 /*
80  * See
81  * http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
82  * for the reasons for using cmpxchg and a loop here.
83  *
84  * That url is no longer valid, but it's still in the google cache at the
85  * moment: http://www.google.com/search?q=cache:http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
86  *
87  * For the time being, http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
88  * might work.  Bet it will change soon enough though.
89  */
/*
 * Atomically store new_val into *val; returns the previous value.
 * Implemented as a cmpxchg loop (rather than a plain xchg) per the
 * MSDN article referenced above: the initial non-atomic read of *val
 * seeds EAX, and the loop retries until the cmpxchg observes it.
 */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;
	
	__asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
100
/*
 * Pointer-sized atomic exchange: stores new_val into *val and returns
 * the previous pointer.  Same cmpxchg-retry pattern as
 * InterlockedExchange, widened to cmpxchgq on x86_64.
 */
static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer ret;
	
	__asm__ __volatile__ ("1:; lock; "
#ifdef __x86_64__
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
118
/*
 * Atomically add 'add' to *val; returns the PREVIOUS value of *val
 * (xadd places the old memory value in the register operand).
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;
	
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (ret), "=m" (*val)
			      : "0" (add), "m" (*val));
	
	return(ret);
}
129
130 #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
131 #define WAPI_ATOMIC_ASM
132
/*
 * SPARC compare-and-swap: if *_dest == _comp, store _exch.
 * Returns the prior value of *_dest (cas leaves it in %o5/exch).
 * The .word is the hand-encoded "cas [%g1], %o4, %o5" opcode,
 * emitted raw because older assemblers do not know the mnemonic.
 */
G_GNUC_UNUSED 
static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 comp asm("o4") = _comp;
       register gint32 exch asm("o5") = _exch;

       __asm__ __volatile__(
	       /* cas [%%g1], %%o4, %%o5 */
	       ".word 0xdbe0500c"
	       : "=r" (exch)
	       : "0" (exch), "r" (dest), "r" (comp)
	       : "memory");

       return exch;
}
149
/*
 * Pointer-sized compare-and-swap; returns prior *_dest.
 * Uses 64-bit casx on SPARCV9, 32-bit cas otherwise, both as raw
 * hand-encoded opcodes.
 */
G_GNUC_UNUSED 
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
{
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer comp asm("o4") = _comp;
       register gpointer exch asm("o5") = _exch;

       __asm__ __volatile__(
#ifdef SPARCV9
	       /* casx [%%g1], %%o4, %%o5 */
	       ".word 0xdbf0500c"
#else
	       /* cas [%%g1], %%o4, %%o5 */
	       ".word 0xdbe0500c"
#endif
	       : "=r" (exch)
	       : "0" (exch), "r" (dest), "r" (comp)
	       : "memory");

       return exch;
}
171
/*
 * Atomic increment via a load/cas retry loop; returns the NEW value
 * (the delay-slot "add %o5, 1, %o5" converts the old value to new
 * after a successful cas).
 */
G_GNUC_UNUSED 
static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
	       "1:     ld      [%%g1], %%o4\n\t"
	       "       add     %%o4, 1, %%o5\n\t"
	       /*      cas     [%%g1], %%o4, %%o5 */
	       "       .word   0xdbe0500c\n\t"
	       "       cmp     %%o4, %%o5\n\t"
	       "       bne     1b\n\t"
	       "        add    %%o5, 1, %%o5"
	       : "=&r" (tmp), "=&r" (ret)
	       : "r" (dest)
	       : "memory", "cc");

        return ret;
}
193
/*
 * Atomic decrement via a load/cas retry loop; returns the NEW value
 * (delay-slot "sub %o5, 1, %o5" after a successful cas).
 */
G_GNUC_UNUSED 
static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
	       "1:     ld      [%%g1], %%o4\n\t"
	       "       sub     %%o4, 1, %%o5\n\t"
	       /*      cas     [%%g1], %%o4, %%o5 */
	       "       .word   0xdbe0500c\n\t"
	       "       cmp     %%o4, %%o5\n\t"
	       "       bne     1b\n\t"
	       "        sub    %%o5, 1, %%o5"
	       : "=&r" (tmp), "=&r" (ret)
	       : "r" (dest)
	       : "memory", "cc");

        return ret;
}
215
/*
 * Atomic exchange: store exch into *_dest, return the previous value.
 * cas-retry loop: reload and retry until the cas succeeds (i.e. the
 * value did not change between the ld and the cas).
 */
G_GNUC_UNUSED
static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
	       "1:     ld      [%%g1], %%o4\n\t"
	       "       mov     %3, %%o5\n\t"
	       /*      cas     [%%g1], %%o4, %%o5 */
	       "       .word   0xdbe0500c\n\t"
	       "       cmp     %%o4, %%o5\n\t"
	       "       bne     1b\n\t"
	       "        nop"
	       : "=&r" (tmp), "=&r" (ret)
	       : "r" (dest), "r" (exch)
	       : "memory", "cc");

        return ret;
}
237
/*
 * Pointer-sized atomic exchange; returns the previous pointer.
 * Uses 64-bit ldx/casx on SPARCV9 and 32-bit ld/cas otherwise.
 */
G_GNUC_UNUSED
static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
{
       register volatile gpointer *dest asm("g1") = _dest;
       register gpointer tmp asm("o4");
       register gpointer ret asm("o5");

       __asm__ __volatile__(
#ifdef SPARCV9
	       "1:     ldx     [%%g1], %%o4\n\t"
#else
	       "1:     ld      [%%g1], %%o4\n\t"
#endif
	       "       mov     %3, %%o5\n\t"
#ifdef SPARCV9
	       /*      casx    [%%g1], %%o4, %%o5 */
	       "       .word   0xdbf0500c\n\t"
#else
	       /*      cas     [%%g1], %%o4, %%o5 */
	       "       .word   0xdbe0500c\n\t"
#endif
	       "       cmp     %%o4, %%o5\n\t"
	       "       bne     1b\n\t"
	       "        nop"
	       : "=&r" (tmp), "=&r" (ret)
	       : "r" (dest), "r" (exch)
	       : "memory", "cc");

        return ret;
}
268
/*
 * Atomic add: *_dest += add.
 * NOTE(review): the delay-slot "add %o5, %3, %o5" runs after a
 * successful cas, so %o5 ends up holding old+add — this returns the
 * NEW value, unlike Win32 InterlockedExchangeAdd which returns the
 * old one.  Confirm against callers before relying on either.
 */
G_GNUC_UNUSED
static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
{
       register volatile gint32 *dest asm("g1") = _dest;
       register gint32 tmp asm("o4");
       register gint32 ret asm("o5");

       __asm__ __volatile__(
	       "1:     ld      [%%g1], %%o4\n\t"
	       "       add     %%o4, %3, %%o5\n\t"
	       /*      cas     [%%g1], %%o4, %%o5 */
	       "       .word   0xdbe0500c\n\t"
	       "       cmp     %%o4, %%o5\n\t"
	       "       bne     1b\n\t"
	       "        add    %%o5, %3, %%o5"
	       : "=&r" (tmp), "=&r" (ret)
	       : "r" (dest), "r" (add)
	       : "memory", "cc");

        return ret;
}
290
291 #elif __s390__
292
293 #define WAPI_ATOMIC_ASM
294
/*
 * s390 compare-and-swap via the CS instruction: if *dest == comp,
 * store exch; returns the prior value of *dest (CS loads the current
 * memory value into 'old' when the comparison fails; on success 'old'
 * already holds comp, which equals the prior value).
 */
static inline gint32 
InterlockedCompareExchange(volatile gint32 *dest,
			   gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLR\t%1,%3\n"
			      "\tCS\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");	
	return(old);
}
309
310 #ifndef __s390x__
/*
 * 31/32-bit s390 pointer CAS: pointers fit CS's 32-bit operand, so
 * this mirrors InterlockedCompareExchange.  Returns the prior *dest.
 */
static inline gpointer
InterlockedCompareExchangePointer(volatile gpointer *dest,
			   gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLR\t%1,%3\n"
			      "\tCS\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");	
	return(old);
}
325 # else
/*
 * 64-bit (s390x) pointer CAS using the 64-bit LGR/CSG forms.
 * Returns the prior value of *dest.
 */
static inline gpointer 
InterlockedCompareExchangePointer(volatile gpointer *dest, 
				  gpointer exch, 
				  gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLGR\t%1,%3\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");

	return(old);
}
342 # endif
343
344 # ifndef __s390x__
/*
 * Atomic increment via a load/CS retry loop; returns the NEW value
 * (register 1 holds old+1 and is copied to tmp after a successful CS).
 */
static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
362 # else
/*
 * s390x atomic increment: same CS retry loop as the 31-bit variant but
 * with 64-bit loads (LGF/LGFR/AGHI) around the 32-bit CS.
 * Returns the NEW value.
 */
static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGHI\t1,1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLGFR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
380 # endif
381
382 # ifndef __s390x__
/*
 * Atomic decrement via a load/CS retry loop; returns the NEW value.
 */
static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,-1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
400 # else
/*
 * s390x atomic decrement (64-bit register forms around 32-bit CS);
 * returns the NEW value.
 */
static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGHI\t1,-1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLGFR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
418 # endif
419
/*
 * Atomic exchange: store new_val into *val, return the previous value.
 * CS retry loop — reload *val and retry until CS succeeds.
 */
static inline gint32 
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;
	
	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
435
436 # ifndef __s390x__
/*
 * 31/32-bit pointer exchange (pointers fit the 32-bit CS operand);
 * returns the previous pointer value.
 */
static inline gpointer 
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret;
	
	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
452 # else
/*
 * s390x pointer exchange using the 64-bit LG/CSG forms; returns the
 * previous pointer value.
 */
static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret;
	
	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tLG\t%1,%0\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
468 # endif
469
470 # ifndef __s390x__
/*
 * Atomic add: *val += add; returns the PREVIOUS value (ret holds the
 * value loaded before the successful CS).
 */
static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAR\t1,%2\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b"
			      : "=&r" (ret), "+m" (*val)
			      : "r" (add) 
			      : "1", "2", "cc");
	
	return(ret);
}
488 # else
/*
 * s390x atomic add (64-bit register arithmetic around 32-bit CS);
 * returns the PREVIOUS value of *val.
 */
static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGR\t1,%2\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b"
			      : "=&r" (ret), "+m" (*val)
			      : "r" (add) 
			      : "1", "2", "cc");
	
	return(ret);
}
506 # endif
507
508 #elif defined(__ppc__) || defined (__powerpc__)
509 #define WAPI_ATOMIC_ASM
510
/*
 * PowerPC atomic increment using an lwarx/stwcx. reservation loop;
 * returns the NEW value (result holds the old value, +1 applied in C).
 * NOTE(review): no lwsync/isync here, so this has no memory-barrier
 * semantics beyond the atomic update itself — confirm callers.
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, 1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
	return result + 1;
}
523
/*
 * PowerPC atomic decrement (lwarx/stwcx. loop); returns the NEW value.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, -1\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
	return result - 1;
}
536
537 #define InterlockedCompareExchangePointer(dest,exch,comp) (gpointer)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
538
/*
 * PowerPC compare-and-swap: if *dest == comp, store exch.
 * Returns the value of *dest observed by lwarx (the prior value in
 * both the success and failure cases).  Retries only when stwcx.
 * loses the reservation; a mismatch exits immediately via 2:.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			     "lwarx   %0, 0, %1\n\t"
			     "cmpw    %0, %2\n\t" 
			     "bne-    2f\n\t"
			     "stwcx.  %3, 0, %1\n\t"
			     "bne-    1b\n"
			     "2:"
			     : "=&r" (tmp)
			     : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
554
/*
 * PowerPC atomic exchange: store exch into *dest, return the previous
 * value (lwarx result), retrying while stwcx. fails.
 */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "stwcx. %3, 0, %2\n\t"
			      "bne    1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
566 #define InterlockedExchangePointer(dest,exch) (gpointer)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
567
/*
 * PowerPC atomic add: *dest += add; returns the PREVIOUS value
 * (result is the lwarx-loaded value before the add).
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	gint32 result, tmp;
	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "add    %1, %0, %3\n\t"
			      "stwcx. %1, 0, %2\n\t"
			      "bne    1b"
			      : "=&r" (result), "=&r" (tmp)
			      : "r" (dest), "r" (add) : "cc", "memory");
	return(result);
}
580
581 #elif defined(__arm__)
582 #define WAPI_ATOMIC_ASM
583
/*
 * ARM (pre-v6) compare-and-swap built from swp, since ldrex/strex are
 * unavailable: load, compare against comp, and only if equal swap in
 * exch; if another writer changed the word between ldr and swp, the
 * swpne undoes the store and the loop retries.  Returns the value
 * observed before the (attempted) exchange.
 * NOTE(review): swp is deprecated/unsupported on later ARM cores and
 * is not SMP-safe there — confirm the target cores for this path.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
	int a, b;

	__asm__ __volatile__ (    "0:\n\t"
				  "ldr %1, [%2]\n\t"
				  "cmp %1, %4\n\t"
				  "mov %0, %1\n\t"
				  "bne 1f\n\t"
				  "swp %0, %3, [%2]\n\t"
				  "cmp %0, %1\n\t"
				  "swpne %3, %0, [%2]\n\t"
				  "bne 0b\n\t"
				  "1:"
				  : "=&r" (a), "=&r" (b)
				  : "r" (dest), "r" (exch), "r" (comp)
				  : "cc", "memory");

	return a;
}
604
/*
 * Pointer variant of the swp-based CAS above (pointers are 32-bit on
 * this target).  Uses swpeq where the gint32 version uses plain swp;
 * the bne already guards the path, so the condition is redundant but
 * harmless.  Returns the prior value of *dest.
 */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer a, b;

	__asm__ __volatile__ (    "0:\n\t"
				  "ldr %1, [%2]\n\t"
				  "cmp %1, %4\n\t"
				  "mov %0, %1\n\t"
				  "bne 1f\n\t"
				  "swpeq %0, %3, [%2]\n\t"
				  "cmp %0, %1\n\t"
				  "swpne %3, %0, [%2]\n\t"
				  "bne 0b\n\t"
				  "1:"
				  : "=&r" (a), "=&r" (b)
				  : "r" (dest), "r" (exch), "r" (comp)
				  : "cc", "memory");

	return a;
}
625
/*
 * ARM atomic increment via load/add/swp with undo-and-retry on
 * interference; returns b, the NEW (incremented) value.
 */
static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
	int a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (1)
				: "cc", "memory");

	return b;
}
643
/*
 * ARM atomic decrement (adds -1 with the same swp retry pattern as
 * InterlockedIncrement); returns the NEW value.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
	int a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (-1)
				: "cc", "memory");

	return b;
}
661
/*
 * ARM atomic exchange: a single swp stores exch and returns the old
 * value of *dest in one instruction.
 */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	int a;

	__asm__ __volatile__ (  "swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
}
672
/*
 * Pointer variant of the single-swp exchange; returns the old pointer.
 */
static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
	gpointer a;

	__asm__ __volatile__ (  "swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
}
683
/*
 * ARM atomic add: *dest += add, same swp retry pattern as the
 * increment; returns a, the PREVIOUS value (matching Win32 semantics).
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	int a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (add)
				: "cc", "memory");

	return a;
}
701
702 #elif defined(__ia64__)
703 #define WAPI_ATOMIC_ASM
704
705 #ifdef __INTEL_COMPILER
706 #include <ia64intrin.h>
707 #endif
708
/*
 * IA64 compare-and-swap via cmpxchg4.acq (or the Intel-compiler
 * intrinsic).  Returns the prior value of *dest.  comp is zero-
 * extended to 64 bits first because cmpxchg4 zero extends the value
 * read from memory.  Note: real_comp is unused (a warning) in the
 * __INTEL_COMPILER branch.
 */
static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;
	guint64 real_comp;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchange (dest, exch, comp);
#else
	/* cmpxchg4 zero extends the value read from memory */
	real_comp = (guint64)(guint32)comp;
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
				  "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
				  : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
#endif

	return(old);
}
727
/*
 * IA64 pointer (64-bit) compare-and-swap via cmpxchg8.acq or the
 * intrinsic; returns the prior value of *dest.
 */
static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
						gpointer exch, gpointer comp)
{
	gpointer old;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchangePointer (dest, exch, comp);
#else
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
				  "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
				  : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
#endif

	return(old);
}
743
/*
 * IA64 atomic increment: intrinsic when available, otherwise a CAS
 * retry loop over InterlockedCompareExchange.  Returns the NEW value.
 */
static inline gint32 InterlockedIncrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedIncrement (val);
#else
	gint32 old;

	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old + 1, old) != old);

	return old + 1;
#endif
}
758
/*
 * IA64 atomic decrement: intrinsic or CAS retry loop; returns the
 * NEW value.
 */
static inline gint32 InterlockedDecrement(gint32 volatile *val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedDecrement (val);
#else
	gint32 old;

	do {
		old = *val;
	} while (InterlockedCompareExchange (val, old - 1, old) != old);

	return old - 1;
#endif
}
773
/*
 * IA64 atomic exchange: intrinsic or CAS retry loop; returns the
 * PREVIOUS value of *dest.
 */
static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
{
#ifdef __INTEL_COMPILER
	return _InterlockedExchange (dest, new_val);
#else
	gint32 res;

	do {
		res = *dest;
	} while (InterlockedCompareExchange (dest, new_val, res) != res);

	return res;
#endif
}
788
/*
 * IA64 pointer exchange: 64-bit intrinsic or CAS retry loop; returns
 * the PREVIOUS pointer value.
 */
static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
{
#ifdef __INTEL_COMPILER
	return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
#else
	gpointer res;

	do {
		res = *dest;
	} while (InterlockedCompareExchangePointer (dest, new_val, res) != res);

	return res;
#endif
}
803
804 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
805 {
806         gint32 old;
807
808 #ifdef __INTEL_COMPILER
809         old = _InterlockedExchangeAdd (val, add);
810 #else
811         do {
812                 old = *val;
813         } while (InterlockedCompareExchange (val, old + add, old) != old);
814
815         return old;
816 #endif
817 }
818
819 #elif defined(__alpha__)
820 #define WAPI_ATOMIC_ASM
821
/*
 * Alpha compare-and-swap via an ldl_l/stl_c reservation loop: load-
 * locked, conditionally substitute exch when the value equals comp,
 * then store-conditional (retrying if the reservation was lost).
 * Returns the value observed before the operation.  Inputs are
 * widened to long because Alpha registers are 64-bit.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old, temp, temp2;
	long compq = comp, exchq = exch;

	__asm__ __volatile__ (
		"1:	ldl_l %2, %0\n"
		"	mov %2, %1\n"
		"	cmpeq %2, %5, %3\n"
		"	cmovne %3, %4, %2\n"
		"	stl_c %2, %0\n"
		"	beq %2, 1b\n"
		: "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
		: "r" (exchq), "r" (compq), "m" (*dest));
	return(old);
}
839
/*
 * 64-bit (pointer) variant of the Alpha CAS, using ldq_l/stq_c.
 * Returns the prior value of *dest.
 */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old, temp, temp2;

	__asm__ __volatile__ (
		"1:	ldq_l %2, %0\n"
		"	mov %2, %1\n"
		"	cmpeq %2, %5, %3\n"
		"	cmovne %3, %4, %2\n"
		"	stq_c %2, %0\n"
		"	beq %2, 1b\n"
		: "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
		: "r" (exch), "r" (comp), "m" (*dest));
	return(old);
}
855
/*
 * Alpha atomic increment (ldl_l/addl/stl_c loop); returns the NEW
 * value (cur is copied from the register after the add).
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 temp, cur;
	
	__asm__ __volatile__ (
		"1:	ldl_l %0, %1\n"
		"	addl %0, %3, %0\n"
		"	mov %0, %2\n"
		"	stl_c %0, %1\n"
		"	beq %0, 1b\n"
		: "=&r" (temp), "=m" (*val), "=r" (cur)
		: "Ir" (1), "m" (*val));
	return(cur);
}
870
/*
 * Alpha atomic decrement (ldl_l/subl/stl_c loop); returns the NEW
 * value.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 temp, cur;
	
	__asm__ __volatile__ (
		"1:	ldl_l %0, %1\n"
		"	subl %0, %3, %0\n"
		"	mov %0, %2\n"
		"	stl_c %0, %1\n"
		"	beq %0, 1b\n"
		: "=&r" (temp), "=m" (*val), "=r" (cur)
		: "Ir" (1), "m" (*val));
	return(cur);
}
885
/*
 * Alpha atomic exchange: store new_val into *val, return the previous
 * value (captured by ldl_l before the conditional store).
 */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret, temp;

	__asm__ __volatile__ (
		"1:	ldl_l %1, %0\n"
		"	mov %3, %2\n"
		"	stl_c %2, %0\n"
		"	beq %2, 1b\n"
		: "=m" (*val), "=&r" (ret), "=&r" (temp)
		: "r" (new_val), "m" (*val));
	return(ret);
}
899
/*
 * 64-bit (pointer) variant of the Alpha exchange, using ldq_l/stq_c;
 * returns the previous pointer value.
 */
static inline gpointer InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret, temp;

	__asm__ __volatile__ (
		"1:	ldq_l %1, %0\n"
		"	mov %3, %2\n"
		"	stq_c %2, %0\n"
		"	beq %2, 1b\n"
		: "=m" (*val), "=&r" (ret), "=&r" (temp)
		: "r" (new_val), "m" (*val));
	return(ret);
}
913
/*
 * Alpha atomic add: *val += add; returns the PREVIOUS value (saved
 * into ret before the add).
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret, temp;
	
	__asm__ __volatile__ (
		"1:	ldl_l	%2, %0\n"
		"	mov	%2, %1\n"
		"	addl	%2, %3, %2\n"
		"	stl_c	%2, %0\n"
		"	beq	%2, 1b\n"
		: "=m" (*val), "=&r" (ret), "=&r" (temp)
		: "r" (add), "m" (*val));
	
	return(ret);
}
929
930 #elif defined(__mips__)
931 #define WAPI_ATOMIC_ASM
932
/*
 * Atomically increment *val by 1 using a MIPS ll/sc (load-linked /
 * store-conditional) retry loop, and return the new value: 'result'
 * holds the value loaded before the add, so result + 1 is the value
 * actually stored.  sc writes 0 into its source register on failure,
 * hence the "beqz %1, 1b" retry.
 * The .set mips32 / .set mips0 pair temporarily enables the ll/sc
 * opcodes regardless of the assembler's default ISA setting.
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
        gint32 tmp, result = 0;

        __asm__ __volatile__ ("    .set    mips32\n"
                              "1:  ll      %0, %2\n"      /* load-linked: result = *val */
                              "    addu    %1, %0, 1\n"   /* tmp = result + 1 */
                              "    sc      %1, %2\n"      /* store-conditional tmp to *val */
                              "    beqz    %1, 1b\n"      /* store failed -> retry */
                              "    .set    mips0\n"
                              : "=&r" (result), "=&r" (tmp), "=m" (*val)
                              : "m" (*val));
        return result + 1;
}
947
/*
 * Atomically decrement *val by 1 with a MIPS ll/sc retry loop and
 * return the new value ('result' is the pre-decrement value loaded by
 * ll, so result - 1 is what was stored).  Mirrors InterlockedIncrement
 * above with subu in place of addu.
 */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
        gint32 tmp, result = 0;

        __asm__ __volatile__ ("    .set    mips32\n"
                              "1:  ll      %0, %2\n"      /* load-linked: result = *val */
                              "    subu    %1, %0, 1\n"   /* tmp = result - 1 */
                              "    sc      %1, %2\n"      /* store-conditional tmp to *val */
                              "    beqz    %1, 1b\n"      /* store failed -> retry */
                              "    .set    mips0\n"
                              : "=&r" (result), "=&r" (tmp), "=m" (*val)
                              : "m" (*val));
        return result - 1;
}
962
963 #define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
964
/*
 * Atomic compare-and-swap: if *dest == comp, store exch into *dest.
 * Returns the value *dest held when it was loaded ('old'), i.e. the
 * previous value whether or not the swap happened — standard Win32
 * InterlockedCompareExchange semantics.  If *dest does not match comp
 * the code branches straight to label 2 and returns the mismatching
 * value; on a spurious sc failure it retries from the ll.
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
                                                gint32 exch, gint32 comp) {
        gint32 old, tmp;

        __asm__ __volatile__ ("    .set    mips32\n"
                              "1:  ll      %0, %2\n"      /* load-linked: old = *dest */
                              "    bne     %0, %5, 2f\n"  /* old != comp -> done, no store */
                              "    move    %1, %4\n"      /* tmp = exch */
                              "    sc      %1, %2\n"      /* store-conditional tmp to *dest */
                              "    beqz    %1, 1b\n"      /* store failed -> retry */
                              "2:  .set    mips0\n"
                              : "=&r" (old), "=&r" (tmp), "=m" (*dest)
                              : "m" (*dest), "r" (exch), "r" (comp));
        return(old);
}
980
/*
 * Atomically store exch into *dest and return the previous value of
 * *dest, via a MIPS ll/sc retry loop.  'result' keeps the value loaded
 * by ll; 'tmp' carries exch into sc (which overwrites it with the
 * success flag).
 */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
        gint32 result, tmp;

        __asm__ __volatile__ ("    .set    mips32\n"
                              "1:  ll      %0, %2\n"      /* load-linked: result = *dest */
                              "    move    %1, %4\n"      /* tmp = exch */
                              "    sc      %1, %2\n"      /* store-conditional tmp to *dest */
                              "    beqz    %1, 1b\n"      /* store failed -> retry */
                              "    .set    mips0\n"
                              : "=&r" (result), "=&r" (tmp), "=m" (*dest)
                              : "m" (*dest), "r" (exch));
        return(result);
}
995 #define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
996
/*
 * Atomically add 'add' to *dest and return the value *dest held before
 * the addition (Win32 InterlockedExchangeAdd semantics).  'result' is
 * the ll-loaded pre-add value; 'tmp' holds the sum for the conditional
 * store and is clobbered by sc's success flag.
 */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
        gint32 result, tmp;

        __asm__ __volatile__ ("    .set    mips32\n"
                              "1:  ll      %0, %2\n"      /* load-linked: result = *dest */
                              "    addu    %1, %0, %4\n"  /* tmp = result + add */
                              "    sc      %1, %2\n"      /* store-conditional tmp to *dest */
                              "    beqz    %1, 1b\n"      /* store failed -> retry */
                              "    .set    mips0\n"
                              : "=&r" (result), "=&r" (tmp), "=m" (*dest)
                              : "m" (*dest), "r" (add));
        return result;
}
1011
#else

/*
 * No inline-assembly implementation for this architecture: declare the
 * Interlocked* operations here and use out-of-line C definitions
 * (presumably provided elsewhere in the io-layer — WAPI_ATOMIC_ASM is
 * not defined, which is the switch the rest of the code tests).
 *
 * Return-value contract, matching the inline versions above:
 *   CompareExchange[Pointer] — previous value of *dest
 *   Increment / Decrement    — new value of *dest
 *   Exchange[Pointer]        — previous value of *dest
 *   ExchangeAdd              — previous value of *dest
 */
extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
extern gint32 InterlockedIncrement(volatile gint32 *dest);
extern gint32 InterlockedDecrement(volatile gint32 *dest);
extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);

/* NOTE(review): hppa claims WAPI_ATOMIC_ASM even though it takes the
 * extern fallback declarations — presumably its implementations live in
 * a separate hppa-specific source file; verify before relying on it. */
#if defined(__hppa__)
#define WAPI_ATOMIC_ASM
#endif

#endif
1027
1028 #endif /* _WAPI_ATOMIC_H_ */