1 /*
2  * atomic.h:  Atomic operations
3  *
4  * Author:
5  *      Dick Porter (dick@ximian.com)
6  *
7  * (C) 2002 Ximian, Inc.
8  */
9
10 #ifndef _WAPI_ATOMIC_H_
11 #define _WAPI_ATOMIC_H_
12
13 #include <glib.h>
14
15 #include "mono/io-layer/wapi.h"
16
17 #if defined(__i386__) || defined(__x86_64__)
18 #define WAPI_ATOMIC_ASM
19
20 /*
21  * NB: On 32-bit x86 the *Pointer() functions here assume that
22  * sizeof(gpointer) == sizeof(gint32); the x86_64 paths use 64-bit opcodes.
23  *
24  * NB2: These asm functions assume 486+ (some of the opcodes don't
25  * exist on 386).  If this becomes an issue, we can get configure to
26  * fall back to the non-atomic C versions of these calls.
27  */
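/*
 * Usage sketch (illustrative only, not part of this header): a typical
 * one-shot initialisation guard built on InterlockedCompareExchange.
 * Both "initialized" and "perform_init" are hypothetical names:
 *
 *   static volatile gint32 initialized = 0;
 *
 *   if (InterlockedCompareExchange (&initialized, 1, 0) == 0)
 *           perform_init ();
 *
 * Only the caller that observes the old value 0 runs the setup; every
 * other caller sees a non-zero return value and skips it.
 */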
28
29 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
30                                                 gint32 exch, gint32 comp)
31 {
32         gint32 old;
33
34         __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
35                               : "=m" (*dest), "=a" (old)
36                               : "r" (exch), "m" (*dest), "a" (comp));   
37         return(old);
38 }
39
40 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
41 {
42         gpointer old;
43
44         __asm__ __volatile__ ("lock; "
45 #ifdef __x86_64__
46                               "cmpxchgq"
47 #else
48                               "cmpxchgl"
49 #endif
50                               " %2, %0"
51                               : "=m" (*dest), "=a" (old)
52                               : "r" (exch), "m" (*dest), "a" (comp));   
53
54         return(old);
55 }
56
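/*
 * xaddl atomically adds the register operand to *val and leaves the
 * previous value of *val in that register, so InterlockedIncrement and
 * InterlockedDecrement add or subtract 1 again to return the new value,
 * while InterlockedExchangeAdd further down returns the register as-is.
 */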
57 static inline gint32 InterlockedIncrement(volatile gint32 *val)
58 {
59         gint32 tmp;
60         
61         __asm__ __volatile__ ("lock; xaddl %0, %1"
62                               : "=r" (tmp), "=m" (*val)
63                               : "0" (1), "m" (*val));
64
65         return(tmp+1);
66 }
67
68 static inline gint32 InterlockedDecrement(volatile gint32 *val)
69 {
70         gint32 tmp;
71         
72         __asm__ __volatile__ ("lock; xaddl %0, %1"
73                               : "=r" (tmp), "=m" (*val)
74                               : "0" (-1), "m" (*val));
75
76         return(tmp-1);
77 }
78
79 /*
80  * See
81  * http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
82  * for the reasons for using cmpxchg and a loop here.
83  *
84  * That URL is no longer valid, but a copy is still in the Google cache:
85  * http://www.google.com/search?q=cache:http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
86  *
87  * For the time being, http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
88  * may also work, though it will probably move again before long.
89  */
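/*
 * In C terms the asm below amounts to this retry loop (a sketch of the
 * semantics only, not the actual implementation):
 *
 *   gint32 ret;
 *   do {
 *           ret = *val;
 *   } while (InterlockedCompareExchange (val, new_val, ret) != ret);
 *   return ret;
 *
 * i.e. keep attempting the compare-and-swap until the value we read is
 * still the one in memory, so the value returned really was the previous
 * contents of *val.
 */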
90 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
91 {
92         gint32 ret;
93         
94         __asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
95                               : "=m" (*val), "=a" (ret)
96                               : "r" (new_val), "m" (*val), "a" (*val));
97
98         return(ret);
99 }
100
101 static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
102                                                   gpointer new_val)
103 {
104         gpointer ret;
105         
106         __asm__ __volatile__ ("1:; lock; "
107 #ifdef __x86_64__
108                               "cmpxchgq"
109 #else
110                               "cmpxchgl"
111 #endif
112                               " %2, %0; jne 1b"
113                               : "=m" (*val), "=a" (ret)
114                               : "r" (new_val), "m" (*val), "a" (*val));
115
116         return(ret);
117 }
118
119 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
120 {
121         gint32 ret;
122         
123         __asm__ __volatile__ ("lock; xaddl %0, %1"
124                               : "=r" (ret), "=m" (*val)
125                               : "0" (add), "m" (*val));
126         
127         return(ret);
128 }
129
130 #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
131 #define WAPI_ATOMIC_ASM
132
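/*
 * The ".word 0xdbe0500c" / ".word 0xdbf0500c" constants below are the
 * cas / casx instructions (compare-and-swap on [%g1] with %o4, new value
 * in %o5) emitted as raw words, presumably so that assemblers that do
 * not know the v9 opcodes still accept the file.
 */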
133 G_GNUC_UNUSED 
134 static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
135 {
136        register volatile gint32 *dest asm("g1") = _dest;
137        register gint32 comp asm("o4") = _comp;
138        register gint32 exch asm("o5") = _exch;
139
140        __asm__ __volatile__(
141                /* cas [%%g1], %%o4, %%o5 */
142                ".word 0xdbe0500c"
143                : "=r" (exch)
144                : "0" (exch), "r" (dest), "r" (comp)
145                : "memory");
146
147        return exch;
148 }
149
150 G_GNUC_UNUSED 
151 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
152 {
153        register volatile gpointer *dest asm("g1") = _dest;
154        register gpointer comp asm("o4") = _comp;
155        register gpointer exch asm("o5") = _exch;
156
157        __asm__ __volatile__(
158 #ifdef SPARCV9
159                /* casx [%%g1], %%o4, %%o5 */
160                ".word 0xdbf0500c"
161 #else
162                /* cas [%%g1], %%o4, %%o5 */
163                ".word 0xdbe0500c"
164 #endif
165                : "=r" (exch)
166                : "0" (exch), "r" (dest), "r" (comp)
167                : "memory");
168
169        return exch;
170 }
171
172 G_GNUC_UNUSED 
173 static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
174 {
175        register volatile gint32 *dest asm("g1") = _dest;
176        register gint32 tmp asm("o4");
177        register gint32 ret asm("o5");
178
179        __asm__ __volatile__(
180                "1:     ld      [%%g1], %%o4\n\t"
181                "       add     %%o4, 1, %%o5\n\t"
182                /*      cas     [%%g1], %%o4, %%o5 */
183                "       .word   0xdbe0500c\n\t"
184                "       cmp     %%o4, %%o5\n\t"
185                "       bne     1b\n\t"
186                "        add    %%o5, 1, %%o5"
187                : "=&r" (tmp), "=&r" (ret)
188                : "r" (dest)
189                : "memory", "cc");
190
191         return ret;
192 }
193
194 G_GNUC_UNUSED 
195 static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
196 {
197        register volatile gint32 *dest asm("g1") = _dest;
198        register gint32 tmp asm("o4");
199        register gint32 ret asm("o5");
200
201        __asm__ __volatile__(
202                "1:     ld      [%%g1], %%o4\n\t"
203                "       sub     %%o4, 1, %%o5\n\t"
204                /*      cas     [%%g1], %%o4, %%o5 */
205                "       .word   0xdbe0500c\n\t"
206                "       cmp     %%o4, %%o5\n\t"
207                "       bne     1b\n\t"
208                "        sub    %%o5, 1, %%o5"
209                : "=&r" (tmp), "=&r" (ret)
210                : "r" (dest)
211                : "memory", "cc");
212
213         return ret;
214 }
215
216 G_GNUC_UNUSED
217 static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
218 {
219        register volatile gint32 *dest asm("g1") = _dest;
220        register gint32 tmp asm("o4");
221        register gint32 ret asm("o5");
222
223        __asm__ __volatile__(
224                "1:     ld      [%%g1], %%o4\n\t"
225                "       mov     %3, %%o5\n\t"
226                /*      cas     [%%g1], %%o4, %%o5 */
227                "       .word   0xdbe0500c\n\t"
228                "       cmp     %%o4, %%o5\n\t"
229                "       bne     1b\n\t"
230                "        nop"
231                : "=&r" (tmp), "=&r" (ret)
232                : "r" (dest), "r" (exch)
233                : "memory", "cc");
234
235         return ret;
236 }
237
238 G_GNUC_UNUSED
239 static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
240 {
241        register volatile gpointer *dest asm("g1") = _dest;
242        register gpointer tmp asm("o4");
243        register gpointer ret asm("o5");
244
245        __asm__ __volatile__(
246 #ifdef SPARCV9
247                "1:     ldx     [%%g1], %%o4\n\t"
248 #else
249                "1:     ld      [%%g1], %%o4\n\t"
250 #endif
251                "       mov     %3, %%o5\n\t"
252 #ifdef SPARCV9
253                /*      casx    [%%g1], %%o4, %%o5 */
254                "       .word   0xdbf0500c\n\t"
255 #else
256                /*      cas     [%%g1], %%o4, %%o5 */
257                "       .word   0xdbe0500c\n\t"
258 #endif
259                "       cmp     %%o4, %%o5\n\t"
260                "       bne     1b\n\t"
261                "        nop"
262                : "=&r" (tmp), "=&r" (ret)
263                : "r" (dest), "r" (exch)
264                : "memory", "cc");
265
266         return ret;
267 }
268
269 G_GNUC_UNUSED
270 static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
271 {
272        register volatile gint32 *dest asm("g1") = _dest;
273        register gint32 tmp asm("o4");
274        register gint32 ret asm("o5");
275
276        __asm__ __volatile__(
277                "1:     ld      [%%g1], %%o4\n\t"
278                "       add     %%o4, %3, %%o5\n\t"
279                /*      cas     [%%g1], %%o4, %%o5 */
280                "       .word   0xdbe0500c\n\t"
281                "       cmp     %%o4, %%o5\n\t"
282                "       bne     1b\n\t"
283                "        add    %%o5, %3, %%o5"
284                : "=&r" (tmp), "=&r" (ret)
285                : "r" (dest), "r" (add)
286                : "memory", "cc");
287
288         return ret;
289 }
290
291 #elif defined(__s390__)
292
293 #define WAPI_ATOMIC_ASM
294
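/*
 * These use the CS (32-bit compare-and-swap) and, on __s390x__, CSG
 * (64-bit) instructions; the read-modify-write operations below loop
 * (JNZ back to the local label) until the compare-and-swap succeeds.
 */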
295 static inline gint32 
296 InterlockedCompareExchange(volatile gint32 *dest,
297                            gint32 exch, gint32 comp)
298 {
299         gint32 old;
300
301         __asm__ __volatile__ ("\tLA\t1,%0\n"
302                               "\tLR\t%1,%3\n"
303                               "\tCS\t%1,%2,0(1)\n"
304                               : "+m" (*dest), "=r" (old)
305                               : "r" (exch), "r" (comp)
306                               : "1", "cc");     
307         return(old);
308 }
309
310 #ifndef __s390x__
311 static inline gpointer
312 InterlockedCompareExchangePointer(volatile gpointer *dest,
313                            gpointer exch, gpointer comp)
314 {
315         gpointer old;
316
317         __asm__ __volatile__ ("\tLA\t1,%0\n"
318                               "\tLR\t%1,%3\n"
319                               "\tCS\t%1,%2,0(1)\n"
320                               : "+m" (*dest), "=r" (old)
321                               : "r" (exch), "r" (comp)
322                               : "1", "cc");     
323         return(old);
324 }
325 # else
326 static inline gpointer 
327 InterlockedCompareExchangePointer(volatile gpointer *dest, 
328                                   gpointer exch, 
329                                   gpointer comp)
330 {
331         gpointer old;
332
333         __asm__ __volatile__ ("\tLA\t1,%0\n"
334                               "\tLGR\t%1,%3\n"
335                               "\tCSG\t%1,%2,0(1)\n"
336                               : "+m" (*dest), "=r" (old)
337                               : "r" (exch), "r" (comp)
338                               : "1", "cc");
339
340         return(old);
341 }
342 # endif
343
344 # ifndef __s390x__
345 static inline gint32 
346 InterlockedIncrement(volatile gint32 *val)
347 {
348         gint32 tmp;
349         
350         __asm__ __volatile__ ("\tLA\t2,%1\n"
351                               "0:\tL\t%0,%1\n"
352                               "\tLR\t1,%0\n"
353                               "\tAHI\t1,1\n"
354                               "\tCS\t%0,1,0(2)\n"
355                               "\tJNZ\t0b\n"
356                               "\tLR\t%0,1"
357                               : "=r" (tmp), "+m" (*val)
358                               : : "1", "2", "cc");
359
360         return(tmp);
361 }
362 # else
363 static inline gint32 
364 InterlockedIncrement(volatile gint32 *val)
365 {
366         gint32 tmp;
367         
368         __asm__ __volatile__ ("\tLA\t2,%1\n"
369                               "0:\tLGF\t%0,%1\n"
370                               "\tLGFR\t1,%0\n"
371                               "\tAGHI\t1,1\n"
372                               "\tCS\t%0,1,0(2)\n"
373                               "\tJNZ\t0b\n"
374                               "\tLGFR\t%0,1"
375                               : "=r" (tmp), "+m" (*val)
376                               : : "1", "2", "cc");
377
378         return(tmp);
379 }
380 # endif
381
382 # ifndef __s390x__
383 static inline gint32 
384 InterlockedDecrement(volatile gint32 *val)
385 {
386         gint32 tmp;
387         
388         __asm__ __volatile__ ("\tLA\t2,%1\n"
389                               "0:\tL\t%0,%1\n"
390                               "\tLR\t1,%0\n"
391                               "\tAHI\t1,-1\n"
392                               "\tCS\t%0,1,0(2)\n"
393                               "\tJNZ\t0b\n"
394                               "\tLR\t%0,1"
395                               : "=r" (tmp), "+m" (*val)
396                               : : "1", "2", "cc");
397
398         return(tmp);
399 }
400 # else
401 static inline gint32 
402 InterlockedDecrement(volatile gint32 *val)
403 {
404         gint32 tmp;
405         
406         __asm__ __volatile__ ("\tLA\t2,%1\n"
407                               "0:\tLGF\t%0,%1\n"
408                               "\tLGFR\t1,%0\n"
409                               "\tAGHI\t1,-1\n"
410                               "\tCS\t%0,1,0(2)\n"
411                               "\tJNZ\t0b\n"
412                               "\tLGFR\t%0,1"
413                               : "=r" (tmp), "+m" (*val)
414                               : : "1", "2", "cc");
415
416         return(tmp);
417 }
418 # endif
419
420 static inline gint32 
421 InterlockedExchange(volatile gint32 *val, gint32 new_val)
422 {
423         gint32 ret;
424         
425         __asm__ __volatile__ ("\tLA\t1,%0\n"
426                               "0:\tL\t%1,%0\n"
427                               "\tCS\t%1,%2,0(1)\n"
428                               "\tJNZ\t0b"
429                               : "+m" (*val), "=r" (ret)
430                               : "r" (new_val)
431                               : "1", "cc");
432
433         return(ret);
434 }
435
436 # ifndef __s390x__
437 static inline gpointer 
438 InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
439 {
440         gpointer ret;
441         
442         __asm__ __volatile__ ("\tLA\t1,%0\n"
443                               "0:\tL\t%1,%0\n"
444                               "\tCS\t%1,%2,0(1)\n"
445                               "\tJNZ\t0b"
446                               : "+m" (*val), "=r" (ret)
447                               : "r" (new_val)
448                               : "1", "cc");
449
450         return(ret);
451 }
452 # else
453 static inline gpointer
454 InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
455 {
456         gpointer ret;
457         
458         __asm__ __volatile__ ("\tLA\t1,%0\n"
459                               "0:\tLG\t%1,%0\n"
460                               "\tCSG\t%1,%2,0(1)\n"
461                               "\tJNZ\t0b"
462                               : "+m" (*val), "=r" (ret)
463                               : "r" (new_val)
464                               : "1", "cc");
465
466         return(ret);
467 }
468 # endif
469
470 # ifndef __s390x__
471 static inline gint32 
472 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
473 {
474         gint32 ret;
475
476         __asm__ __volatile__ ("\tLA\t2,%1\n"
477                               "0:\tL\t%0,%1\n"
478                               "\tLR\t1,%0\n"
479                               "\tAR\t1,%2\n"
480                               "\tCS\t%0,1,0(2)\n"
481                               "\tJNZ\t0b"
482                               : "=r" (ret), "+m" (*val)
483                               : "r" (add) 
484                               : "1", "2", "cc");
485         
486         return(ret);
487 }
488 # else
489 static inline gint32 
490 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
491 {
492         gint32 ret;
493
494         __asm__ __volatile__ ("\tLA\t2,%1\n"
495                               "0:\tLGF\t%0,%1\n"
496                               "\tLGFR\t1,%0\n"
497                               "\tAGR\t1,%2\n"
498                               "\tCS\t%0,1,0(2)\n"
499                               "\tJNZ\t0b"
500                               : "=r" (ret), "+m" (*val)
501                               : "r" (add) 
502                               : "1", "2", "cc");
503         
504         return(ret);
505 }
506 # endif
507
508 #elif defined(__ppc__) || defined (__powerpc__)
509 #define WAPI_ATOMIC_ASM
510
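/*
 * PowerPC uses load-and-reserve / store-conditional (lwarx / stwcx.):
 * each operation loads the word, computes the new value and branches
 * back to retry if the conditional store fails because the reservation
 * was lost in the meantime.
 */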
511 static inline gint32 InterlockedIncrement(volatile gint32 *val)
512 {
513         gint32 result = 0, tmp;
514
515         __asm__ __volatile__ ("\n1:\n\t"
516                               "lwarx  %0, 0, %2\n\t"
517                               "addi   %1, %0, 1\n\t"
518                               "stwcx. %1, 0, %2\n\t"
519                               "bne-   1b"
520                               : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
521         return result + 1;
522 }
523
524 static inline gint32 InterlockedDecrement(volatile gint32 *val)
525 {
526         gint32 result = 0, tmp;
527
528         __asm__ __volatile__ ("\n1:\n\t"
529                               "lwarx  %0, 0, %2\n\t"
530                               "addi   %1, %0, -1\n\t"
531                               "stwcx. %1, 0, %2\n\t"
532                               "bne-   1b"
533                               : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
534         return result - 1;
535 }
536
537 #define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
538
539 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
540                                                 gint32 exch, gint32 comp) {
541         gint32 tmp = 0;
542
543         __asm__ __volatile__ ("\n1:\n\t"
544                              "lwarx   %0, 0, %1\n\t"
545                              "cmpw    %0, %2\n\t" 
546                              "bne-    2f\n\t"
547                              "stwcx.  %3, 0, %1\n\t"
548                              "bne-    1b\n"
549                              "2:"
550                              : "=&r" (tmp)
551                              : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
552         return(tmp);
553 }
554
555 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
556 {
557         gint32 tmp = 0;
558
559         __asm__ __volatile__ ("\n1:\n\t"
560                               "lwarx  %0, 0, %2\n\t"
561                               "stwcx. %3, 0, %2\n\t"
562                               "bne    1b"
563                               : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
564         return(tmp);
565 }
566 #define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
567
568 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
569 {
570         gint32 result, tmp;
571         __asm__ __volatile__ ("\n1:\n\t"
572                               "lwarx  %0, 0, %2\n\t"
573                               "add    %1, %0, %3\n\t"
574                               "stwcx. %1, 0, %2\n\t"
575                               "bne    1b"
576                               : "=&r" (result), "=&r" (tmp)
577                               : "r" (dest), "r" (add) : "cc", "memory");
578         return(result);
579 }
580
581 #elif defined(__arm__)
582 #define WAPI_ATOMIC_ASM
583
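/*
 * Pre-ARMv6 has no compare-and-swap, so these emulate it with SWP: the
 * new value is swapped into memory, and if the value that came out is
 * not the one the new value was computed from, the value it displaced
 * is swapped straight back and the whole sequence retries.
 */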
584 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
585 {
586         int a, b;
587
588         __asm__ __volatile__ (    "0:\n\t"
589                                   "ldr %1, [%2]\n\t"
590                                   "cmp %1, %4\n\t"
591                                   "mov %0, %1\n\t"
592                                   "bne 1f\n\t"
593                                   "swp %0, %3, [%2]\n\t"
594                                   "cmp %0, %1\n\t"
595                                   "swpne %3, %0, [%2]\n\t"
596                                   "bne 0b\n\t"
597                                   "1:"
598                                   : "=&r" (a), "=&r" (b)
599                                   : "r" (dest), "r" (exch), "r" (comp)
600                                   : "cc", "memory");
601
602         return a;
603 }
604
605 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
606 {
607         gpointer a, b;
608
609         __asm__ __volatile__ (    "0:\n\t"
610                                   "ldr %1, [%2]\n\t"
611                                   "cmp %1, %4\n\t"
612                                   "mov %0, %1\n\t"
613                                   "bne 1f\n\t"
614                                   "swpeq %0, %3, [%2]\n\t"
615                                   "cmp %0, %1\n\t"
616                                   "swpne %3, %0, [%2]\n\t"
617                                   "bne 0b\n\t"
618                                   "1:"
619                                   : "=&r" (a), "=&r" (b)
620                                   : "r" (dest), "r" (exch), "r" (comp)
621                                   : "cc", "memory");
622
623         return a;
624 }
625
626 static inline gint32 InterlockedIncrement(volatile gint32 *dest)
627 {
628         int a, b, c;
629
630         __asm__ __volatile__ (  "0:\n\t"
631                                 "ldr %0, [%3]\n\t"
632                                 "add %1, %0, %4\n\t"
633                                 "swp %2, %1, [%3]\n\t"
634                                 "cmp %0, %2\n\t"
635                                 "swpne %1, %2, [%3]\n\t"
636                                 "bne 0b"
637                                 : "=&r" (a), "=&r" (b), "=&r" (c)
638                                 : "r" (dest), "r" (1)
639                                 : "cc", "memory");
640
641         return b;
642 }
643
644 static inline gint32 InterlockedDecrement(volatile gint32 *dest)
645 {
646         int a, b, c;
647
648         __asm__ __volatile__ (  "0:\n\t"
649                                 "ldr %0, [%3]\n\t"
650                                 "add %1, %0, %4\n\t"
651                                 "swp %2, %1, [%3]\n\t"
652                                 "cmp %0, %2\n\t"
653                                 "swpne %1, %2, [%3]\n\t"
654                                 "bne 0b"
655                                 : "=&r" (a), "=&r" (b), "=&r" (c)
656                                 : "r" (dest), "r" (-1)
657                                 : "cc", "memory");
658
659         return b;
660 }
661
662 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
663 {
664         int a;
665
666         __asm__ __volatile__ (  "swp %0, %2, [%1]"
667                                 : "=&r" (a)
668                                 : "r" (dest), "r" (exch));
669
670         return a;
671 }
672
673 static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
674 {
675         gpointer a;
676
677         __asm__ __volatile__ (  "swp %0, %2, [%1]"
678                                 : "=&r" (a)
679                                 : "r" (dest), "r" (exch));
680
681         return a;
682 }
683
684 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
685 {
686         int a, b, c;
687
688         __asm__ __volatile__ (  "0:\n\t"
689                                 "ldr %0, [%3]\n\t"
690                                 "add %1, %0, %4\n\t"
691                                 "swp %2, %1, [%3]\n\t"
692                                 "cmp %0, %2\n\t"
693                                 "swpne %1, %2, [%3]\n\t"
694                                 "bne 0b"
695                                 : "=&r" (a), "=&r" (b), "=&r" (c)
696                                 : "r" (dest), "r" (add)
697                                 : "cc", "memory");
698
699         return a;
700 }
701
702 #elif defined(__ia64__)
703 #define WAPI_ATOMIC_ASM
704
705 #ifdef __INTEL_COMPILER
706 #include <ia64intrin.h>
707 #endif
708
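/*
 * With the Intel compiler these map onto the ia64intrin.h intrinsics;
 * with gcc, compare-exchange is done with cmpxchg4.acq / cmpxchg8.acq
 * (after loading the comparand into ar.ccv) and the remaining
 * operations are plain C retry loops built on top of it.
 */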
709 static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
710                                                 gint32 exch, gint32 comp)
711 {
712         gint32 old;
713
714 #ifdef __INTEL_COMPILER
715         old = _InterlockedCompareExchange (dest, exch, comp);
716 #else
717         asm volatile ("mov ar.ccv = %2 ;;\n\t"
718                                   "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
719                                   : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
720 #endif
721
722         return(old);
723 }
724
725 static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
726                                                 gpointer exch, gpointer comp)
727 {
728         gpointer old;
729
730 #ifdef __INTEL_COMPILER
731         old = _InterlockedCompareExchangePointer (dest, exch, comp);
732 #else
733         asm volatile ("mov ar.ccv = %2 ;;\n\t"
734                                   "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
735                                   : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
736 #endif
737
738         return(old);
739 }
740
741 static inline gint32 InterlockedIncrement(gint32 volatile *val)
742 {
743 #ifdef __INTEL_COMPILER
744         return _InterlockedIncrement (val);
745 #else
746         gint32 old;
747
748         do {
749                 old = *val;
750         } while (InterlockedCompareExchange (val, old + 1, old) != old);
751
752         return old + 1;
753 #endif
754 }
755
756 static inline gint32 InterlockedDecrement(gint32 volatile *val)
757 {
758 #ifdef __INTEL_COMPILER
759         return _InterlockedDecrement (val);
760 #else
761         gint32 old;
762
763         do {
764                 old = *val;
765         } while (InterlockedCompareExchange (val, old - 1, old) != old);
766
767         return old - 1;
768 #endif
769 }
770
771 static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
772 {
773 #ifdef __INTEL_COMPILER
774         return _InterlockedExchange (dest, new_val);
775 #else
776         gint32 res;
777
778         do {
779                 res = *dest;
780         } while (InterlockedCompareExchange (dest, new_val, res) != res);
781
782         return res;
783 #endif
784 }
785
786 static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
787 {
788 #ifdef __INTEL_COMPILER
789         return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
790 #else
791         gpointer res;
792
793         do {
794                 res = *dest;
795         } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
796
797         return res;
798 #endif
799 }
800
801 static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
802 {
803         gint32 old;
804
805 #ifdef __INTEL_COMPILER
806         old = _InterlockedExchangeAdd (val, add);
807 #else
808         do {
809                 old = *val;
810         } while (InterlockedCompareExchange (val, old + add, old) != old);
811 #endif
812
813         return old;
814 }
815
816 #elif defined(__alpha__)
817 #define WAPI_ATOMIC_ASM
818
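/*
 * Alpha uses load-locked / store-conditional (ldl_l / stl_c, or
 * ldq_l / stq_c for pointer-sized values): the conditional store fails
 * and the sequence branches back to retry if the location was modified
 * between the load and the store.
 */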
819 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
820                                                 gint32 exch, gint32 comp)
821 {
822         gint32 old, temp, temp2;
823         long compq = comp, exchq = exch;
824
825         __asm__ __volatile__ (
826                 "1:     ldl_l %2, %0\n"
827                 "       mov %2, %1\n"
828                 "       cmpeq %2, %5, %3\n"
829                 "       cmovne %3, %4, %2\n"
830                 "       stl_c %2, %0\n"
831                 "       beq %2, 1b\n"
832                 : "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
833                 : "r" (exchq), "r" (compq), "m" (*dest));
834         return(old);
835 }
836
837 static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
838 {
839         gpointer old, temp, temp2;
840
841         __asm__ __volatile__ (
842                 "1:     ldq_l %2, %0\n"
843                 "       mov %2, %1\n"
844                 "       cmpeq %2, %5, %3\n"
845                 "       cmovne %3, %4, %2\n"
846                 "       stq_c %2, %0\n"
847                 "       beq %2, 1b\n"
848                 : "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
849                 : "r" (exch), "r" (comp), "m" (*dest));
850         return(old);
851 }
852
853 static inline gint32 InterlockedIncrement(volatile gint32 *val)
854 {
855         gint32 temp, cur;
856         
857         __asm__ __volatile__ (
858                 "1:     ldl_l %0, %1\n"
859                 "       addl %0, %3, %0\n"
860                 "       mov %0, %2\n"
861                 "       stl_c %0, %1\n"
862                 "       beq %0, 1b\n"
863                 : "=&r" (temp), "=m" (*val), "=r" (cur)
864                 : "Ir" (1), "m" (*val));
865         return(cur);
866 }
867
868 static inline gint32 InterlockedDecrement(volatile gint32 *val)
869 {
870         gint32 temp, cur;
871         
872         __asm__ __volatile__ (
873                 "1:     ldl_l %0, %1\n"
874                 "       subl %0, %3, %0\n"
875                 "       mov %0, %2\n"
876                 "       stl_c %0, %1\n"
877                 "       beq %0, 1b\n"
878                 : "=&r" (temp), "=m" (*val), "=r" (cur)
879                 : "Ir" (1), "m" (*val));
880         return(cur);
881 }
882
883 static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
884 {
885         gint32 ret, temp;
886
887         __asm__ __volatile__ (
888                 "1:     ldl_l %1, %0\n"
889                 "       mov %3, %2\n"
890                 "       stl_c %2, %0\n"
891                 "       beq %2, 1b\n"
892                 : "=m" (*val), "=&r" (ret), "=&r" (temp)
893                 : "r" (new_val), "m" (*val));
894         return(ret);
895 }
896
897 static inline gpointer InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
898 {
899         gpointer ret, temp;
900
901         __asm__ __volatile__ (
902                 "1:     ldq_l %1, %0\n"
903                 "       mov %3, %2\n"
904                 "       stq_c %2, %0\n"
905                 "       beq %2, 1b\n"
906                 : "=m" (*val), "=&r" (ret), "=&r" (temp)
907                 : "r" (new_val), "m" (*val));
908         return(ret);
909 }
910
911 static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
912 {
913         gint32 ret, temp;
914         
915         __asm__ __volatile__ (
916                 "1:     ldl_l   %2, %0\n"
917                 "       mov     %2, %1\n"
918                 "       addl    %2, %3, %2\n"
919                 "       stl_c   %2, %0\n"
920                 "       beq     %2, 1b\n"
921                 : "=m" (*val), "=&r" (ret), "=&r" (temp)
922                 : "r" (add), "m" (*val));
923         
924         return(ret);
925 }
926
927 #elif defined(__mips__)
928 #define WAPI_ATOMIC_ASM
929
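/*
 * MIPS uses the ll / sc (load-linked / store-conditional) pair: sc
 * only writes back if the location is unchanged since the ll, and the
 * beqz branches back to retry the whole sequence when it fails.
 */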
930 static inline gint32 InterlockedIncrement(volatile gint32 *val)
931 {
932         gint32 tmp, result = 0;
933
934         __asm__ __volatile__ ("    .set    mips32\n"
935                               "1:  ll      %0, %2\n"
936                               "    addu    %1, %0, 1\n"
937                               "    sc      %1, %2\n"
938                               "    beqz    %1, 1b\n"
939                               "    .set    mips0\n"
940                               : "=&r" (result), "=&r" (tmp), "=m" (*val)
941                               : "m" (*val));
942         return result + 1;
943 }
944
945 static inline gint32 InterlockedDecrement(volatile gint32 *val)
946 {
947         gint32 tmp, result = 0;
948
949         __asm__ __volatile__ ("    .set    mips32\n"
950                               "1:  ll      %0, %2\n"
951                               "    subu    %1, %0, 1\n"
952                               "    sc      %1, %2\n"
953                               "    beqz    %1, 1b\n"
954                               "    .set    mips0\n"
955                               : "=&r" (result), "=&r" (tmp), "=m" (*val)
956                               : "m" (*val));
957         return result - 1;
958 }
959
960 #define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
961
962 static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
963                                                 gint32 exch, gint32 comp) {
964         gint32 old, tmp;
965
966         __asm__ __volatile__ ("    .set    mips32\n"
967                               "1:  ll      %0, %2\n"
968                               "    bne     %0, %5, 2f\n"
969                               "    move    %1, %4\n"
970                               "    sc      %1, %2\n"
971                               "    beqz    %1, 1b\n"
972                               "2:  .set    mips0\n"
973                               : "=&r" (old), "=&r" (tmp), "=m" (*dest)
974                               : "m" (*dest), "r" (exch), "r" (comp));
975         return(old);
976 }
977
978 static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
979 {
980         gint32 result, tmp;
981
982         __asm__ __volatile__ ("    .set    mips32\n"
983                               "1:  ll      %0, %2\n"
984                               "    move    %1, %4\n"
985                               "    sc      %1, %2\n"
986                               "    beqz    %1, 1b\n"
987                               "    .set    mips0\n"
988                               : "=&r" (result), "=&r" (tmp), "=m" (*dest)
989                               : "m" (*dest), "r" (exch));
990         return(result);
991 }
992 #define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
993
994 static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
995 {
996         gint32 result, tmp;
997
998         __asm__ __volatile__ ("    .set    mips32\n"
999                               "1:  ll      %0, %2\n"
1000                               "    addu    %1, %0, %4\n"
1001                               "    sc      %1, %2\n"
1002                               "    beqz    %1, 1b\n"
1003                               "    .set    mips0\n"
1004                               : "=&r" (result), "=&r" (tmp), "=m" (*dest)
1005                               : "m" (*dest), "r" (add));
1006         return result;
1007 }
1008
1009 #else
1010
1011 extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
1012 extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
1013 extern gint32 InterlockedIncrement(volatile gint32 *dest);
1014 extern gint32 InterlockedDecrement(volatile gint32 *dest);
1015 extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
1016 extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
1017 extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
1018
1019 #if defined(__hpux) && !defined(__GNUC__)
1020 #define WAPI_ATOMIC_ASM
1021 #endif
1022
1023 #endif
1024
1025 #endif /* _WAPI_ATOMIC_H_ */