/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */
#include "../all_aligned_atomic_load_store.h"

/* Real X86 implementations appear                                     */
/* to enforce ordering between memory operations, EXCEPT that a later  */
/* read can pass earlier writes, presumably due to the visible         */
/* presence of store buffers.                                          */
/* We ignore the fact that the official specs                          */
/* seem to be much weaker (and arguably too weak to be usable).        */

#include "../ordered_except_wr.h"

#include "../test_and_set_t_is_char.h"

#include "../standard_ao_double_t.h"
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
  /* mfence drains the store buffer, giving a full memory barrier. */
  __asm__ __volatile__("mfence" : : : "memory");
}

#define AO_HAVE_nop_full
#else

/* We could use the cpuid instruction.  But that seems to be slower    */
/* than the default implementation based on test_and_set_full.  Thus   */
/* we omit that bit of misinformation here.                            */

#endif
/* As far as we can tell, the lfence and sfence instructions are not   */
/* currently needed or useful for cached memory accesses.              */
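/* Illustrative sketch, not part of the original interface: the one    */
/* reordering x86 does allow is a later read passing an earlier write. */
/* In a Dekker-style handoff both threads could then see the other's   */
/* flag as 0.  A full barrier between the store and the load prevents  */
/* this.  The function and parameter names below are hypothetical.     */
#ifdef AO_HAVE_nop_full
AO_INLINE int
example_try_enter(volatile AO_t *my_flag, volatile AO_t *other_flag)
{
  AO_store(my_flag, 1);
  AO_nop_full();        /* keep the following load from passing the store */
  return AO_load(other_flag) == 0;
}
#endif /* AO_HAVE_nop_full */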
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xaddq %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_fetch_and_add_full
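/* Usage sketch (illustrative, not part of the original header): a     */
/* shared ticket counter.  xadd returns the value held *before* the    */
/* addition, so each caller gets a distinct ticket.  The helper name   */
/* is hypothetical.                                                    */
AO_INLINE AO_t
example_next_ticket(volatile AO_t *counter)
{
  return AO_fetch_and_add_full(counter, 1);
}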
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
  unsigned int result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; orq %1, %0" :
                        "=m" (*p) : "r" (incr), "m" (*p) : "memory");
}

#define AO_HAVE_or_full
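/* Usage sketch (illustrative): atomically set a bit in a shared flag  */
/* word without losing bits set concurrently by other threads, which   */
/* a plain read-modify-write could do.  The helper name is             */
/* hypothetical.                                                       */
AO_INLINE void
example_publish_done(volatile AO_t *flags, AO_t done_bit)
{
  AO_or_full(flags, done_bit);
}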
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix;       */
  /* with a memory operand it is implicitly locked.                    */
  __asm__ __volatile__("xchgb %0, %1"
                       : "=q"(oldval), "=m"(*addr)
                       : "0"(0xff), "m"(*addr) : "memory");
  return (AO_TS_VAL_t)oldval;
}

#define AO_HAVE_test_and_set_full
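/* Usage sketch (illustrative, not part of the original header): a     */
/* minimal test-and-set spinlock.  AO_test_and_set_full returns the    */
/* previous value, so AO_TS_SET means the lock was already held.       */
/* Client code would normally release with AO_CLEAR from atomic_ops.h; */
/* we spell out the x86 release store here because AO_CLEAR is not     */
/* yet defined at this point in the header.                            */
AO_INLINE void
example_spin_lock(volatile AO_TS_t *lock)
{
  while (AO_test_and_set_full(lock) == AO_TS_SET) {
    /* spin; production code would pause or back off here */
  }
}

AO_INLINE void
example_spin_unlock(volatile AO_TS_t *lock)
{
  __asm__ __volatile__("" : : : "memory");  /* compiler barrier */
  *lock = 0;  /* plain stores have release semantics on x86 */
}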
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr,
                         AO_t old, AO_t new_val)
{
  char result;
  __asm__ __volatile__("lock; cmpxchgq %3, %0; setz %1"
                       : "=m"(*addr), "=q"(result)
                       : "m"(*addr), "r" (new_val), "a"(old) : "memory");
  return (int) result;
}

#define AO_HAVE_compare_and_swap_full
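/* Usage sketch (illustrative): the canonical compare-and-swap retry   */
/* loop, here maintaining a shared running maximum.  On failure some   */
/* other thread won the race, so we reread and retry.  The helper      */
/* name is hypothetical.                                               */
AO_INLINE void
example_update_max(volatile AO_t *max_val, AO_t candidate)
{
  AO_t cur;
  do {
    cur = AO_load(max_val);
    if (cur >= candidate) return;  /* already at least as large */
  } while (!AO_compare_and_swap_full(max_val, cur, candidate));
}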
#ifdef AO_CMPXCHG16B_AVAILABLE
/* NEC LE-IT: older AMD Opterons are missing this instruction.
 * On these machines SIGILL will be thrown.
 * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
 * (lock based) version available. */
/* HB: Changed this to not define either by default.  There are
 * enough machines and tool chains around on which cmpxchg16b
 * doesn't work.  And the emulation is unsafe by our usual rules.
 * However both are clearly useful in certain cases. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
  __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
                       : "=m"(*addr), "=q"(result)
                       : "m"(*addr),
                         "d" (old_val2), "a" (old_val1),
                         "c" (new_val2), "b" (new_val1)
                       : "memory");
  return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
#else
/* This one provides spinlock based emulation of CAS implemented in    */
/* atomic_ops.c.  We probably do not want to do this here, since it is */
/* not atomic with respect to other kinds of updates of *addr.  On the */
/* other hand, this may be a useful facility on occasion.              */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2);

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  return AO_compare_double_and_swap_double_emulation(addr,
                                                     old_val1, old_val2,
                                                     new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */
#endif /* AO_CMPXCHG16B_AVAILABLE */
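/* Usage sketch (illustrative, not part of the original header): the   */
/* pointer-plus-version-counter idiom that motivates a double-width    */
/* CAS, used to sidestep the ABA problem.  The helper name and layout  */
/* (val1 = pointer, val2 = counter) are assumptions for this example.  */
#ifdef AO_HAVE_compare_double_and_swap_double_full
AO_INLINE int
example_replace_head(volatile AO_double_t *head,
                     AO_t old_ptr, AO_t old_count, AO_t new_ptr)
{
  /* Succeeds (returns nonzero) only if both the pointer and the       */
  /* version tag are unchanged; the tag is bumped on every update.     */
  return AO_compare_double_and_swap_double_full(head,
                                                old_ptr, old_count,
                                                new_ptr, old_count + 1);
}
#endif /* AO_HAVE_compare_double_and_swap_double_full */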