/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */
/* The following really assume we have a 486 or better.  Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command   */
/* line options.                                                       */
/* We should perhaps test dynamically.                                 */
#include "../all_aligned_atomic_load_store.h"
/* Real X86 implementations, except for some old WinChips, appear      */
/* to enforce ordering between memory operations, EXCEPT that a later  */
/* read can pass earlier writes, presumably due to the visible         */
/* presence of store buffers.                                          */
/* We ignore both the WinChips, and the fact that the official specs   */
/* seem to be much weaker (and arguably too weak to be usable).        */

#include "../ordered_except_wr.h"
#include "../test_and_set_t_is_char.h"

#include "../standard_ao_double_t.h"
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
  __asm__ __volatile__("mfence" : : : "memory");
}
#define AO_HAVE_nop_full

#else
/* We could use the cpuid instruction.  But that seems to be slower    */
/* than the default implementation based on test_and_set_full.  Thus   */
/* we omit that bit of misinformation here.                            */
#endif /* !AO_USE_PENTIUM4_INSTRS */
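
/* Usage sketch (illustrative only; "example_handshake" is a           */
/* hypothetical name, not part of this header): the one reordering     */
/* x86 does perform is letting a later read pass an earlier write, so  */
/* a Dekker-style handshake needs a full barrier between the store     */
/* and the load.  Guarded so it compiles only where AO_nop_full is     */
/* defined above; client code would normally rely on the generalized   */
/* AO_nop_full from atomic_ops.h instead.                              */
#ifdef AO_HAVE_nop_full
AO_INLINE AO_t
example_handshake(volatile AO_t *my_flag, volatile AO_t *other_flag)
{
  AO_store(my_flag, 1);
  AO_nop_full();        /* keep the load below from passing the store */
  return AO_load(other_flag);
}
#endif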

/* As far as we can tell, the lfence and sfence instructions are not   */
/* currently needed or useful for cached memory accesses.              */

/* Really only works for 486 and later */
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_fetch_and_add_full
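
/* Usage sketch (illustrative only; "example_bump" is a hypothetical   */
/* name): a shared event counter.  The locked xadd returns the value   */
/* the counter held before the increment.                              */
AO_INLINE AO_t
example_bump(volatile AO_t *counter)
{
  /* Atomically adds 1; the lock prefix also makes this a full barrier. */
  return AO_fetch_and_add_full(counter, 1);
}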

AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_short_fetch_and_add_full

/* Really only works for 486 and later */
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; orl %1, %0" :
                        "=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
#define AO_HAVE_or_full
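
/* Usage sketch (illustrative only; the function name and bit layout   */
/* are hypothetical): publishing one flag bit in a shared word         */
/* without losing concurrent updates to the other bits.                */
AO_INLINE void
example_set_ready(volatile AO_t *flags)
{
  AO_or_full(flags, (AO_t)1 /* hypothetical READY bit */);
}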

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix,  */
  /* since it is implicitly locked when given a memory operand.   */
  __asm__ __volatile__("xchgb %0, %1"
                       : "=q"(oldval), "=m"(*addr)
                       : "0"((unsigned char)0xff), "m"(*addr) : "memory");
  return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
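
/* Usage sketch (illustrative only; the example_* names are            */
/* hypothetical): a minimal test-and-set spin lock.  The release is a  */
/* plain atomic byte store, which suffices on x86 because stores are   */
/* not reordered with earlier operations (see the note at the top of   */
/* this file).                                                         */
AO_INLINE void
example_spin_lock(volatile AO_TS_t *lock)
{
  while (AO_test_and_set_full(lock) == AO_TS_SET) {
    /* Spin until the holder stores AO_TS_CLEAR. */
  }
}

AO_INLINE void
example_spin_unlock(volatile AO_TS_t *lock)
{
  AO_char_store((volatile unsigned char *)lock, AO_TS_CLEAR);
}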

/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return (int)__sync_bool_compare_and_swap(addr, old, new_val);
# else
    char result;

    __asm__ __volatile__("lock; cmpxchgl %3, %0; setz %1"
                         : "=m" (*addr), "=a" (result)
                         : "m" (*addr), "r" (new_val), "a" (old) : "memory");
    return (int) result;
# endif
}
#define AO_HAVE_compare_and_swap_full
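
/* Usage sketch (illustrative only; "example_store_max" is a           */
/* hypothetical name): the usual load/compute/CAS retry loop, here     */
/* raising a shared value to at least val.                             */
AO_INLINE void
example_store_max(volatile AO_t *loc, AO_t val)
{
  AO_t cur;

  do {
    cur = AO_load(loc);
    if (cur >= val) return;     /* already large enough */
  } while (!AO_compare_and_swap_full(loc, cur, val));
}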

/* Returns nonzero if the comparison succeeded. */
/* Really requires at least a Pentium.          */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
# ifdef __PIC__
    /* If PIC is turned on, we can't use %ebx as it is reserved for the */
    /* GOT pointer.  We can save and restore %ebx because GCC won't be  */
    /* using it for anything else (such as any of the m operands).      */
#   if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
      /* Starting from GCC 4.3, we use %edi (for new_val1) instead of a */
      /* memory operand, and a swap instruction instead of push/pop,    */
      /* because some GCC releases have a bug in processing memory      */
      /* operands (when the address base is %esp) in inline assembly    */
      /* after a push.                                                  */
      __asm__ __volatile__("xchg %%ebx,%6;" /* swap GOT ptr and new_val1 */
                           "lock; cmpxchg8b %0; setz %1;"
                           "xchg %%ebx,%6;" /* restore ebx and edi */
                           : "=m"(*addr), "=a"(result)
                           : "m"(*addr), "d" (old_val2), "a" (old_val1),
                             "c" (new_val2), "D" (new_val1) : "memory");
#   else
      /* For older compiler releases, we continue to use push/pop, as at */
      /* least GCC 4.2.1 does not recognize 'D' as a valid register name. */
      __asm__ __volatile__("pushl %%ebx;" /* save ebx used for PIC GOT ptr */
                           "movl %6,%%ebx;" /* move new_val1 to %ebx */
                           "lock; cmpxchg8b %0; setz %1;"
                           "pop %%ebx;" /* restore %ebx */
                           : "=m"(*addr), "=a"(result)
                           : "m"(*addr), "d" (old_val2), "a" (old_val1),
                             "c" (new_val2), "m" (new_val1) : "memory");
#   endif
# else
    /* We can't just do the same thing in non-PIC mode, because GCC
     * might be using %ebx as the memory operand.  We could have ifdef'd
     * in a clobber, but there's no point doing the push/pop if we don't
     * have to.
     */
    __asm__ __volatile__("lock; cmpxchg8b %0; setz %1;"
                         : "=m"(*addr), "=a"(result)
                         : "m"(*addr), "d" (old_val2), "a" (old_val1),
                           "c" (new_val2), "b" (new_val1) : "memory");
# endif
  return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
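
/* Usage sketch (illustrative only; "example_update_ptr" is a          */
/* hypothetical name): a classic use of a double-width CAS is an       */
/* ABA-resistant update, pairing a version count (AO_val1) with a      */
/* pointer-sized value (AO_val2); the AO_val1/AO_val2 fields come      */
/* from standard_ao_double_t.h.  The two initial reads need not be     */
/* atomic as a pair, since the CAS revalidates both halves.            */
AO_INLINE int
example_update_ptr(volatile AO_double_t *loc, AO_t expected, AO_t new_ptr)
{
  AO_t ver = loc->AO_val1;      /* current version count */
  AO_t cur = loc->AO_val2;      /* current pointer value */

  if (cur != expected) return 0;
  /* Succeeds only if neither half changed in the meantime. */
  return AO_compare_double_and_swap_double_full(loc, ver, cur,
                                                ver + 1, new_ptr);
}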

#include "../ao_t_is_int.h"