2 * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 /* The following is useful primarily for debugging and documentation. */
24 /* We define various atomic operations by acquiring a global pthread */
25 /* lock. The resulting implementation will perform poorly, but should */
26 /* be correct unless it is used from signal handlers. */
27 /* We assume that all pthread operations act like full memory barriers. */
28 /* (We believe that is the intent of the specification.) */
#include <assert.h>
#include <pthread.h>

#include "test_and_set_t_is_ao_t.h"
33 /* This is not necessarily compatible with the native */
34 /* implementation. But those can't be safely mixed anyway. */
36 /* We define only the full barrier variants, and count on the */
37 /* generalization section below to fill in the rest. */
38 extern pthread_mutex_t AO_pt_lock;
43 pthread_mutex_lock(&AO_pt_lock);
44 pthread_mutex_unlock(&AO_pt_lock);
46 #define AO_HAVE_nop_full
49 AO_load_full(const volatile AO_t *addr)
52 pthread_mutex_lock(&AO_pt_lock);
54 pthread_mutex_unlock(&AO_pt_lock);
57 #define AO_HAVE_load_full
60 AO_store_full(volatile AO_t *addr, AO_t val)
62 pthread_mutex_lock(&AO_pt_lock);
64 pthread_mutex_unlock(&AO_pt_lock);
66 #define AO_HAVE_store_full
68 AO_INLINE unsigned char
69 AO_char_load_full(const volatile unsigned char *addr)
72 pthread_mutex_lock(&AO_pt_lock);
74 pthread_mutex_unlock(&AO_pt_lock);
77 #define AO_HAVE_char_load_full
80 AO_char_store_full(volatile unsigned char *addr, unsigned char val)
82 pthread_mutex_lock(&AO_pt_lock);
84 pthread_mutex_unlock(&AO_pt_lock);
86 #define AO_HAVE_char_store_full
88 AO_INLINE unsigned short
89 AO_short_load_full(const volatile unsigned short *addr)
91 unsigned short result;
92 pthread_mutex_lock(&AO_pt_lock);
94 pthread_mutex_unlock(&AO_pt_lock);
97 #define AO_HAVE_short_load_full
100 AO_short_store_full(volatile unsigned short *addr, unsigned short val)
102 pthread_mutex_lock(&AO_pt_lock);
104 pthread_mutex_unlock(&AO_pt_lock);
106 #define AO_HAVE_short_store_full
108 AO_INLINE unsigned int
109 AO_int_load_full(const volatile unsigned int *addr)
112 pthread_mutex_lock(&AO_pt_lock);
114 pthread_mutex_unlock(&AO_pt_lock);
117 #define AO_HAVE_int_load_full
120 AO_int_store_full(volatile unsigned int *addr, unsigned int val)
122 pthread_mutex_lock(&AO_pt_lock);
124 pthread_mutex_unlock(&AO_pt_lock);
126 #define AO_HAVE_int_store_full
128 AO_INLINE AO_TS_VAL_t
129 AO_test_and_set_full(volatile AO_TS_t *addr)
132 pthread_mutex_lock(&AO_pt_lock);
133 result = (AO_TS_VAL_t)(*addr);
135 pthread_mutex_unlock(&AO_pt_lock);
136 assert(result == AO_TS_SET || result == AO_TS_CLEAR);
139 #define AO_HAVE_test_and_set_full
142 AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
146 pthread_mutex_lock(&AO_pt_lock);
149 pthread_mutex_unlock(&AO_pt_lock);
152 #define AO_HAVE_fetch_and_add_full
154 AO_INLINE unsigned char
155 AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
159 pthread_mutex_lock(&AO_pt_lock);
162 pthread_mutex_unlock(&AO_pt_lock);
165 #define AO_HAVE_char_fetch_and_add_full
167 AO_INLINE unsigned short
168 AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
172 pthread_mutex_lock(&AO_pt_lock);
175 pthread_mutex_unlock(&AO_pt_lock);
178 #define AO_HAVE_short_fetch_and_add_full
180 AO_INLINE unsigned int
181 AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
185 pthread_mutex_lock(&AO_pt_lock);
188 pthread_mutex_unlock(&AO_pt_lock);
191 #define AO_HAVE_int_fetch_and_add_full
194 AO_or_full(volatile AO_t *p, AO_t incr)
198 pthread_mutex_lock(&AO_pt_lock);
201 pthread_mutex_unlock(&AO_pt_lock);
203 #define AO_HAVE_or_full
206 AO_compare_and_swap_full(volatile AO_t *addr,
207 AO_t old, AO_t new_val)
209 pthread_mutex_lock(&AO_pt_lock);
213 pthread_mutex_unlock(&AO_pt_lock);
217 pthread_mutex_unlock(&AO_pt_lock);
220 #define AO_HAVE_compare_and_swap_full
222 /* Unlike real architectures, we define both double-width CAS variants. */
228 #define AO_HAVE_double_t
231 AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
232 AO_t old1, AO_t old2,
233 AO_t new1, AO_t new2)
235 pthread_mutex_lock(&AO_pt_lock);
236 if (addr -> AO_val1 == old1 && addr -> AO_val2 == old2)
238 addr -> AO_val1 = new1;
239 addr -> AO_val2 = new2;
240 pthread_mutex_unlock(&AO_pt_lock);
244 pthread_mutex_unlock(&AO_pt_lock);
247 #define AO_HAVE_compare_double_and_swap_double_full
250 AO_compare_and_swap_double_full(volatile AO_double_t *addr,
252 AO_t new1, AO_t new2)
254 pthread_mutex_lock(&AO_pt_lock);
255 if (addr -> AO_val1 == old1)
257 addr -> AO_val1 = new1;
258 addr -> AO_val2 = new2;
259 pthread_mutex_unlock(&AO_pt_lock);
263 pthread_mutex_unlock(&AO_pt_lock);
266 #define AO_HAVE_compare_and_swap_double_full
268 /* We can't use hardware loads and stores, since they don't */
269 /* interact correctly with atomic updates. */