/*
 * memfuncs.c: Our own bzero/memmove.
 *
 * Copyright (C) 2013-2015 Xamarin Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License 2.0 as published by the Free Software Foundation;
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License 2.0 along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
 * need to copy or zero out memory in code that might be interrupted by collections. To
 * guarantee that those operations will not result in invalid pointers, we must do it
 * word-atomically.
 *
 * libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
 * cases where one would assume so. For instance, some implementations (like Darwin's on
 * x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
 * the region preceding the first vector-aligned address. That region could be
 * word-aligned, but it would still be copied byte-wise.
 *
 * All our memory writes here are to "volatile" locations. This is so that C compilers
 * don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
 * do this.
 */
43 #define ptr_mask ((sizeof (void*) - 1))
44 #define _toi(ptr) ((size_t)ptr)
45 #define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
46 #define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
47 #define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
48 #if SIZEOF_VOID_P == 4
49 #define bytes_to_words(n) ((size_t)(n) >> 2)
50 #elif SIZEOF_VOID_P == 8
51 #define bytes_to_words(n) ((size_t)(n) >> 3)
53 #error We only support 32 and 64 bit architectures.
/*
 * Zero `words` machine words starting at `dest`, one word-sized store at a
 * time. The destination is written through a volatile pointer so the compiler
 * cannot collapse the loop back into a (possibly byte-wise) bzero()/memset()
 * call. Was truncated: the loop counter declaration, loop body and the
 * `} while (0)` terminator were missing.
 */
#define BZERO_WORDS(dest,words) do {			\
	void * volatile *__d = (void* volatile*)(dest);	\
	int __n = (int)(words);				\
	int __i;					\
	for (__i = 0; __i < __n; ++__i)			\
		__d [__i] = NULL;			\
	} while (0)
/**
 * mono_gc_bzero_aligned:
 * @dest: address to start to clear
 * @size: size of the region to clear
 *
 * Zero @size bytes starting at @dest.
 * The address of @dest MUST be aligned to word boundaries.
 *
 * FIXME borrow faster code from some BSD libc or bionic
 */
void
mono_gc_bzero_aligned (void *dest, size_t size)
{
	volatile char *d = (char*)dest;
	size_t tail_bytes, word_bytes;

	g_assert (unaligned_bytes (dest) == 0);

	/* Zero the word-aligned prefix word-atomically; the switch unrolls the
	 * common small sizes (1-4 words) so they avoid the generic loop. */
	word_bytes = (size_t)align_down (size);
	switch (word_bytes) {
	case sizeof (void*) * 1:
		BZERO_WORDS (d, 1);
		break;
	case sizeof (void*) * 2:
		BZERO_WORDS (d, 2);
		break;
	case sizeof (void*) * 3:
		BZERO_WORDS (d, 3);
		break;
	case sizeof (void*) * 4:
		BZERO_WORDS (d, 4);
		break;
	default:
		BZERO_WORDS (d, bytes_to_words (word_bytes));
	}

	/* The sub-word tail cannot contain a whole pointer (pointers are word
	 * aligned), so byte-wise stores are safe here. */
	tail_bytes = unaligned_bytes (size);
	if (tail_bytes) {
		d += word_bytes;
		do {
			*d++ = 0;
		} while (--tail_bytes);
	}
}
/**
 * mono_gc_bzero_atomic:
 * @dest: address to start to clear
 * @size: size of the region to clear
 *
 * Zero @size bytes starting at @dest.
 *
 * Use this to zero memory without word tearing when @dest is aligned.
 * If @dest is misaligned it cannot hold managed pointers, so plain
 * memset() is safe; otherwise defer to the word-atomic variant.
 */
void
mono_gc_bzero_atomic (void *dest, size_t size)
{
	if (unaligned_bytes (dest))
		memset (dest, 0, size);
	else
		mono_gc_bzero_aligned (dest, size);
}
/*
 * Copy `words` machine words from `src` to `dest`, ascending, one word-sized
 * store at a time. Destination stores go through a volatile pointer so the
 * compiler cannot rewrite the loop into a non-word-atomic memcpy(). Ascending
 * order is only safe when dest <= src for overlapping regions. Was truncated:
 * the `__i` declaration and the `} while (0)` terminator were missing.
 */
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);			\
	int __n = (int)(words);				\
	int __i;					\
	for (__i = 0; __i < __n; ++__i)			\
		__d [__i] = __s [__i];			\
	} while (0)
/*
 * Copy `words` machine words from `src` to `dest`, descending, one word-sized
 * store at a time — the overlap-safe direction when dest > src. Destination
 * stores go through a volatile pointer to keep the compiler from substituting
 * a non-word-atomic memmove(). Was truncated: the `__i` declaration and the
 * `} while (0)` terminator were missing.
 */
#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);			\
	int __n = (int)(words);				\
	int __i;					\
	for (__i = __n - 1; __i >= 0; --__i)		\
		__d [__i] = __s [__i];			\
	} while (0)
/**
 * mono_gc_memmove_aligned:
 * @dest: destination of the move
 * @src: source of the move
 * @size: size of the block to move
 *
 * Move @size bytes from @src to @dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned
 * to word boundaries. Handles overlapping regions like memmove().
 */
void
mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
{
	g_assert (unaligned_bytes (dest) == 0);
	g_assert (unaligned_bytes (src) == 0);

	/*
	If we're copying less than a word we don't need to worry about word tearing
	so we bailout to memmove early.
	*/
	if (size < sizeof (void*)) {
		memmove (dest, src, size);
		return;
	}

	/*
	 * A bit of explanation on why we align only dest before doing word copies.
	 * Pointers to managed objects must always be stored in word aligned addresses, so
	 * even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
	 *
	 * We don't need to case when source and destination have different alignments since we only do word stores
	 * using memmove, which must handle it.
	 */
	if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
		volatile char *p = (char*)dest + size;
		char *s = (char*)src + size;
		char *start = (char*)dest;
		char *align_end = MAX((char*)dest, (char*)align_down (p));
		char *word_start;
		size_t bytes_to_memmove;

		/* Copy the sub-word tail byte-wise down to the last word boundary
		 * (a sub-word tail cannot hold a whole pointer). */
		while (p > align_end)
			*--p = *--s;

		/* Then copy the aligned middle word-atomically, descending. */
		word_start = (char *)align_up (start);
		bytes_to_memmove = p - word_start;
		p -= bytes_to_memmove;
		s -= bytes_to_memmove;
		MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
	} else {
		volatile char *d = (char*)dest;
		const char *s = (const char*)src;
		size_t tail_bytes;

		/* copy all words with memmove */
		MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));

		/* Byte-wise copy of the sub-word tail. */
		tail_bytes = unaligned_bytes (size);
		if (tail_bytes) {
			d += (size_t)align_down (size);
			s += (size_t)align_down (size);
			do {
				*d++ = *s++;
			} while (--tail_bytes);
		}
	}
}
/**
 * mono_gc_memmove_atomic:
 * @dest: destination of the move
 * @src: source of the move
 * @size: size of the block to move
 *
 * Move @size bytes from @src to @dest.
 *
 * Use this to copy memory without word tearing when both pointers are aligned.
 * If either pointer is misaligned the region cannot hold managed pointers,
 * so plain memmove() is safe; otherwise defer to the word-atomic variant.
 */
void
mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
{
	if (unaligned_bytes (_toi (dest) | _toi (src)))
		memmove (dest, src, size);
	else
		mono_gc_memmove_aligned (dest, src, size);
}