2 * test-sgen-qsort.c: Our own bzero/memmove.
4 * Copyright (C) 2013 Xamarin Inc
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License 2.0 as published by the Free Software Foundation;
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License 2.0 along with this library; if not, write to the Free
17 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 * SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
22 * need to copy or zero out memory in code that might be interrupted by collections. To
23 * guarantee that those operations will not result in invalid pointers, we must do it
26 * libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
27 * cases where one would assume so. For instance, some implementations (like Darwin's on
28 * x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
29 * the region preceding the first vector-aligned address. That region could be
30 * word-aligned, but it would still be copied byte-wise.
32 * All our memory writes here are to "volatile" locations. This is so that C compilers
33 * don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
39 #include "metadata/gc-internal.h"
/* Mask selecting the low (sub-word) bits of an address. */
#define ptr_mask ((sizeof (void*) - 1))
/* Convert a pointer (or integer) to an address-sized integer.
 * The argument is parenthesized so complex expressions expand correctly. */
#define _toi(ptr) ((size_t)(ptr))
/* Number of bytes by which ptr is misaligned from a word boundary (0 if aligned). */
#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
/* Round an address down to the nearest word boundary. */
#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
/* Round an address up to the nearest word boundary. */
#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
46 #if SIZEOF_VOID_P == 4
47 #define bytes_to_words(n) ((size_t)(n) >> 2)
48 #elif SIZEOF_VOID_P == 8
49 #define bytes_to_words(n) ((size_t)(n) >> 3)
51 #error We only support 32 and 64 bit architectures.
/*
 * Zero `words` pointer-sized slots starting at `dest`, one aligned word
 * store at a time.  The writes go through a volatile pointer so the
 * compiler cannot collapse the loop back into a memset()/bzero() call
 * (which would lose the word-atomicity guarantee).
 */
#define BZERO_WORDS(dest,words) do {			\
	void * volatile *__d = (void* volatile*)(dest);	\
	int __n = (int)(words);				\
	int __i;					\
	for (__i = 0; __i < __n; ++__i)			\
		__d [__i] = NULL;			\
	} while (0)
/**
 * mono_gc_bzero:
 * @dest: address to start to clear
 * @size: size of the region to clear
 *
 * Zero @size bytes starting at @dest.
 *
 * Use this to zero memory that can hold managed pointers.
 *
 * FIXME borrow faster code from some BSD libc or bionic
 */
void
mono_gc_bzero (void *dest, size_t size)
{
	volatile char *d = (char*)dest;
	size_t tail_bytes, word_bytes;

	/*
	If we're copying less than a word, just use memset.

	We cannot bail out early if both are aligned because some implementations
	use byte copying for sizes smaller than 16. OSX, on this case.
	*/
	if (size < sizeof (void*)) {
		memset (dest, 0, size);
		return;
	}

	/*align to word boundary */
	while (unaligned_bytes (d) && size) {
		*d++ = 0;
		--size;
	}

	/* zero all full words; small counts are unrolled via the switch */
	word_bytes = (size_t)align_down (size);
	switch (word_bytes) {
	case sizeof (void*) * 1:
		BZERO_WORDS (d, 1);
		break;
	case sizeof (void*) * 2:
		BZERO_WORDS (d, 2);
		break;
	case sizeof (void*) * 3:
		BZERO_WORDS (d, 3);
		break;
	case sizeof (void*) * 4:
		BZERO_WORDS (d, 4);
		break;
	default:
		BZERO_WORDS (d, bytes_to_words (word_bytes));
	}

	/* zero the trailing bytes that don't fill a whole word */
	tail_bytes = unaligned_bytes (size);
	if (tail_bytes) {
		d += word_bytes;
		do {
			*d++ = 0;
		} while (--tail_bytes);
	}
}
/*
 * Copy `words` pointer-sized slots from `src` to `dest` in ascending
 * address order — safe when dest <= src for overlapping regions.  The
 * destination stores go through a volatile pointer so the compiler
 * cannot turn the loop back into a memcpy()/memmove() call.
 */
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);			\
	int __n = (int)(words);				\
	int __i;					\
	for (__i = 0; __i < __n; ++__i)			\
		__d [__i] = __s [__i];			\
	} while (0)
/*
 * Copy `words` pointer-sized slots from `src` to `dest` in descending
 * address order — safe when dest >= src for overlapping regions.  The
 * destination stores go through a volatile pointer so the compiler
 * cannot turn the loop back into a memcpy()/memmove() call.
 */
#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);			\
	int __n = (int)(words);				\
	int __i;					\
	for (__i = __n - 1; __i >= 0; --__i)		\
		__d [__i] = __s [__i];			\
	} while (0)
/**
 * mono_gc_memmove:
 * @dest: destination of the move
 * @src: source
 * @size: size of the block to move
 *
 * Move @size bytes from @src to @dest.
 * size MUST be a multiple of sizeof (gpointer)
 */
void
mono_gc_memmove (void *dest, const void *src, size_t size)
{
	/*
	If we're copying less than a word we don't need to worry about word tearing
	so we bailout to memmove early.
	*/
	if (size < sizeof (void*)) {
		memmove (dest, src, size);
		return;
	}

	/*
	 * A bit of explanation on why we align only dest before doing word copies.
	 * Pointers to managed objects must always be stored in word aligned addresses, so
	 * even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
	 *
	 * We don't need to case when source and destination have different alignments since we only do word stores
	 * using memmove, which must handle it.
	 */
	if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
		volatile char *p = (char*)dest + size;
		char *s = (char*)src + size;
		char *start = (char*)dest;
		char *align_end = MAX((char*)dest, (char*)align_down (p));
		char *word_start;
		size_t bytes_to_memmove;

		/* copy the unaligned tail byte-by-byte, moving down to a word boundary */
		while (p > align_end)
			*--p = *--s;

		word_start = align_up (start);
		bytes_to_memmove = p - word_start;
		p -= bytes_to_memmove;
		s -= bytes_to_memmove;
		MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
	} else {
		volatile char *d = (char*)dest;
		const char *s = (const char*)src;
		size_t tail_bytes;

		/*align to word boundary */
		while (unaligned_bytes (d)) {
			*d++ = *s++;
			--size;
		}

		/* copy all full words, word-at-a-time */
		MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));

		/* copy the trailing bytes that don't fill a whole word */
		tail_bytes = unaligned_bytes (size);
		if (tail_bytes) {
			d += (size_t)align_down (size);
			s += (size_t)align_down (size);
			do {
				*d++ = *s++;
			} while (--tail_bytes);
		}
	}
}