11 #include <sys/types.h>
17 #include "mono-codeman.h"
31 /* if a chunk has less than this amount of free space it's considered full */
32 #define MAX_WASTAGE 32
/* Some platforms (BSD/macOS) spell MAP_ANONYMOUS as MAP_ANON — presumably
 * guarded by an #ifndef in the elided lines; confirm before reordering. */
37 #define MAP_ANONYMOUS MAP_ANON
/* NOTE(review): MAP_32BIT keeps code mappings in the low address space,
 * presumably so short relative branches can reach them — confirm on amd64. */
44 #define ARCH_MAP_FLAGS MAP_32BIT
46 #define ARCH_MAP_FLAGS 0
/* NOTE(review): the struct tag is spelled "_CodeChunck" (sic). It must match
 * the tag used at the struct definition (not visible in this excerpt) —
 * verify before renaming. */
49 typedef struct _CodeChunck CodeChunk;
/* how the chunk's data was obtained — compared against CODE_FLAG_MMAP /
 * CODE_FLAG_MALLOC in free_chunklist() to pick the matching deallocator */
61 unsigned int flags: 8;
62 /* this number of bytes is available to resolve addresses far in memory */
63 unsigned int bsize: 24;
/* A code manager keeps two chunk lists: `current` (chunks that may still
 * have free space) and `full` (retired chunks), plus a `dynamic` flag
 * (see their uses in reserve/destroy/invalidate below). */
66 struct _MonoCodeManager {
/**
 * mono_code_manager_new:
 *
 * Heap-allocate a new MonoCodeManager.
 * NOTE(review): the NULL-check on the malloc result and the field
 * initialization are in lines elided from this excerpt — verify they exist.
 */
73 mono_code_manager_new (void)
75 MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
/**
 * mono_code_manager_new_dynamic:
 *
 * Like mono_code_manager_new(), but the manager is marked "dynamic"
 * (presumably sets cman->dynamic in an elided line; the flag makes
 * new_codechunk() use malloc instead of mmap — confirm against the
 * missing body).
 */
85 mono_code_manager_new_dynamic (void)
87 MonoCodeManager *cman = mono_code_manager_new ();
/* Walk a chunk list and release every chunk, freeing each chunk's code
 * memory with the deallocator matching how it was obtained (flags). */
94 free_chunklist (CodeChunk *chunk)
100 if (dead->flags == CODE_FLAG_MMAP) {
/* memory came from mmap(): unmap the whole region */
102 munmap (dead->data, dead->size);
104 } else if (dead->flags == CODE_FLAG_MALLOC) {
/**
 * mono_code_manager_destroy:
 * @cman: the code manager to destroy
 *
 * Frees both chunk lists of @cman (the free of @cman itself is in an
 * elided line — verify).
 */
112 mono_code_manager_destroy (MonoCodeManager *cman)
114 free_chunklist (cman->full);
115 free_chunklist (cman->current);
119 /* fill all the code memory with an invalid/trap value: 0xcc (x86 breakpoint) on x86/amd64, 0x2a (42) elsewhere */
121 mono_code_manager_invalidate (MonoCodeManager *cman)
125 #if defined(__i386__) || defined(__x86_64__)
126 int fill_value = 0xcc; /* x86 break */
128 int fill_value = 0x2a;
/* overwrite every chunk in both lists so stale code faults/traps if executed */
131 for (chunk = cman->current; chunk; chunk = chunk->next)
132 memset (chunk->data, fill_value, chunk->size);
133 for (chunk = cman->full; chunk; chunk = chunk->next)
134 memset (chunk->data, fill_value, chunk->size);
/**
 * mono_code_manager_foreach:
 * @cman: code manager
 * @func: callback invoked as func (data, size, bsize, user_data)
 * @user_data: opaque pointer passed through to @func
 *
 * Visits every chunk in the current list, then the full list. A non-zero
 * return from @func presumably stops iteration (the statement under each
 * `if` is elided — confirm it is a return/break).
 */
138 mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
141 for (chunk = cman->current; chunk; chunk = chunk->next) {
142 if (func (chunk->data, chunk->size, chunk->bsize, user_data))
145 for (chunk = cman->full; chunk; chunk = chunk->next) {
146 if (func (chunk->data, chunk->size, chunk->bsize, user_data))
/* Return the allocation granularity used for sizing/aligning chunks:
 * dwAllocationGranularity (not dwPageSize) on Windows, since VirtualAlloc
 * granularity governs there; getpagesize() on POSIX systems. */
152 query_pagesize (void)
154 #ifdef PLATFORM_WIN32
156 GetSystemInfo (&info);
157 return info.dwAllocationGranularity;
159 return getpagesize ();
163 /* BIND_ROOM is the divisor for the chunk of code size dedicated
164 * to binding branches (branches not reachable with the immediate displacement)
165 * bind_size = size/BIND_ROOM;
166 * we should reduce it and make MIN_PAGES bigger for such systems
168 #if defined(__ppc__) || defined(__powerpc__)
/*
 * new_codechunk:
 * @dynamic: non-zero to request a malloc-backed chunk (per-method code)
 * @size: minimum number of usable bytes the caller needs
 *
 * Allocate a CodeChunk backing at least @size bytes of executable memory:
 * mmap an anonymous executable mapping when possible, fall back to
 * /dev/zero mapping, then to malloc + mprotect/VirtualProtect. A prefix
 * of bsize bytes (size/BIND_ROOM, aligned) is reserved for branch thunks.
 */
176 new_codechunk (int dynamic, int size)
178 static int pagesize = 0; /* queried once and cached across calls */
179 int minsize, flags = CODE_FLAG_MMAP;
180 int chunk_size, bsize = 0;
/* dynamic chunks are malloc-backed (presumably guarded by `if (dynamic)`
 * in an elided line — confirm) */
185 flags = CODE_FLAG_MALLOC;
189 pagesize = query_pagesize ();
193 flags = CODE_FLAG_MALLOC;
196 minsize = pagesize * MIN_PAGES;
198 chunk_size = minsize;
/* round the requested size up to a whole number of pages
 * (assumes pagesize is a power of two) */
201 chunk_size += pagesize - 1;
202 chunk_size &= ~ (pagesize - 1);
/* thunk area: size/BIND_ROOM bytes, clamped to at least MIN_BSIZE,
 * rounded up to MIN_ALIGN */
206 bsize = chunk_size / BIND_ROOM;
207 if (bsize < MIN_BSIZE)
209 bsize += MIN_ALIGN -1;
210 bsize &= ~ (MIN_ALIGN - 1);
211 if (chunk_size - size < bsize) {
/* not enough room for both the request and the thunks: grow by a page */
213 chunk_size = size + bsize;
215 chunk_size += pagesize;
219 /* does it make sense to use the mmap-like API? */
220 if (flags == CODE_FLAG_MALLOC) {
221 ptr = malloc (chunk_size);
/* first choice: anonymous executable mapping */
228 ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS|ARCH_MAP_FLAGS, -1, 0);
/* NOTE(review): failure is tested as (void*)-1; POSIX spells this
 * MAP_FAILED — verify they are identical on all supported targets */
229 if (ptr == (void*)-1) {
/* second choice: map /dev/zero for systems without MAP_ANONYMOUS */
230 int fd = open ("/dev/zero", O_RDONLY);
232 ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|ARCH_MAP_FLAGS, fd, 0);
235 if (ptr == (void*)-1) {
/* last resort: plain malloc, later made executable below */
236 ptr = malloc (chunk_size);
239 flags = CODE_FLAG_MALLOC;
247 if (flags == CODE_FLAG_MALLOC) {
249 * AMD64 processors maintain icache coherency only for pages which are
252 #ifndef PLATFORM_WIN32
/* malloc'd memory is not executable: mprotect every whole page that
 * overlaps [ptr, ptr + chunk_size) */
254 char *page_start = (char *) (((unsigned long long) (ptr)) & ~ (pagesize - 1));
255 int pages = ((char*)ptr + chunk_size - page_start + pagesize - 1) / pagesize;
256 int err = mprotect (page_start, pages * pagesize, PROT_READ | PROT_WRITE | PROT_EXEC);
262 int err = VirtualProtect (ptr, chunk_size, PAGE_EXECUTE_READWRITE, &oldp);
267 /* Make sure the thunks area is zeroed */
268 memset (ptr, 0, bsize);
271 chunk = malloc (sizeof (CodeChunk));
/* descriptor allocation failed: release the code memory with the
 * deallocator matching how it was obtained */
273 if (flags == CODE_FLAG_MALLOC)
277 munmap (ptr, chunk_size);
282 chunk->size = chunk_size;
284 chunk->flags = flags;
286 chunk->bsize = bsize;
288 /*printf ("code chunk at: %p\n", ptr);*/
/**
 * mono_code_manager_reserve:
 * @cman: code manager
 * @size: number of bytes requested
 *
 * Return a pointer to @size bytes of code memory, taking it from an
 * existing current chunk when one has room, otherwise allocating a new
 * chunk. Mostly-full chunks are retired to cman->full so the current
 * list stays short.
 */
293 mono_code_manager_reserve (MonoCodeManager *cman, int size)
295 CodeChunk *chunk, *prev;
/* align size down to MIN_ALIGN. NOTE(review): the round-up step
 * (size += MIN_ALIGN under an alignment check) is in elided lines —
 * verify it precedes this mask, otherwise requests get truncated. */
299 size &= ~ (MIN_ALIGN - 1);
301 if (!cman->current) {
302 cman->current = new_codechunk (cman->dynamic, size);
/* first pass: serve the request from any current chunk with room */
307 for (chunk = cman->current; chunk; chunk = chunk->next) {
308 if (chunk->pos + size <= chunk->size) {
309 ptr = chunk->data + chunk->pos;
315 * no room found, move one filled chunk to cman->full
316 * to keep cman->current from growing too much
/* second pass: retire the first chunk with under MIN_ALIGN*4 bytes free.
 * NOTE(review): prev must be initialized (to NULL) before this loop —
 * the initialization is in an elided line; confirm. */
319 for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
320 if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
/* unlink the chunk from the current list (head vs interior cases)... */
323 prev->next = chunk->next;
325 cman->current = chunk->next;
/* ...and push it onto the full list */
327 chunk->next = cman->full;
/* allocate a fresh chunk and make it the new head of the current list */
331 chunk = new_codechunk (cman->dynamic, size);
334 chunk->next = cman->current;
335 cman->current = chunk;
341 * if we reserved too much room for a method and we didn't allocate
342 * already from the code manager, we can get back the excess allocation.
/* Shrink the last reservation from @size down to @newsize bytes. */
345 mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
/* round newsize up to MIN_ALIGN. NOTE(review): an alignment guard
 * (`if (newsize & (MIN_ALIGN - 1))`) is expected in the elided lines
 * before each adjustment — without it this over-rounds aligned sizes. */
347 newsize += MIN_ALIGN;
348 newsize &= ~ (MIN_ALIGN - 1);
350 size &= ~ (MIN_ALIGN - 1);
/* only reclaim when @data is exactly the most recent reservation at the
 * end of the current chunk — otherwise the space is already committed */
352 if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
353 cman->current->pos -= size - newsize;