12 #include <sys/types.h>
18 #include "mono-codeman.h"
32 /* if a chunk has less than this amount of free space it's considered full */
33 #define MAX_WASTAGE 32
/* some BSD-derived systems only provide MAP_ANON; supply the POSIX name so
 * the mmap() calls below compile there too */
38 #define MAP_ANONYMOUS MAP_ANON
/* NOTE(review): ARCH_MAP_FLAGS appears to select MAP_32BIT where code must
 * live in the low 4GB and 0 elsewhere — the surrounding #if conditions are
 * not visible here, confirm against the full file */
45 #define ARCH_MAP_FLAGS MAP_32BIT
47 #define ARCH_MAP_FLAGS 0
/* NOTE(review): the struct tag is spelled "_CodeChunck" (sic); kept as-is
 * because the struct definition lives elsewhere in this file */
50 typedef struct _CodeChunck CodeChunk;
/* how this chunk's memory was obtained; compared against CODE_FLAG_MMAP /
 * CODE_FLAG_MALLOC to pick the matching deallocator in free_chunklist() */
62 unsigned int flags: 8;
63 /* this number of bytes is available to resolve addresses far in memory */
64 unsigned int bsize: 24;
/* A code manager owns two chunk lists: "current" (chunks that may still have
 * free space) and "full" (retired chunks) — see mono_code_manager_reserve. */
67 struct _MonoCodeManager {
/* Create a new code manager; the manager struct itself is malloc'ed.
 * NOTE(review): field initialization and NULL-check are outside the
 * visible lines — confirm the malloc result is checked. */
74 mono_code_manager_new (void)
76 MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
/* Create a code manager for dynamic methods: a plain manager with its
 * dynamic flag set (the flag assignment is outside the visible lines). */
86 mono_code_manager_new_dynamic (void)
88 MonoCodeManager *cman = mono_code_manager_new ();
/* Release every chunk in the list, using the deallocator that matches how
 * each chunk's memory was obtained (flags is set in new_codechunk). */
95 free_chunklist (CodeChunk *chunk)
101 if (dead->flags == CODE_FLAG_MMAP) {
/* for mmap'ed chunks, data is the mapping base and size its length */
103 munmap (dead->data, dead->size);
105 } else if (dead->flags == CODE_FLAG_MALLOC) {
/* Destroy the manager: free both chunk lists (the final free of cman itself
 * is outside the visible lines). */
113 mono_code_manager_destroy (MonoCodeManager *cman)
115 free_chunklist (cman->full);
116 free_chunklist (cman->current);
120 /* debugging aid: fill all code memory with a recognizable value — 0xcc
 * (the x86 breakpoint opcode) on x86/amd64 so stale code traps, 0x2a (42)
 * elsewhere */
122 mono_code_manager_invalidate (MonoCodeManager *cman)
126 #if defined(__i386__) || defined(__x86_64__)
127 int fill_value = 0xcc; /* x86 break */
129 int fill_value = 0x2a;
/* overwrite both the active and the retired chunk lists */
132 for (chunk = cman->current; chunk; chunk = chunk->next)
133 memset (chunk->data, fill_value, chunk->size);
134 for (chunk = cman->full; chunk; chunk = chunk->next)
135 memset (chunk->data, fill_value, chunk->size);
/* Invoke func (data, size, bsize, user_data) on every chunk of both lists.
 * NOTE(review): the statement guarded by each `if` is outside the visible
 * lines — presumably an early return when func reports non-zero; confirm. */
139 mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
142 for (chunk = cman->current; chunk; chunk = chunk->next) {
143 if (func (chunk->data, chunk->size, chunk->bsize, user_data))
146 for (chunk = cman->full; chunk; chunk = chunk->next) {
147 if (func (chunk->data, chunk->size, chunk->bsize, user_data))
/* Return the granularity used to size and align chunks:
 * dwAllocationGranularity on Windows (the VirtualAlloc granularity, which
 * may be larger than the page size), getpagesize() elsewhere. */
153 query_pagesize (void)
155 #ifdef PLATFORM_WIN32
157 GetSystemInfo (&info);
158 return info.dwAllocationGranularity;
160 return getpagesize ();
164 /* BIND_ROOM is the divisor applied to the chunk size to obtain the space
165 * dedicated to binding branches (branches not reachable with the immediate
166 * displacement): bind_size = size / BIND_ROOM;
167 * we should reduce it and make MIN_PAGES bigger for such systems
169 #if defined(__ppc__) || defined(__powerpc__)
/* Allocate and initialize one CodeChunk holding at least `size` usable bytes.
 * dynamic != 0 forces plain malloc; otherwise an executable mmap is
 * preferred, falling back to a /dev/zero-backed mapping and finally to
 * malloc made executable via mprotect/VirtualProtect. The first bsize bytes
 * of the chunk are reserved as a zeroed thunk area for binding far branches.
 * NOTE(review): several lines of this function are not visible here; the
 * comments below describe only the visible logic. */
177 new_codechunk (int dynamic, int size)
179 static int pagesize = 0;
180 int minsize, flags = CODE_FLAG_MMAP;
181 int chunk_size, bsize = 0;
186 flags = CODE_FLAG_MALLOC;
/* the page size is queried once and cached in the static above */
190 pagesize = query_pagesize ();
194 flags = CODE_FLAG_MALLOC;
/* default chunk size: MIN_PAGES whole pages */
197 minsize = pagesize * MIN_PAGES;
199 chunk_size = minsize;
/* round the requested size up to a whole number of pages */
202 chunk_size += pagesize - 1;
203 chunk_size &= ~ (pagesize - 1);
/* thunk area: size/BIND_ROOM, clamped to at least MIN_BSIZE and rounded
 * up to MIN_ALIGN */
207 bsize = chunk_size / BIND_ROOM;
208 if (bsize < MIN_BSIZE)
210 bsize += MIN_ALIGN -1;
211 bsize &= ~ (MIN_ALIGN - 1);
/* grow the chunk if the thunk area would eat into the requested size */
212 if (chunk_size - size < bsize) {
213 chunk_size = size + bsize;
214 chunk_size += pagesize - 1;
215 chunk_size &= ~ (pagesize - 1);
219 /* does it make sense to use the mmap-like API? */
220 if (flags == CODE_FLAG_MALLOC) {
221 ptr = malloc (chunk_size);
/* primary path: private anonymous mapping, readable/writable/executable */
228 ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS|ARCH_MAP_FLAGS, -1, 0);
229 if (ptr == (void*)-1) {
/* fallback for systems without working anonymous mappings: map /dev/zero.
 * NOTE(review): whether fd is closed afterwards is outside the visible
 * lines — confirm there is no file-descriptor leak */
230 int fd = open ("/dev/zero", O_RDONLY);
232 ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|ARCH_MAP_FLAGS, fd, 0);
/* last resort: plain malloc; flags is downgraded so the memory is made
 * executable below and freed with free() later */
235 if (ptr == (void*)-1) {
236 ptr = malloc (chunk_size);
239 flags = CODE_FLAG_MALLOC;
247 if (flags == CODE_FLAG_MALLOC) {
249 * AMD64 processors maintain icache coherency only for pages which are
252 #ifndef PLATFORM_WIN32
/* malloc'ed memory is not executable: set PROT_EXEC on the full range of
 * pages covering [ptr, ptr + chunk_size) */
254 char *page_start = (char *) (((gssize) (ptr)) & ~ (pagesize - 1));
255 int pages = ((char*)ptr + chunk_size - page_start + pagesize - 1) / pagesize;
256 int err = mprotect (page_start, pages * pagesize, PROT_READ | PROT_WRITE | PROT_EXEC);
262 int err = VirtualProtect (ptr, chunk_size, PAGE_EXECUTE_READWRITE, &oldp);
268 /* Make sure the thunks area is zeroed */
269 memset (ptr, 0, bsize);
/* chunk header; on failure the data allocation is undone below */
273 chunk = malloc (sizeof (CodeChunk));
275 if (flags == CODE_FLAG_MALLOC)
279 munmap (ptr, chunk_size);
284 chunk->size = chunk_size;
286 chunk->flags = flags;
288 chunk->bsize = bsize;
290 /*printf ("code chunk at: %p\n", ptr);*/
/* Reserve `size` bytes (rounded up to MIN_ALIGN) of code memory and return a
 * pointer into a chunk. Strategy: scan cman->current for a chunk with room;
 * if none has space, retire one nearly-full chunk to cman->full and allocate
 * a fresh chunk at the head of the current list.
 * NOTE(review): the `+=` half of the round-up, the pos advance and the
 * return statements are outside the visible lines. */
295 mono_code_manager_reserve (MonoCodeManager *cman, int size)
297 CodeChunk *chunk, *prev;
301 size &= ~ (MIN_ALIGN - 1);
/* first allocation ever: create the initial chunk */
303 if (!cman->current) {
304 cman->current = new_codechunk (cman->dynamic, size);
/* fast path: first current chunk with enough free space wins */
309 for (chunk = cman->current; chunk; chunk = chunk->next) {
310 if (chunk->pos + size <= chunk->size) {
311 ptr = chunk->data + chunk->pos;
317 * no room found, move one filled chunk to cman->full
318 * to keep cman->current from growing too much
/* find a chunk with less than MIN_ALIGN*4 bytes left and unlink it,
 * tracking prev so the unlink works mid-list as well as at the head */
321 for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
322 if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
325 prev->next = chunk->next;
327 cman->current = chunk->next;
/* push the retired chunk onto the full list */
329 chunk->next = cman->full;
/* allocate a fresh chunk and make it the new head of the current list */
333 chunk = new_codechunk (cman->dynamic, size);
336 chunk->next = cman->current;
337 cman->current = chunk;
338 ptr = chunk->data + chunk->pos;
344 * if we reserved too much room for a method and nothing else has been
345 * allocated from the code manager since, we can give back the excess.
348 mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
350 newsize += MIN_ALIGN;
351 newsize &= ~ (MIN_ALIGN - 1);
353 size &= ~ (MIN_ALIGN - 1);
355 if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
356 cman->current->pos -= size - newsize;