#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <glib.h>
#ifdef PLATFORM_WIN32
#include <windows.h>
#else
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#endif

#include "mono-codeman.h"
#if defined(__ia64__) || defined(__x86_64__)
/*
 * We require 16-byte alignment on amd64 so the fp literals embedded in the
 * code are properly aligned for SSE2.
 */
#define MIN_ALIGN 16
#else
#define MIN_ALIGN 8
#endif
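
/*
 * For illustration (not part of the original source): every size rounding
 * in this file uses the usual power-of-two mask trick, e.g. with
 * MIN_ALIGN == 16:
 *
 *	aligned = (size + MIN_ALIGN - 1) & ~(MIN_ALIGN - 1);	/* 33 -> 48, 48 -> 48 */
 */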
#define MIN_PAGES 16

/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
#define MIN_BSIZE 32
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/* keep the code in the low 4 GB of the address space on amd64, so that
 * 32-bit relative branches and pointers embedded in the code can reach it */
#if defined(__x86_64__) && defined(MAP_32BIT)
#define ARCH_MAP_FLAGS MAP_32BIT
#else
#define ARCH_MAP_FLAGS 0
#endif
typedef struct _CodeChunk CodeChunk;

enum {
	CODE_FLAG_MMAP,
	CODE_FLAG_MALLOC
};

struct _CodeChunk {
	char *data;
	int pos;
	int size;
	CodeChunk *next;
	unsigned int flags: 8;
	/* this many bytes at the start of the chunk are reserved for thunks,
	 * used to resolve branch targets too far away for a direct branch */
	unsigned int bsize: 24;
};
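
/*
 * Sketch of the chunk layout implied by the code below (pos starts at
 * bsize, so normal reservations never overlap the thunk area):
 *
 *	data            data + bsize          data + pos         data + size
 *	 |  thunk space   |   allocated code   |    free space     |
 */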
struct _MonoCodeManager {
	int dynamic;
	CodeChunk *current;
	CodeChunk *full;
};
MonoCodeManager*
mono_code_manager_new (void)
{
	MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
	if (!cman)
		return NULL;
	cman->current = NULL;
	cman->full = NULL;
	cman->dynamic = 0;
	return cman;
}
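
/*
 * A dynamic code manager malloc-s each chunk and sizes it to the single
 * request (see the dynamic branch in new_codechunk below): this suits code
 * that may be freed individually instead of living as long as the manager.
 */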
MonoCodeManager*
mono_code_manager_new_dynamic (void)
{
	MonoCodeManager *cman = mono_code_manager_new ();
	cman->dynamic = 1;
	return cman;
}
static void
free_chunklist (CodeChunk *chunk)
{
	CodeChunk *dead;
	for (; chunk; ) {
		dead = chunk;
		chunk = chunk->next;
		if (dead->flags == CODE_FLAG_MMAP) {
#ifndef PLATFORM_WIN32
			munmap (dead->data, dead->size);
#endif
		} else if (dead->flags == CODE_FLAG_MALLOC) {
			free (dead->data);
		}
		free (dead);
	}
}
void
mono_code_manager_destroy (MonoCodeManager *cman)
{
	free_chunklist (cman->full);
	free_chunklist (cman->current);
	free (cman);
}
/* fill all the memory with an invalid value: 0xcc (the int3 breakpoint)
 * on x86, 0x2a (42) elsewhere */
void
mono_code_manager_invalidate (MonoCodeManager *cman)
{
	CodeChunk *chunk;

#if defined(__i386__) || defined(__x86_64__)
	int fill_value = 0xcc; /* x86 breakpoint */
#else
	int fill_value = 0x2a;
#endif
	for (chunk = cman->current; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
	for (chunk = cman->full; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
}
void
mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
{
	CodeChunk *chunk;
	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
			return;
	}
	for (chunk = cman->full; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
			return;
	}
}
static int
query_pagesize (void)
{
#ifdef PLATFORM_WIN32
	SYSTEM_INFO info;
	GetSystemInfo (&info);
	/* use the allocation granularity (usually 64 KB): VirtualAlloc rounds
	 * reservations up to it, so it is the useful "page size" here */
	return info.dwAllocationGranularity;
#else
	return getpagesize ();
#endif
}
/* BIND_ROOM is the divisor applied to the chunk size to compute the space
 * dedicated to binding branches (branches whose target is not reachable with
 * the immediate displacement):
 *	bind_size = chunk_size / BIND_ROOM;
 * for architectures with short branch displacements we should reduce
 * BIND_ROOM and make MIN_PAGES bigger.
 */
#if defined(__ppc__) || defined(__powerpc__)
#define BIND_ROOM 4
#endif
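
/*
 * Worked example: with BIND_ROOM == 4 as defined above, a 64 KB chunk
 * sets aside 64 KB / 4 = 16 KB of thunk space for out-of-range branches.
 */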
static CodeChunk*
new_codechunk (int dynamic, int size)
{
	static int pagesize = 0;
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	CodeChunk *chunk;
	void *ptr;

#ifdef PLATFORM_WIN32
	/* no mmap on Windows: always allocate with malloc */
	flags = CODE_FLAG_MALLOC;
#endif
	if (!pagesize)
		pagesize = query_pagesize ();

	if (dynamic) {
		chunk_size = size;
		flags = CODE_FLAG_MALLOC;
	} else {
		minsize = pagesize * MIN_PAGES;
		if (size < minsize)
			chunk_size = minsize;
		else {
			chunk_size = size;
			chunk_size += pagesize - 1;
			chunk_size &= ~ (pagesize - 1);
		}
	}
#ifdef BIND_ROOM
	bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
		bsize = MIN_BSIZE;
	bsize += MIN_ALIGN - 1;
	bsize &= ~ (MIN_ALIGN - 1);
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		chunk_size += pagesize - 1;
		chunk_size &= ~ (pagesize - 1);
	}
#endif
	/* does it make sense to use the mmap-like API? */
	if (flags == CODE_FLAG_MALLOC) {
		ptr = malloc (chunk_size);
		if (!ptr)
			return NULL;
	}
#ifndef PLATFORM_WIN32
	else {
		/* try an anonymous mapping first, then /dev/zero, then plain malloc */
		ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS|ARCH_MAP_FLAGS, -1, 0);
		if (ptr == MAP_FAILED) {
			int fd = open ("/dev/zero", O_RDONLY);
			if (fd != -1) {
				ptr = mmap (0, chunk_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|ARCH_MAP_FLAGS, fd, 0);
				close (fd);
			}
			if (ptr == MAP_FAILED) {
				ptr = malloc (chunk_size);
				if (!ptr)
					return NULL;
				flags = CODE_FLAG_MALLOC;
			}
		}
	}
#endif
	if (flags == CODE_FLAG_MALLOC) {
		/*
		 * AMD64 processors maintain icache coherency only for pages which
		 * are marked executable, so we have to change the protection of
		 * the malloc-ed memory explicitly.
		 */
#ifndef PLATFORM_WIN32
		{
			char *page_start = (char *) (((gssize) (ptr)) & ~ (pagesize - 1));
			int pages = ((char*)ptr + chunk_size - page_start + pagesize - 1) / pagesize;
			int err = mprotect (page_start, pages * pagesize, PROT_READ | PROT_WRITE | PROT_EXEC);
			g_assert (err == 0);
		}
#else
		{
			DWORD oldp;
			int err = VirtualProtect (ptr, chunk_size, PAGE_EXECUTE_READWRITE, &oldp);
			g_assert (err);
		}
#endif
	}

	/* Make sure the thunks area is zeroed */
	memset (ptr, 0, bsize);
	chunk = malloc (sizeof (CodeChunk));
	if (!chunk) {
		if (flags == CODE_FLAG_MALLOC)
			free (ptr);
#ifndef PLATFORM_WIN32
		else
			munmap (ptr, chunk_size);
#endif
		return NULL;
	}
	chunk->next = NULL;
	chunk->size = chunk_size;
	chunk->data = ptr;
	chunk->flags = flags;
	chunk->pos = bsize;
	chunk->bsize = bsize;

	/*printf ("code chunk at: %p\n", ptr);*/
	return chunk;
}
void*
mono_code_manager_reserve (MonoCodeManager *cman, int size)
{
	CodeChunk *chunk, *prev;
	void *ptr;

	/* round the request up to the next MIN_ALIGN boundary */
	size += MIN_ALIGN - 1;
	size &= ~ (MIN_ALIGN - 1);

	if (!cman->current) {
		cman->current = new_codechunk (cman->dynamic, size);
		if (!cman->current)
			return NULL;
	}

	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (chunk->pos + size <= chunk->size) {
			ptr = chunk->data + chunk->pos;
			chunk->pos += size;
			return ptr;
		}
	}
	/*
	 * no room found: move one filled chunk to cman->full
	 * to keep cman->current from growing too much
	 */
	prev = NULL;
	for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
		if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
			continue;
		if (prev)
			prev->next = chunk->next;
		else
			cman->current = chunk->next;
		chunk->next = cman->full;
		cman->full = chunk;
		break;
	}
	chunk = new_codechunk (cman->dynamic, size);
	if (!chunk)
		return NULL;
	chunk->next = cman->current;
	cman->current = chunk;
	ptr = chunk->data + chunk->pos;
	chunk->pos += size;
	return ptr;
}
/*
 * if we reserved too much room for a method and we didn't allocate
 * already from the code manager, we can get back the excess allocation.
 */
void
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
{
	newsize += MIN_ALIGN - 1;
	newsize &= ~ (MIN_ALIGN - 1);
	size += MIN_ALIGN - 1;
	size &= ~ (MIN_ALIGN - 1);

	if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
		cman->current->pos -= size - newsize;
	}
}
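
/*
 * Usage sketch (illustrative only: codeman_example and CODEMAN_EXAMPLE are
 * not part of this file's API). A JIT reserves a worst-case amount of
 * space, emits the code, then commits the size actually used so the excess
 * goes back to the current chunk.
 */
#ifdef CODEMAN_EXAMPLE
static void
codeman_example (void)
{
	MonoCodeManager *cman = mono_code_manager_new ();
	unsigned char *code = mono_code_manager_reserve (cman, 64);
	unsigned char *p = code;

	*p++ = 0xc3;	/* emit one instruction: ret, assuming x86 */

	mono_code_manager_commit (cman, code, 64, p - code);
	mono_code_manager_destroy (cman);
}
#endif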