/*
 * mono-mmap.c: Support for mapping code into the process address space
 *
 * Author:
 *   Mono Team (mono-list@lists.ximian.com)
 *
 * Copyright 2001-2008 Novell, Inc.
 */
16 #include <sys/types.h>
33 #include "mono-mmap.h"
34 #include "mono-mmap-internals.h"
35 #include "mono-proclib.h"
36 #include <mono/utils/mono-threads.h>
/*
 * BEGIN/END_CRITICAL_SECTION bracket mmap/munmap calls: they flag the
 * current thread (__info->inside_critical_region) so the runtime's
 * suspend machinery does not stop the thread in the middle of a mapping
 * operation. A NULL __info (thread not attached) is tolerated.
 * NOTE(review): this span is a garbled extract — the trailing
 * continuation lines closing the do/while are missing; the text below is
 * preserved verbatim, only this comment was added.
 */
39 #define BEGIN_CRITICAL_SECTION do { \
40 MonoThreadInfo *__info = mono_thread_info_current_unchecked (); \
41 if (__info) __info->inside_critical_region = TRUE; \
43 #define END_CRITICAL_SECTION \
44 if (__info) __info->inside_critical_region = FALSE; \
/* BSD-style systems only define MAP_ANON; provide the POSIX spelling. */
48 #define MAP_ANONYMOUS MAP_ANON
/* Heap-allocated fallback shared area, used when shm is disabled or
 * unavailable; owned by this file, freed in mono_shared_area_remove (). */
63 static void* malloced_shared_area = NULL;
66 malloc_shared_area (int pid)
68 int size = mono_pagesize ();
69 SAreaHeader *sarea = (SAreaHeader *) g_malloc0 (size);
72 sarea->stats_start = sizeof (SAreaHeader);
73 sarea->stats_end = sizeof (SAreaHeader);
79 aligned_address (char *mem, size_t size, size_t alignment)
81 char *aligned = (char*)((size_t)(mem + (alignment - 1)) & ~(alignment - 1));
82 g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((size_t)aligned & (alignment - 1)));
92 static int saved_pagesize = 0;
94 return saved_pagesize;
95 GetSystemInfo (&info);
96 saved_pagesize = info.dwAllocationGranularity;
97 return saved_pagesize;
101 prot_from_flags (int flags)
103 int prot = flags & (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC);
105 case 0: prot = PAGE_NOACCESS; break;
106 case MONO_MMAP_READ: prot = PAGE_READONLY; break;
107 case MONO_MMAP_READ|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READ; break;
108 case MONO_MMAP_READ|MONO_MMAP_WRITE: prot = PAGE_READWRITE; break;
109 case MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READWRITE; break;
110 case MONO_MMAP_WRITE: prot = PAGE_READWRITE; break;
111 case MONO_MMAP_WRITE|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READWRITE; break;
112 case MONO_MMAP_EXEC: prot = PAGE_EXECUTE; break;
114 g_assert_not_reached ();
120 mono_valloc (void *addr, size_t length, int flags)
123 int mflags = MEM_RESERVE|MEM_COMMIT;
124 int prot = prot_from_flags (flags);
125 /* translate the flags */
127 ptr = VirtualAlloc (addr, length, mflags, prot);
132 mono_valloc_aligned (size_t length, size_t alignment, int flags)
134 int prot = prot_from_flags (flags);
135 char *mem = VirtualAlloc (NULL, length + alignment, MEM_RESERVE, prot);
141 aligned = aligned_address (mem, length, alignment);
143 aligned = VirtualAlloc (aligned, length, MEM_COMMIT, prot);
149 #define HAVE_VALLOC_ALIGNED
152 mono_vfree (void *addr, size_t length)
154 MEMORY_BASIC_INFORMATION mbi;
155 SIZE_T query_result = VirtualQuery (addr, &mbi, sizeof (mbi));
158 g_assert (query_result);
160 res = VirtualFree (mbi.AllocationBase, 0, MEM_RELEASE);
168 mono_file_map (size_t length, int flags, int fd, guint64 offset, void **ret_handle)
172 HANDLE file, mapping;
173 int prot = prot_from_flags (flags);
174 /* translate the flags */
175 /*if (flags & MONO_MMAP_PRIVATE)
176 mflags |= MAP_PRIVATE;
177 if (flags & MONO_MMAP_SHARED)
178 mflags |= MAP_SHARED;
179 if (flags & MONO_MMAP_ANON)
180 mflags |= MAP_ANONYMOUS;
181 if (flags & MONO_MMAP_FIXED)
183 if (flags & MONO_MMAP_32BIT)
184 mflags |= MAP_32BIT;*/
186 mflags = FILE_MAP_READ;
187 if (flags & MONO_MMAP_WRITE)
188 mflags = FILE_MAP_COPY;
190 file = (HANDLE) _get_osfhandle (fd);
191 mapping = CreateFileMapping (file, NULL, prot, 0, 0, NULL);
194 ptr = MapViewOfFile (mapping, mflags, 0, offset, length);
196 CloseHandle (mapping);
199 *ret_handle = (void*)mapping;
204 mono_file_unmap (void *addr, void *handle)
206 UnmapViewOfFile (addr);
207 CloseHandle ((HANDLE)handle);
212 mono_mprotect (void *addr, size_t length, int flags)
215 int prot = prot_from_flags (flags);
217 if (flags & MONO_MMAP_DISCARD) {
218 VirtualFree (addr, length, MEM_DECOMMIT);
219 VirtualAlloc (addr, length, MEM_COMMIT, prot);
222 return VirtualProtect (addr, length, prot, &oldprot) == 0;
226 mono_shared_area (void)
228 if (!malloced_shared_area)
229 malloced_shared_area = malloc_shared_area (0);
230 /* get the pid here */
231 return malloced_shared_area;
235 mono_shared_area_remove (void)
237 if (malloced_shared_area)
238 g_free (malloced_shared_area);
239 malloced_shared_area = NULL;
243 mono_shared_area_for_pid (void *pid)
249 mono_shared_area_unload (void *area)
254 mono_shared_area_instances (void **array, int count)
260 #if defined(HAVE_MMAP)
/**
 * mono_pagesize:
 * Get the page size in use on the system. Addresses and sizes in the
 * mono_mmap(), mono_munmap() and mono_mprotect() calls must be pagesize
 * aligned.
 *
 * Returns: the page size in bytes.
 */
int
mono_pagesize (void)
{
	static int saved_pagesize = 0;
	if (saved_pagesize)
		return saved_pagesize;
	/* cache: the page size cannot change while the process runs */
	saved_pagesize = getpagesize ();
	return saved_pagesize;
}
281 prot_from_flags (int flags)
283 int prot = PROT_NONE;
284 /* translate the protection bits */
285 if (flags & MONO_MMAP_READ)
287 if (flags & MONO_MMAP_WRITE)
289 if (flags & MONO_MMAP_EXEC)
296 * @addr: memory address
297 * @length: memory area size
298 * @flags: protection flags
300 * Allocates @length bytes of virtual memory with the @flags
301 * protection. @addr can be a preferred memory address or a
302 * mandatory one if MONO_MMAP_FIXED is set in @flags.
303 * @addr must be pagesize aligned and can be NULL.
304 * @length must be a multiple of pagesize.
306 * Returns: NULL on failure, the address of the memory area otherwise
309 mono_valloc (void *addr, size_t length, int flags)
313 int prot = prot_from_flags (flags);
314 /* translate the flags */
315 if (flags & MONO_MMAP_FIXED)
317 if (flags & MONO_MMAP_32BIT)
320 mflags |= MAP_ANONYMOUS;
321 mflags |= MAP_PRIVATE;
323 BEGIN_CRITICAL_SECTION;
324 ptr = mmap (addr, length, prot, mflags, -1, 0);
325 if (ptr == MAP_FAILED) {
326 int fd = open ("/dev/zero", O_RDONLY);
328 ptr = mmap (addr, length, prot, mflags, fd, 0);
332 END_CRITICAL_SECTION;
334 if (ptr == MAP_FAILED)
341 * @addr: memory address returned by mono_valloc ()
342 * @length: size of memory area
344 * Remove the memory mapping at the address @addr.
346 * Returns: 0 on success.
349 mono_vfree (void *addr, size_t length)
352 BEGIN_CRITICAL_SECTION;
353 res = munmap (addr, length);
354 END_CRITICAL_SECTION;
360 * @length: size of data to map
361 * @flags: protection flags
362 * @fd: file descriptor
363 * @offset: offset in the file
364 * @ret_handle: pointer to storage for returning a handle for the map
366 * Map the area of the file pointed to by the file descriptor @fd, at offset
367 * @offset and of size @length in memory according to the protection flags
369 * @offset and @length must be multiples of the page size.
370 * @ret_handle must point to a void*: this value must be used when unmapping
371 * the memory area using mono_file_unmap ().
375 mono_file_map (size_t length, int flags, int fd, guint64 offset, void **ret_handle)
379 int prot = prot_from_flags (flags);
380 /* translate the flags */
381 if (flags & MONO_MMAP_PRIVATE)
382 mflags |= MAP_PRIVATE;
383 if (flags & MONO_MMAP_SHARED)
384 mflags |= MAP_SHARED;
385 if (flags & MONO_MMAP_FIXED)
387 if (flags & MONO_MMAP_32BIT)
390 BEGIN_CRITICAL_SECTION;
391 ptr = mmap (0, length, prot, mflags, fd, offset);
392 END_CRITICAL_SECTION;
393 if (ptr == MAP_FAILED)
395 *ret_handle = (void*)length;
401 * @addr: memory address returned by mono_file_map ()
402 * @handle: handle of memory map
404 * Remove the memory mapping at the address @addr.
405 * @handle must be the value returned in ret_handle by mono_file_map ().
407 * Returns: 0 on success.
410 mono_file_unmap (void *addr, void *handle)
414 BEGIN_CRITICAL_SECTION;
415 res = munmap (addr, (size_t)handle);
416 END_CRITICAL_SECTION;
/**
 * mono_mprotect:
 * @addr: memory address
 * @length: size of memory area
 * @flags: new protection flags
 *
 * Change the protection for the memory area at @addr for @length bytes
 * to match the supplied @flags.
 * If @flags includes MONO_MMAP_DISCARD the pages are discarded from memory
 * and the area is cleared to zero.
 * @addr must be aligned to the page size.
 * @length must be a multiple of the page size.
 *
 * Returns: 0 on success.
 */
436 #if defined(__native_client__)
438 mono_mprotect (void *addr, size_t length, int flags)
440 int prot = prot_from_flags (flags);
443 if (flags & MONO_MMAP_DISCARD) memset (addr, 0, length);
445 new_addr = mmap(addr, length, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
446 if (new_addr == addr) return 0;
451 mono_mprotect (void *addr, size_t length, int flags)
453 int prot = prot_from_flags (flags);
455 if (flags & MONO_MMAP_DISCARD) {
456 /* on non-linux the pages are not guaranteed to be zeroed (*bsd, osx at least) */
458 if (madvise (addr, length, MADV_DONTNEED))
459 memset (addr, 0, length);
461 memset (addr, 0, length);
463 madvise (addr, length, MADV_DONTNEED);
464 madvise (addr, length, MADV_FREE);
466 posix_madvise (addr, length, POSIX_MADV_DONTNEED);
470 return mprotect (addr, length, prot);
472 #endif // __native_client__
/*
 * Fallback used when neither Win32 VirtualAlloc nor POSIX mmap is
 * available: "virtual" memory is plain heap memory, protections are
 * ignored, and aligned allocation is unsupported (asserts).
 * NOTE(review): this span is a garbled extract — return types, braces
 * and several statements are missing. Lines are preserved verbatim;
 * only comments were added. Consult the original file before editing.
 */
476 /* dummy malloc-based implementation */
484 mono_valloc (void *addr, size_t length, int flags)
486 return malloc (length);
490 mono_valloc_aligned (size_t length, size_t alignment, int flags)
492 g_assert_not_reached ();
495 #define HAVE_VALLOC_ALIGNED
498 mono_vfree (void *addr, size_t length)
505 mono_mprotect (void *addr, size_t length, int flags)
507 if (flags & MONO_MMAP_DISCARD) {
508 memset (addr, 0, length);
515 #if defined(HAVE_SHM_OPEN) && !defined (DISABLE_SHARED_PERFCOUNTERS)
517 static int use_shared_area;
520 shared_area_disabled (void)
522 if (!use_shared_area) {
523 if (g_getenv ("MONO_DISABLE_SHARED_AREA"))
524 use_shared_area = -1;
528 return use_shared_area == -1;
/*
 * Slow-path enumeration of shared areas: walk the full system process
 * list (mono_process_list) and probe each pid for a mapped mono shared
 * area, recording the pids that have one.
 * NOTE(review): this span is a garbled extract — local declarations,
 * the cleanup branch and the return statement are missing. Lines are
 * preserved verbatim; only comments were added.
 */
532 mono_shared_area_instances_slow (void **array, int count, gboolean cleanup)
537 gpointer *processes = mono_process_list (&num);
538 for (i = 0; i < num; ++i) {
539 data = mono_shared_area_for_pid (processes [i]);
542 mono_shared_area_unload (data);
545 array [j++] = processes [i];
/*
 * Fast-path enumeration of shared areas: scan /dev/shm for files named
 * "mono.<pid>", parse the pid with strtol, and (when @cleanup is set)
 * unlink segments whose owning process is gone — kill (pid, 0) failing
 * with ESRCH/ENOMEM identifies a dead pid. Falls back to the slow
 * process-list scan when /dev/shm cannot be opened.
 * NOTE(review): this span is a garbled extract — local declarations,
 * the shm_unlink call and the return statement are missing. Lines are
 * preserved verbatim; only comments were added.
 */
555 mono_shared_area_instances_helper (void **array, int count, gboolean cleanup)
559 int curpid = getpid ();
560 GDir *dir = g_dir_open ("/dev/shm/", 0, NULL);
562 return mono_shared_area_instances_slow (array, count, cleanup);
563 while ((name = g_dir_read_name (dir))) {
566 if (strncmp (name, "mono.", 5))
568 pid = strtol (name + 5, &nend, 10);
569 if (pid <= 0 || nend == name + 5 || *nend)
573 array [i++] = GINT_TO_POINTER (pid);
577 if (curpid != pid && kill (pid, 0) == -1 && (errno == ESRCH || errno == ENOMEM)) {
579 g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
/*
 * Create (or fall back for) this process's shared area:
 *  - honor MONO_DISABLE_SHARED_AREA by returning a heap-only area;
 *  - garbage-collect segments of dead processes first;
 *  - shm_open "/mono.<pid>" with O_CREAT|O_EXCL, retrying once on
 *    EEXIST (a stale segment from a recycled pid);
 *  - size it with ftruncate, mmap it read/write shared, initialize the
 *    SAreaHeader, and register mono_shared_area_remove with atexit;
 *  - on any failure, degrade gracefully to a malloc'd private area so
 *    callers always get usable memory.
 * NOTE(review): this span is a garbled extract — local declarations,
 * error-branch bodies, close()/shm_unlink() calls and the return
 * statement are missing. Lines are preserved verbatim; only comments
 * were added. Consult the original file before editing.
 */
588 mono_shared_area (void)
592 /* we should allow the user to configure the size */
593 int size = mono_pagesize ();
598 if (shared_area_disabled ()) {
599 if (!malloced_shared_area)
600 malloced_shared_area = malloc_shared_area (0);
601 /* get the pid here */
602 return malloced_shared_area;
605 /* perform cleanup of segments left over from dead processes */
606 mono_shared_area_instances_helper (NULL, 0, TRUE);
608 g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
610 fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
611 if (fd == -1 && errno == EEXIST) {
614 fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
616 /* in case of failure we try to return a memory area anyway,
617 * even if it means the data can't be read by other processes
620 return malloc_shared_area (pid);
621 if (ftruncate (fd, size) != 0) {
625 BEGIN_CRITICAL_SECTION;
626 res = mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
627 END_CRITICAL_SECTION;
629 if (res == MAP_FAILED) {
632 return malloc_shared_area (pid);
634 /* we don't need the file descriptor anymore */
636 header = (SAreaHeader *) res;
639 header->stats_start = sizeof (SAreaHeader);
640 header->stats_end = sizeof (SAreaHeader);
642 mono_atexit (mono_shared_area_remove);
647 mono_shared_area_remove (void)
651 if (shared_area_disabled ()) {
652 if (malloced_shared_area)
653 g_free (malloced_shared_area);
657 g_snprintf (buf, sizeof (buf), "/mono.%d", getpid ());
659 if (malloced_shared_area)
660 g_free (malloced_shared_area);
664 mono_shared_area_for_pid (void *pid)
667 /* we should allow the user to configure the size */
668 int size = mono_pagesize ();
672 if (shared_area_disabled ())
675 g_snprintf (buf, sizeof (buf), "/mono.%d", GPOINTER_TO_INT (pid));
677 fd = shm_open (buf, O_RDONLY, S_IRUSR|S_IRGRP);
680 BEGIN_CRITICAL_SECTION;
681 res = mmap (NULL, size, PROT_READ, MAP_SHARED, fd, 0);
682 END_CRITICAL_SECTION;
684 if (res == MAP_FAILED) {
688 /* FIXME: validate the area */
689 /* we don't need the file descriptor anymore */
695 mono_shared_area_unload (void *area)
697 /* FIXME: currently we load only a page */
698 BEGIN_CRITICAL_SECTION;
699 munmap (area, mono_pagesize ());
700 END_CRITICAL_SECTION;
704 mono_shared_area_instances (void **array, int count)
706 return mono_shared_area_instances_helper (array, count, FALSE);
710 mono_shared_area (void)
712 if (!malloced_shared_area)
713 malloced_shared_area = malloc_shared_area (getpid ());
714 /* get the pid here */
715 return malloced_shared_area;
719 mono_shared_area_remove (void)
721 if (malloced_shared_area)
722 g_free (malloced_shared_area);
723 malloced_shared_area = NULL;
727 mono_shared_area_for_pid (void *pid)
733 mono_shared_area_unload (void *area)
738 mono_shared_area_instances (void **array, int count)
743 #endif // HAVE_SHM_OPEN
747 #ifndef HAVE_VALLOC_ALIGNED
749 mono_valloc_aligned (size_t size, size_t alignment, int flags)
751 /* Allocate twice the memory to be able to put the block on an aligned address */
752 char *mem = (char *) mono_valloc (NULL, size + alignment, flags);
758 aligned = aligned_address (mem, size, alignment);
761 mono_vfree (mem, aligned - mem);
762 if (aligned + size < mem + size + alignment)
763 mono_vfree (aligned + size, (mem + size + alignment) - (aligned + size));
770 mono_pages_not_faulted (void *addr, size_t size)
775 int pagesize = mono_pagesize ();
776 int npages = (size + pagesize - 1) / pagesize;
777 char *faulted = (char *) g_malloc0 (sizeof (char*) * npages);
780 * We cast `faulted` to void* because Linux wants an unsigned
781 * char* while BSD wants a char*.
784 if (mincore (addr, size, (unsigned char *)faulted) != 0) {
786 if (mincore (addr, size, (char *)faulted) != 0) {
791 for (i = 0; i < npages; ++i) {
792 if (faulted [i] != 0)