/*
 * mono-mmap.c: Support for mapping code into the process address space
 *
 * Author:
 *   Mono Team (mono-list@lists.ximian.com)
 *
 * Copyright 2001-2008 Novell, Inc.
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
17 #include <sys/types.h>
34 #include "mono-mmap.h"
35 #include "mono-mmap-internals.h"
36 #include "mono-proclib.h"
37 #include <mono/utils/mono-threads.h>
/*
 * Bracket a region in which the current thread must not be interrupted
 * mid-operation (e.g. while the kernel may hold internal locks on our
 * behalf during mmap/munmap). The two macros form one do/while pair:
 * BEGIN opens it, END closes it, so they must always be used together.
 */
#define BEGIN_CRITICAL_SECTION do { \
	MonoThreadInfo *__info = mono_thread_info_current_unchecked (); \
	if (__info) __info->inside_critical_region = TRUE;

#define END_CRITICAL_SECTION \
	if (__info) __info->inside_critical_region = FALSE; \
} while (0)
49 #define MAP_ANONYMOUS MAP_ANON
64 static void* malloced_shared_area = NULL;
67 malloc_shared_area (int pid)
69 int size = mono_pagesize ();
70 SAreaHeader *sarea = (SAreaHeader *) g_malloc0 (size);
73 sarea->stats_start = sizeof (SAreaHeader);
74 sarea->stats_end = sizeof (SAreaHeader);
80 aligned_address (char *mem, size_t size, size_t alignment)
82 char *aligned = (char*)((size_t)(mem + (alignment - 1)) & ~(alignment - 1));
83 g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((size_t)aligned & (alignment - 1)));
93 static int saved_pagesize = 0;
95 return saved_pagesize;
96 GetSystemInfo (&info);
97 saved_pagesize = info.dwAllocationGranularity;
98 return saved_pagesize;
102 prot_from_flags (int flags)
104 int prot = flags & (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC);
106 case 0: prot = PAGE_NOACCESS; break;
107 case MONO_MMAP_READ: prot = PAGE_READONLY; break;
108 case MONO_MMAP_READ|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READ; break;
109 case MONO_MMAP_READ|MONO_MMAP_WRITE: prot = PAGE_READWRITE; break;
110 case MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READWRITE; break;
111 case MONO_MMAP_WRITE: prot = PAGE_READWRITE; break;
112 case MONO_MMAP_WRITE|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READWRITE; break;
113 case MONO_MMAP_EXEC: prot = PAGE_EXECUTE; break;
115 g_assert_not_reached ();
121 mono_valloc (void *addr, size_t length, int flags)
124 int mflags = MEM_RESERVE|MEM_COMMIT;
125 int prot = prot_from_flags (flags);
126 /* translate the flags */
128 ptr = VirtualAlloc (addr, length, mflags, prot);
133 mono_valloc_aligned (size_t length, size_t alignment, int flags)
135 int prot = prot_from_flags (flags);
136 char *mem = VirtualAlloc (NULL, length + alignment, MEM_RESERVE, prot);
142 aligned = aligned_address (mem, length, alignment);
144 aligned = VirtualAlloc (aligned, length, MEM_COMMIT, prot);
150 #define HAVE_VALLOC_ALIGNED
153 mono_vfree (void *addr, size_t length)
155 MEMORY_BASIC_INFORMATION mbi;
156 SIZE_T query_result = VirtualQuery (addr, &mbi, sizeof (mbi));
159 g_assert (query_result);
161 res = VirtualFree (mbi.AllocationBase, 0, MEM_RELEASE);
169 mono_file_map (size_t length, int flags, int fd, guint64 offset, void **ret_handle)
173 HANDLE file, mapping;
174 int prot = prot_from_flags (flags);
175 /* translate the flags */
176 /*if (flags & MONO_MMAP_PRIVATE)
177 mflags |= MAP_PRIVATE;
178 if (flags & MONO_MMAP_SHARED)
179 mflags |= MAP_SHARED;
180 if (flags & MONO_MMAP_ANON)
181 mflags |= MAP_ANONYMOUS;
182 if (flags & MONO_MMAP_FIXED)
184 if (flags & MONO_MMAP_32BIT)
185 mflags |= MAP_32BIT;*/
187 mflags = FILE_MAP_READ;
188 if (flags & MONO_MMAP_WRITE)
189 mflags = FILE_MAP_COPY;
191 file = (HANDLE) _get_osfhandle (fd);
192 mapping = CreateFileMapping (file, NULL, prot, 0, 0, NULL);
195 ptr = MapViewOfFile (mapping, mflags, 0, offset, length);
197 CloseHandle (mapping);
200 *ret_handle = (void*)mapping;
205 mono_file_unmap (void *addr, void *handle)
207 UnmapViewOfFile (addr);
208 CloseHandle ((HANDLE)handle);
213 mono_mprotect (void *addr, size_t length, int flags)
216 int prot = prot_from_flags (flags);
218 if (flags & MONO_MMAP_DISCARD) {
219 VirtualFree (addr, length, MEM_DECOMMIT);
220 VirtualAlloc (addr, length, MEM_COMMIT, prot);
223 return VirtualProtect (addr, length, prot, &oldprot) == 0;
227 mono_shared_area (void)
229 if (!malloced_shared_area)
230 malloced_shared_area = malloc_shared_area (0);
231 /* get the pid here */
232 return malloced_shared_area;
236 mono_shared_area_remove (void)
238 if (malloced_shared_area)
239 g_free (malloced_shared_area);
240 malloced_shared_area = NULL;
244 mono_shared_area_for_pid (void *pid)
250 mono_shared_area_unload (void *area)
255 mono_shared_area_instances (void **array, int count)
261 #if defined(HAVE_MMAP)
/**
 * mono_pagesize:
 * Get the page size in use on the system. Addresses and sizes in the
 * mono_mmap(), mono_munmap() and mono_mprotect() calls must be pagesize
 * aligned.
 *
 * Returns: the page size in bytes.
 */
int
mono_pagesize (void)
{
	static int saved_pagesize = 0;
	if (saved_pagesize)
		return saved_pagesize;
	saved_pagesize = getpagesize ();
	return saved_pagesize;
}
282 prot_from_flags (int flags)
284 int prot = PROT_NONE;
285 /* translate the protection bits */
286 if (flags & MONO_MMAP_READ)
288 if (flags & MONO_MMAP_WRITE)
290 if (flags & MONO_MMAP_EXEC)
297 * @addr: memory address
298 * @length: memory area size
299 * @flags: protection flags
301 * Allocates @length bytes of virtual memory with the @flags
302 * protection. @addr can be a preferred memory address or a
303 * mandatory one if MONO_MMAP_FIXED is set in @flags.
304 * @addr must be pagesize aligned and can be NULL.
305 * @length must be a multiple of pagesize.
307 * Returns: NULL on failure, the address of the memory area otherwise
310 mono_valloc (void *addr, size_t length, int flags)
314 int prot = prot_from_flags (flags);
315 /* translate the flags */
316 if (flags & MONO_MMAP_FIXED)
318 if (flags & MONO_MMAP_32BIT)
321 mflags |= MAP_ANONYMOUS;
322 mflags |= MAP_PRIVATE;
324 BEGIN_CRITICAL_SECTION;
325 ptr = mmap (addr, length, prot, mflags, -1, 0);
326 if (ptr == MAP_FAILED) {
327 int fd = open ("/dev/zero", O_RDONLY);
329 ptr = mmap (addr, length, prot, mflags, fd, 0);
333 END_CRITICAL_SECTION;
335 if (ptr == MAP_FAILED)
342 * @addr: memory address returned by mono_valloc ()
343 * @length: size of memory area
345 * Remove the memory mapping at the address @addr.
347 * Returns: 0 on success.
350 mono_vfree (void *addr, size_t length)
353 BEGIN_CRITICAL_SECTION;
354 res = munmap (addr, length);
355 END_CRITICAL_SECTION;
361 * @length: size of data to map
362 * @flags: protection flags
363 * @fd: file descriptor
364 * @offset: offset in the file
365 * @ret_handle: pointer to storage for returning a handle for the map
367 * Map the area of the file pointed to by the file descriptor @fd, at offset
368 * @offset and of size @length in memory according to the protection flags
370 * @offset and @length must be multiples of the page size.
371 * @ret_handle must point to a void*: this value must be used when unmapping
372 * the memory area using mono_file_unmap ().
376 mono_file_map (size_t length, int flags, int fd, guint64 offset, void **ret_handle)
380 int prot = prot_from_flags (flags);
381 /* translate the flags */
382 if (flags & MONO_MMAP_PRIVATE)
383 mflags |= MAP_PRIVATE;
384 if (flags & MONO_MMAP_SHARED)
385 mflags |= MAP_SHARED;
386 if (flags & MONO_MMAP_FIXED)
388 if (flags & MONO_MMAP_32BIT)
391 BEGIN_CRITICAL_SECTION;
392 ptr = mmap (0, length, prot, mflags, fd, offset);
393 END_CRITICAL_SECTION;
394 if (ptr == MAP_FAILED)
396 *ret_handle = (void*)length;
402 * @addr: memory address returned by mono_file_map ()
403 * @handle: handle of memory map
405 * Remove the memory mapping at the address @addr.
406 * @handle must be the value returned in ret_handle by mono_file_map ().
408 * Returns: 0 on success.
411 mono_file_unmap (void *addr, void *handle)
415 BEGIN_CRITICAL_SECTION;
416 res = munmap (addr, (size_t)handle);
417 END_CRITICAL_SECTION;
424 * @addr: memory address
425 * @length: size of memory area
426 * @flags: new protection flags
428 * Change the protection for the memory area at @addr for @length bytes
429 * to matche the supplied @flags.
430 * If @flags includes MON_MMAP_DISCARD the pages are discarded from memory
431 * and the area is cleared to zero.
432 * @addr must be aligned to the page size.
433 * @length must be a multiple of the page size.
435 * Returns: 0 on success.
437 #if defined(__native_client__)
439 mono_mprotect (void *addr, size_t length, int flags)
441 int prot = prot_from_flags (flags);
444 if (flags & MONO_MMAP_DISCARD) memset (addr, 0, length);
446 new_addr = mmap(addr, length, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
447 if (new_addr == addr) return 0;
452 mono_mprotect (void *addr, size_t length, int flags)
454 int prot = prot_from_flags (flags);
456 if (flags & MONO_MMAP_DISCARD) {
457 /* on non-linux the pages are not guaranteed to be zeroed (*bsd, osx at least) */
459 if (madvise (addr, length, MADV_DONTNEED))
460 memset (addr, 0, length);
462 memset (addr, 0, length);
464 madvise (addr, length, MADV_DONTNEED);
465 madvise (addr, length, MADV_FREE);
467 posix_madvise (addr, length, POSIX_MADV_DONTNEED);
471 return mprotect (addr, length, prot);
473 #endif // __native_client__
477 /* dummy malloc-based implementation */
485 mono_valloc (void *addr, size_t length, int flags)
487 return malloc (length);
491 mono_valloc_aligned (size_t length, size_t alignment, int flags)
493 g_assert_not_reached ();
496 #define HAVE_VALLOC_ALIGNED
499 mono_vfree (void *addr, size_t length)
506 mono_mprotect (void *addr, size_t length, int flags)
508 if (flags & MONO_MMAP_DISCARD) {
509 memset (addr, 0, length);
516 #if defined(HAVE_SHM_OPEN) && !defined (DISABLE_SHARED_PERFCOUNTERS)
518 static int use_shared_area;
521 shared_area_disabled (void)
523 if (!use_shared_area) {
524 if (g_getenv ("MONO_DISABLE_SHARED_AREA"))
525 use_shared_area = -1;
529 return use_shared_area == -1;
533 mono_shared_area_instances_slow (void **array, int count, gboolean cleanup)
538 gpointer *processes = mono_process_list (&num);
539 for (i = 0; i < num; ++i) {
540 data = mono_shared_area_for_pid (processes [i]);
543 mono_shared_area_unload (data);
546 array [j++] = processes [i];
556 mono_shared_area_instances_helper (void **array, int count, gboolean cleanup)
560 int curpid = getpid ();
561 GDir *dir = g_dir_open ("/dev/shm/", 0, NULL);
563 return mono_shared_area_instances_slow (array, count, cleanup);
564 while ((name = g_dir_read_name (dir))) {
567 if (strncmp (name, "mono.", 5))
569 pid = strtol (name + 5, &nend, 10);
570 if (pid <= 0 || nend == name + 5 || *nend)
574 array [i++] = GINT_TO_POINTER (pid);
578 if (curpid != pid && kill (pid, 0) == -1 && (errno == ESRCH || errno == ENOMEM)) {
580 g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
589 mono_shared_area (void)
593 /* we should allow the user to configure the size */
594 int size = mono_pagesize ();
599 if (shared_area_disabled ()) {
600 if (!malloced_shared_area)
601 malloced_shared_area = malloc_shared_area (0);
602 /* get the pid here */
603 return malloced_shared_area;
606 /* perform cleanup of segments left over from dead processes */
607 mono_shared_area_instances_helper (NULL, 0, TRUE);
609 g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
611 fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
612 if (fd == -1 && errno == EEXIST) {
615 fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
617 /* in case of failure we try to return a memory area anyway,
618 * even if it means the data can't be read by other processes
621 return malloc_shared_area (pid);
622 if (ftruncate (fd, size) != 0) {
626 BEGIN_CRITICAL_SECTION;
627 res = mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
628 END_CRITICAL_SECTION;
630 if (res == MAP_FAILED) {
633 return malloc_shared_area (pid);
635 /* we don't need the file descriptor anymore */
637 header = (SAreaHeader *) res;
640 header->stats_start = sizeof (SAreaHeader);
641 header->stats_end = sizeof (SAreaHeader);
643 mono_atexit (mono_shared_area_remove);
648 mono_shared_area_remove (void)
652 if (shared_area_disabled ()) {
653 if (malloced_shared_area)
654 g_free (malloced_shared_area);
658 g_snprintf (buf, sizeof (buf), "/mono.%d", getpid ());
660 if (malloced_shared_area)
661 g_free (malloced_shared_area);
665 mono_shared_area_for_pid (void *pid)
668 /* we should allow the user to configure the size */
669 int size = mono_pagesize ();
673 if (shared_area_disabled ())
676 g_snprintf (buf, sizeof (buf), "/mono.%d", GPOINTER_TO_INT (pid));
678 fd = shm_open (buf, O_RDONLY, S_IRUSR|S_IRGRP);
681 BEGIN_CRITICAL_SECTION;
682 res = mmap (NULL, size, PROT_READ, MAP_SHARED, fd, 0);
683 END_CRITICAL_SECTION;
685 if (res == MAP_FAILED) {
689 /* FIXME: validate the area */
690 /* we don't need the file descriptor anymore */
696 mono_shared_area_unload (void *area)
698 /* FIXME: currently we load only a page */
699 BEGIN_CRITICAL_SECTION;
700 munmap (area, mono_pagesize ());
701 END_CRITICAL_SECTION;
705 mono_shared_area_instances (void **array, int count)
707 return mono_shared_area_instances_helper (array, count, FALSE);
711 mono_shared_area (void)
713 if (!malloced_shared_area)
714 malloced_shared_area = malloc_shared_area (getpid ());
715 /* get the pid here */
716 return malloced_shared_area;
720 mono_shared_area_remove (void)
722 if (malloced_shared_area)
723 g_free (malloced_shared_area);
724 malloced_shared_area = NULL;
728 mono_shared_area_for_pid (void *pid)
734 mono_shared_area_unload (void *area)
739 mono_shared_area_instances (void **array, int count)
744 #endif // HAVE_SHM_OPEN
748 #ifndef HAVE_VALLOC_ALIGNED
750 mono_valloc_aligned (size_t size, size_t alignment, int flags)
752 /* Allocate twice the memory to be able to put the block on an aligned address */
753 char *mem = (char *) mono_valloc (NULL, size + alignment, flags);
759 aligned = aligned_address (mem, size, alignment);
762 mono_vfree (mem, aligned - mem);
763 if (aligned + size < mem + size + alignment)
764 mono_vfree (aligned + size, (mem + size + alignment) - (aligned + size));
771 mono_pages_not_faulted (void *addr, size_t size)
776 int pagesize = mono_pagesize ();
777 int npages = (size + pagesize - 1) / pagesize;
778 char *faulted = (char *) g_malloc0 (sizeof (char*) * npages);
781 * We cast `faulted` to void* because Linux wants an unsigned
782 * char* while BSD wants a char*.
785 if (mincore (addr, size, (unsigned char *)faulted) != 0) {
787 if (mincore (addr, size, (char *)faulted) != 0) {
792 for (i = 0; i < npages; ++i) {
793 if (faulted [i] != 0)