2 * mono-mmap.c: Support for mapping code into the process address space
5 * Mono Team (mono-list@lists.ximian.com)
7 * Copyright 2001-2008 Novell, Inc.
8 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <sys/types.h>
34 #include "mono-mmap.h"
35 #include "mono-mmap-internals.h"
36 #include "mono-proclib.h"
37 #include <mono/utils/mono-threads.h>
38 #include <mono/utils/atomic.h>
39 #include <mono/utils/mono-counters.h>
/*
 * BEGIN/END_CRITICAL_SECTION: mark the current thread as being inside a
 * critical region (via MonoThreadInfo->inside_critical_region) around raw
 * mmap/munmap calls, so code inspecting thread state can tell it must not
 * be interrupted mid-map.  NOTE(review): the do{...}while(0) closing of
 * this macro pair is on lines elided from this excerpt.
 */
42 #define BEGIN_CRITICAL_SECTION do { \
43 MonoThreadInfo *__info = mono_thread_info_current_unchecked (); \
44 if (__info) __info->inside_critical_region = TRUE; \
46 #define END_CRITICAL_SECTION \
47 if (__info) __info->inside_critical_region = FALSE; \
/* Some platforms (BSD/OSX) only define MAP_ANON; provide the common name. */
51 #define MAP_ANONYMOUS MAP_ANON
/* Process-local fallback area used when a shm-backed shared area is
 * unavailable or disabled; lazily allocated by malloc_shared_area(). */
66 static void* malloced_shared_area = NULL;
/*
 * malloc_shared_area:
 * Heap-allocate a zeroed, page-sized area headed by an SAreaHeader, used as
 * a process-local substitute for the shm-backed shared area.  The stats
 * region initially starts and ends right after the header (i.e. empty).
 * NOTE(review): @pid is not used on the visible lines; other header fields
 * are initialized on lines elided from this excerpt.
 */
69 malloc_shared_area (int pid)
71 	int size = mono_pagesize ();
72 	SAreaHeader *sarea = (SAreaHeader *) g_malloc0 (size);
75 	sarea->stats_start = sizeof (SAreaHeader);
76 	sarea->stats_end = sizeof (SAreaHeader);
/*
 * aligned_address:
 * Round @mem up to the next @alignment boundary (@alignment must be a
 * power of two for the mask arithmetic to be valid) and assert that the
 * aligned block of @size bytes still fits within the caller's
 * @size + @alignment over-allocation.
 */
82 aligned_address (char *mem, size_t size, size_t alignment)
84 	char *aligned = (char*)((size_t)(mem + (alignment - 1)) & ~(alignment - 1));
85 	g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((size_t)aligned & (alignment - 1)));
/* Per-MonoMemAccountType running byte totals, updated atomically. */
89 static volatile size_t allocation_count [MONO_MEM_ACCOUNT_MAX];
/*
 * account_mem:
 * Atomically add @size to the counter for @type; @size may be negative
 * (used by the vfree paths to subtract).  The 32/64-bit interlocked add
 * is selected to match the width of size_t on this platform.
 */
92 account_mem (MonoMemAccountType type, ssize_t size)
94 #if SIZEOF_VOID_P == 4
95 	InterlockedAdd ((volatile gint32*)&allocation_count [type], (gint32)size);
97 	InterlockedAdd64 ((volatile gint64*)&allocation_count [type], (gint64)size);
/*
 * mono_mem_account_type_name:
 * Map a MonoMemAccountType to a human-readable name via a static table.
 * NOTE(review): the table is indexed by @type on a line elided here; it
 * must stay in sync with the MonoMemAccountType enum order.
 */
102 mono_mem_account_type_name (MonoMemAccountType type)
104 	static const char *names[] = {
113 		"SGen shadow card table",
115 		"SGen binary protocol",
/*
 * mono_mem_account_register_counters:
 * Register one runtime counter per memory-account type, named
 * "Valloc <type name>", pointing directly at the corresponding
 * allocation_count slot so the counter reads the live value.
 * NOTE(review): `descr` is declared on a line elided from this excerpt;
 * the g_assert above the sprintf guarantees the formatted string fits.
 */
125 mono_mem_account_register_counters (void)
127 	for (int i = 0; i < MONO_MEM_ACCOUNT_MAX; ++i) {
128 		const char *prefix = "Valloc ";
129 		const char *name = mono_mem_account_type_name (i);
131 		g_assert (strlen (prefix) + strlen (name) < sizeof (descr));
132 		sprintf (descr, "%s%s", prefix, name);
133 		mono_counters_register (descr, MONO_COUNTER_WORD | MONO_COUNTER_RUNTIME | MONO_COUNTER_BYTES | MONO_COUNTER_VARIABLE, (void*)&allocation_count [i]);
/*
 * mono_pagesize (Windows variant): cached on first call.
 * NOTE(review): this returns dwAllocationGranularity (the VirtualAlloc
 * reservation granularity, typically 64KB), not dwPageSize — presumably
 * so callers align to what VirtualAlloc actually hands out; confirm this
 * is intentional before changing.
 */
143 	static int saved_pagesize = 0;
145 		return saved_pagesize;
146 	GetSystemInfo (&info);
147 	saved_pagesize = info.dwAllocationGranularity;
148 	return saved_pagesize;
/*
 * prot_from_flags (Windows variant):
 * Translate the MONO_MMAP_READ/WRITE/EXEC bit combination into the single
 * matching Win32 PAGE_* protection constant.  Windows has no write-only or
 * write+exec-only protections, so WRITE implies READ in the mapping.
 * Any flag combination outside the enumerated cases is a programming error.
 */
152 prot_from_flags (int flags)
154 	int prot = flags & (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC);
156 	case 0: prot = PAGE_NOACCESS; break;
157 	case MONO_MMAP_READ: prot = PAGE_READONLY; break;
158 	case MONO_MMAP_READ|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READ; break;
159 	case MONO_MMAP_READ|MONO_MMAP_WRITE: prot = PAGE_READWRITE; break;
160 	case MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READWRITE; break;
161 	case MONO_MMAP_WRITE: prot = PAGE_READWRITE; break;
162 	case MONO_MMAP_WRITE|MONO_MMAP_EXEC: prot = PAGE_EXECUTE_READWRITE; break;
163 	case MONO_MMAP_EXEC: prot = PAGE_EXECUTE; break;
165 		g_assert_not_reached ();
/*
 * mono_valloc (Windows variant):
 * Reserve and commit @length bytes in one VirtualAlloc call with the
 * protection derived from @flags, then record the allocation against
 * the @type accounting bucket.  @addr is a preferred base address and
 * may be NULL.
 */
171 mono_valloc (void *addr, size_t length, int flags, MonoMemAccountType type)
174 	int mflags = MEM_RESERVE|MEM_COMMIT;
175 	int prot = prot_from_flags (flags);
176 	/* translate the flags */
178 	ptr = VirtualAlloc (addr, length, mflags, prot);
180 	account_mem (type, (ssize_t)length);
/*
 * mono_valloc_aligned (Windows variant):
 * Reserve @length + @alignment bytes so an @alignment-aligned sub-range is
 * guaranteed to exist, then commit only the aligned @length-byte window.
 * NOTE(review): failure checks between the visible calls are on lines
 * elided from this excerpt.
 */
186 mono_valloc_aligned (size_t length, size_t alignment, int flags, MonoMemAccountType type)
188 	int prot = prot_from_flags (flags);
189 	char *mem = VirtualAlloc (NULL, length + alignment, MEM_RESERVE, prot);
195 	aligned = aligned_address (mem, length, alignment);
197 	aligned = VirtualAlloc (aligned, length, MEM_COMMIT, prot);
200 	account_mem (type, (ssize_t)length);
/* This platform provides its own aligned valloc; skip the generic fallback. */
205 #define HAVE_VALLOC_ALIGNED
/*
 * mono_vfree (Windows variant):
 * Release a region obtained from mono_valloc/mono_valloc_aligned.
 * VirtualFree with MEM_RELEASE must be given the original allocation base
 * (and size 0), so VirtualQuery is used to recover it — @addr may point
 * into the middle of the reservation (e.g. the aligned sub-block).
 * The accounting for @type is decremented by @length.
 */
208 mono_vfree (void *addr, size_t length, MonoMemAccountType type)
210 	MEMORY_BASIC_INFORMATION mbi;
211 	SIZE_T query_result = VirtualQuery (addr, &mbi, sizeof (mbi));
214 	g_assert (query_result);
216 	res = VirtualFree (mbi.AllocationBase, 0, MEM_RELEASE);
220 	account_mem (type, -(ssize_t)length);
/*
 * mono_file_map (Windows variant):
 * Map @length bytes of the file behind CRT descriptor @fd at @offset.
 * The OS handle is recovered with _get_osfhandle(), a file-mapping object
 * is created with the protection derived from @flags, and a view is
 * mapped.  FILE_MAP_COPY gives copy-on-write semantics for writable maps.
 * The mapping HANDLE is returned through @ret_handle and must be passed
 * back to mono_file_unmap().
 * NOTE(review): MapViewOfFile receives 0 as the high 32 bits of the
 * offset and @offset (a guint64) truncated as the low DWORD — offsets at
 * or above 4GB would map the wrong region; confirm callers never pass
 * such offsets.
 */
226 mono_file_map (size_t length, int flags, int fd, guint64 offset, void **ret_handle)
230 	HANDLE file, mapping;
231 	int prot = prot_from_flags (flags);
232 	/* translate the flags */
233 	/*if (flags & MONO_MMAP_PRIVATE)
234 		mflags |= MAP_PRIVATE;
235 	if (flags & MONO_MMAP_SHARED)
236 		mflags |= MAP_SHARED;
237 	if (flags & MONO_MMAP_ANON)
238 		mflags |= MAP_ANONYMOUS;
239 	if (flags & MONO_MMAP_FIXED)
241 	if (flags & MONO_MMAP_32BIT)
242 		mflags |= MAP_32BIT;*/
244 	mflags = FILE_MAP_READ;
245 	if (flags & MONO_MMAP_WRITE)
246 		mflags = FILE_MAP_COPY;
248 	file = (HANDLE) _get_osfhandle (fd);
249 	mapping = CreateFileMapping (file, NULL, prot, 0, 0, NULL);
252 	ptr = MapViewOfFile (mapping, mflags, 0, offset, length);
254 		CloseHandle (mapping);
257 	*ret_handle = (void*)mapping;
/*
 * mono_file_unmap (Windows variant):
 * Unmap the view at @addr and close the file-mapping @handle that
 * mono_file_map() returned through ret_handle.
 */
262 mono_file_unmap (void *addr, void *handle)
264 	UnmapViewOfFile (addr);
265 	CloseHandle ((HANDLE)handle);
/*
 * mono_mprotect (Windows variant):
 * Change the protection of [addr, addr+length) to match @flags.
 * MONO_MMAP_DISCARD is implemented by decommitting and recommitting the
 * pages, which returns them to the OS and yields zeroed memory.
 * VirtualProtect returns nonzero on success, so the `== 0` comparison
 * converts that into this function's 0-on-success convention.
 */
270 mono_mprotect (void *addr, size_t length, int flags)
273 	int prot = prot_from_flags (flags);
275 	if (flags & MONO_MMAP_DISCARD) {
276 		VirtualFree (addr, length, MEM_DECOMMIT);
277 		VirtualAlloc (addr, length, MEM_COMMIT, prot);
280 	return VirtualProtect (addr, length, prot, &oldprot) == 0;
/*
 * Windows shared-area implementation: no shm support here, so the shared
 * area is a lazily-allocated process-local heap block and the cross-process
 * entry points below are stubs.
 */
284 mono_shared_area (void)
286 	if (!malloced_shared_area)
287 		malloced_shared_area = malloc_shared_area (0);
288 	/* get the pid here */
289 	return malloced_shared_area;
/* Free the process-local area; free(NULL) semantics make the guard benign. */
293 mono_shared_area_remove (void)
295 	if (malloced_shared_area)
296 		g_free (malloced_shared_area);
297 	malloced_shared_area = NULL;
/* Stubs: other processes' areas cannot be inspected on this platform. */
301 mono_shared_area_for_pid (void *pid)
307 mono_shared_area_unload (void *area)
312 mono_shared_area_instances (void **array, int count)
318 #if defined(HAVE_MMAP)
322  * Get the page size in use on the system. Addresses and sizes in the
323  * mono_mmap(), mono_munmap() and mono_mprotect() calls must be pagesize
326  * Returns: the page size in bytes.
/* POSIX variant: query getpagesize() once and cache the result. */
331 	static int saved_pagesize = 0;
333 		return saved_pagesize;
334 	saved_pagesize = getpagesize ();
335 	return saved_pagesize;
/*
 * prot_from_flags (POSIX variant):
 * Translate MONO_MMAP_READ/WRITE/EXEC bits into an OR of PROT_* bits for
 * mmap()/mprotect(); the PROT_* additions are on lines elided here.
 */
339 prot_from_flags (int flags)
341 	int prot = PROT_NONE;
342 	/* translate the protection bits */
343 	if (flags & MONO_MMAP_READ)
345 	if (flags & MONO_MMAP_WRITE)
347 	if (flags & MONO_MMAP_EXEC)
354  * @addr: memory address
355  * @length: memory area size
356  * @flags: protection flags
358  * Allocates @length bytes of virtual memory with the @flags
359  * protection. @addr can be a preferred memory address or a
360  * mandatory one if MONO_MMAP_FIXED is set in @flags.
361  * @addr must be pagesize aligned and can be NULL.
362  * @length must be a multiple of pagesize.
364  * Returns: NULL on failure, the address of the memory area otherwise
367 mono_valloc (void *addr, size_t length, int flags, MonoMemAccountType type)
371 	int prot = prot_from_flags (flags);
372 	/* translate the flags */
373 	if (flags & MONO_MMAP_FIXED)
375 	if (flags & MONO_MMAP_32BIT)
/* Always an anonymous private mapping; no file backing in the common case. */
378 	mflags |= MAP_ANONYMOUS;
379 	mflags |= MAP_PRIVATE;
381 	BEGIN_CRITICAL_SECTION;
382 	ptr = mmap (addr, length, prot, mflags, -1, 0);
383 	if (ptr == MAP_FAILED) {
/* Fallback for systems where anonymous mmap fails: map /dev/zero instead.
 * NOTE(review): the fd validity check and close() are on elided lines —
 * confirm the descriptor is closed after the second mmap. */
384 		int fd = open ("/dev/zero", O_RDONLY);
386 		ptr = mmap (addr, length, prot, mflags, fd, 0);
390 	END_CRITICAL_SECTION;
392 	if (ptr == MAP_FAILED)
395 	account_mem (type, (ssize_t)length);
402  * @addr: memory address returned by mono_valloc ()
403  * @length: size of memory area
405  * Remove the memory mapping at the address @addr.
407  * Returns: 0 on success.
410 mono_vfree (void *addr, size_t length, MonoMemAccountType type)
/* munmap inside the critical region; then subtract from the accounting. */
413 	BEGIN_CRITICAL_SECTION;
414 	res = munmap (addr, length);
415 	END_CRITICAL_SECTION;
417 	account_mem (type, -(ssize_t)length);
424  * @length: size of data to map
425  * @flags: protection flags
426  * @fd: file descriptor
427  * @offset: offset in the file
428  * @ret_handle: pointer to storage for returning a handle for the map
430  * Map the area of the file pointed to by the file descriptor @fd, at offset
431  * @offset and of size @length in memory according to the protection flags
433  * @offset and @length must be multiples of the page size.
434  * @ret_handle must point to a void*: this value must be used when unmapping
435  * the memory area using mono_file_unmap ().
439 mono_file_map (size_t length, int flags, int fd, guint64 offset, void **ret_handle)
443 	int prot = prot_from_flags (flags);
444 	/* translate the flags */
445 	if (flags & MONO_MMAP_PRIVATE)
446 		mflags |= MAP_PRIVATE;
447 	if (flags & MONO_MMAP_SHARED)
448 		mflags |= MAP_SHARED;
449 	if (flags & MONO_MMAP_FIXED)
451 	if (flags & MONO_MMAP_32BIT)
454 	BEGIN_CRITICAL_SECTION;
455 	ptr = mmap (0, length, prot, mflags, fd, offset);
456 	END_CRITICAL_SECTION;
457 	if (ptr == MAP_FAILED)
/* POSIX needs no OS handle for unmapping — stash the length so
 * mono_file_unmap() can pass it to munmap(). */
459 	*ret_handle = (void*)length;
465  * @addr: memory address returned by mono_file_map ()
466  * @handle: handle of memory map
468  * Remove the memory mapping at the address @addr.
469  * @handle must be the value returned in ret_handle by mono_file_map ().
471  * Returns: 0 on success.
474 mono_file_unmap (void *addr, void *handle)
/* On POSIX the "handle" is just the mapping length smuggled as a pointer. */
478 	BEGIN_CRITICAL_SECTION;
479 	res = munmap (addr, (size_t)handle);
480 	END_CRITICAL_SECTION;
487  * @addr: memory address
488  * @length: size of memory area
489  * @flags: new protection flags
491  * Change the protection for the memory area at @addr for @length bytes
492  * to match the supplied @flags.
493  * If @flags includes MONO_MMAP_DISCARD the pages are discarded from memory
494  * and the area is cleared to zero.
495  * @addr must be aligned to the page size.
496  * @length must be a multiple of the page size.
498  * Returns: 0 on success.
/* Native Client has no mprotect: emulate a protection change by re-mapping
 * fresh anonymous pages over the range with MAP_FIXED (content is lost,
 * hence the explicit DISCARD memset first). */
500 #if defined(__native_client__)
502 mono_mprotect (void *addr, size_t length, int flags)
504 	int prot = prot_from_flags (flags);
507 	if (flags & MONO_MMAP_DISCARD) memset (addr, 0, length);
509 	new_addr = mmap(addr, length, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
510 	if (new_addr == addr) return 0;
515 mono_mprotect (void *addr, size_t length, int flags)
517 	int prot = prot_from_flags (flags);
519 	if (flags & MONO_MMAP_DISCARD) {
520 		/* on non-linux the pages are not guaranteed to be zeroed (*bsd, osx at least) */
/* Platform-specific discard strategies (selection #ifdefs are elided here):
 * zero manually where madvise gives no zeroing guarantee. */
522 		if (madvise (addr, length, MADV_DONTNEED))
523 			memset (addr, 0, length);
525 		memset (addr, 0, length);
527 		madvise (addr, length, MADV_DONTNEED);
528 		madvise (addr, length, MADV_FREE);
530 		posix_madvise (addr, length, POSIX_MADV_DONTNEED);
/* mprotect returns 0 on success, matching this function's contract. */
534 	return mprotect (addr, length, prot);
536 #endif // __native_client__
540 /* dummy malloc-based implementation */
/* Used when neither Win32 nor mmap is available: plain heap allocation,
 * so protection flags cannot actually be enforced. */
548 mono_valloc (void *addr, size_t length, int flags, MonoMemAccountType type)
550 	return g_malloc (length);
/* Aligned allocation is not supported in the malloc fallback. */
554 mono_valloc_aligned (size_t size, size_t alignment, int flags, MonoMemAccountType type)
556 	g_assert_not_reached ();
559 #define HAVE_VALLOC_ALIGNED
562 mono_vfree (void *addr, size_t length, MonoMemAccountType type)
/* mprotect fallback: only DISCARD is meaningful — emulate it by zeroing. */
569 mono_mprotect (void *addr, size_t length, int flags)
571 	if (flags & MONO_MMAP_DISCARD) {
572 		memset (addr, 0, length);
579 #if defined(HAVE_SHM_OPEN) && !defined (DISABLE_SHARED_PERFCOUNTERS)
/* Tri-state cache for the MONO_DISABLE_SHARED_AREA env check:
 * 0 = not yet queried, -1 = disabled; the "enabled" assignment is on a
 * line elided from this excerpt. */
581 static int use_shared_area;
584 shared_area_disabled (void)
586 	if (!use_shared_area) {
587 		if (g_getenv ("MONO_DISABLE_SHARED_AREA"))
588 			use_shared_area = -1;
592 	return use_shared_area == -1;
/*
 * mono_shared_area_instances_slow:
 * Portable enumeration path: walk the full process list, probe each pid
 * for a mapped shared area, and collect (up to @count) the pids that have
 * one.  With @cleanup, stale areas are unloaded as they are found.
 */
596 mono_shared_area_instances_slow (void **array, int count, gboolean cleanup)
601 	gpointer *processes = mono_process_list (&num);
602 	for (i = 0; i < num; ++i) {
603 		data = mono_shared_area_for_pid (processes [i]);
606 		mono_shared_area_unload (data);
609 		array [j++] = processes [i];
/*
 * mono_shared_area_instances_helper:
 * Fast enumeration: scan /dev/shm for entries named "mono.<pid>", parse
 * and validate the pid, and collect up to @count live instances.  When
 * @cleanup is set, entries whose owning process is gone (kill(pid, 0)
 * failing with ESRCH/ENOMEM) are removed — the shm_unlink call is on a
 * line elided here.  Falls back to the slow process-list scan when
 * /dev/shm cannot be opened.
 */
619 mono_shared_area_instances_helper (void **array, int count, gboolean cleanup)
623 	int curpid = getpid ();
624 	GDir *dir = g_dir_open ("/dev/shm/", 0, NULL);
626 		return mono_shared_area_instances_slow (array, count, cleanup);
627 	while ((name = g_dir_read_name (dir))) {
630 		if (strncmp (name, "mono.", 5))
632 		pid = strtol (name + 5, &nend, 10);
633 		if (pid <= 0 || nend == name + 5 || *nend)
637 			array [i++] = GINT_TO_POINTER (pid);
641 		if (curpid != pid && kill (pid, 0) == -1 && (errno == ESRCH || errno == ENOMEM)) {
643 			g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
/*
 * mono_shared_area (shm variant):
 * Create this process's page-sized shared area as a POSIX shm object named
 * "/mono.<pid>", sized with ftruncate and mapped MAP_SHARED read/write.
 * If sharing is disabled via the environment, or any shm step fails, a
 * process-local malloc'd area is returned instead (still usable, just not
 * visible to other processes).  Registers mono_shared_area_remove() with
 * atexit for cleanup.
 */
652 mono_shared_area (void)
656 	/* we should allow the user to configure the size */
657 	int size = mono_pagesize ();
662 	if (shared_area_disabled ()) {
663 		if (!malloced_shared_area)
664 			malloced_shared_area = malloc_shared_area (0);
665 		/* get the pid here */
666 		return malloced_shared_area;
669 	/* perform cleanup of segments left over from dead processes */
670 	mono_shared_area_instances_helper (NULL, 0, TRUE);
672 	g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
/* O_EXCL so a stale object from a recycled pid is detected: on EEXIST the
 * old object is removed (shm_unlink on an elided line) and creation retried. */
674 	fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
675 	if (fd == -1 && errno == EEXIST) {
678 		fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
680 	/* in case of failure we try to return a memory area anyway,
681 	 * even if it means the data can't be read by other processes
684 		return malloc_shared_area (pid);
685 	if (ftruncate (fd, size) != 0) {
689 	BEGIN_CRITICAL_SECTION;
690 	res = mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
691 	END_CRITICAL_SECTION;
693 	if (res == MAP_FAILED) {
696 		return malloc_shared_area (pid);
698 	/* we don't need the file descriptor anymore */
700 	header = (SAreaHeader *) res;
703 	header->stats_start = sizeof (SAreaHeader);
704 	header->stats_end = sizeof (SAreaHeader);
706 	mono_atexit (mono_shared_area_remove);
/*
 * mono_shared_area_remove (shm variant):
 * Tear down this process's shared area: free the malloc'd fallback if one
 * was used; otherwise unlink the "/mono.<pid>" shm object (the shm_unlink
 * call is on a line elided from this excerpt).
 */
711 mono_shared_area_remove (void)
715 	if (shared_area_disabled ()) {
716 		if (malloced_shared_area)
717 			g_free (malloced_shared_area);
721 	g_snprintf (buf, sizeof (buf), "/mono.%d", getpid ());
723 	if (malloced_shared_area)
724 		g_free (malloced_shared_area);
/*
 * mono_shared_area_for_pid (shm variant):
 * Open another process's "/mono.<pid>" shm object read-only and map one
 * page of it; returns NULL (on elided lines) when sharing is disabled or
 * the object cannot be opened/mapped.  The descriptor is closed after
 * mapping (close on an elided line).
 */
728 mono_shared_area_for_pid (void *pid)
731 	/* we should allow the user to configure the size */
732 	int size = mono_pagesize ();
736 	if (shared_area_disabled ())
739 	g_snprintf (buf, sizeof (buf), "/mono.%d", GPOINTER_TO_INT (pid));
741 	fd = shm_open (buf, O_RDONLY, S_IRUSR|S_IRGRP);
744 	BEGIN_CRITICAL_SECTION;
745 	res = mmap (NULL, size, PROT_READ, MAP_SHARED, fd, 0);
746 	END_CRITICAL_SECTION;
748 	if (res == MAP_FAILED) {
752 	/* FIXME: validate the area */
753 	/* we don't need the file descriptor anymore */
/*
 * mono_shared_area_unload (shm variant):
 * Unmap an area obtained from mono_shared_area_for_pid(); the mapping is
 * always exactly one page (see FIXME below).
 */
759 mono_shared_area_unload  (void *area)
761 	/* FIXME: currently we load only a page */
762 	BEGIN_CRITICAL_SECTION;
763 	munmap (area, mono_pagesize ());
764 	END_CRITICAL_SECTION;
/* Enumerate live shared-area instances without cleaning up stale ones. */
768 mono_shared_area_instances (void **array, int count)
770 	return mono_shared_area_instances_helper (array, count, FALSE);
/*
 * Fallback shared-area implementation (no shm_open, or shared perf
 * counters disabled): process-local heap allocation only; the
 * cross-process entry points are stubs.
 */
774 mono_shared_area (void)
776 	if (!malloced_shared_area)
777 		malloced_shared_area = malloc_shared_area (getpid ());
778 	/* get the pid here */
779 	return malloced_shared_area;
783 mono_shared_area_remove (void)
785 	if (malloced_shared_area)
786 		g_free (malloced_shared_area);
787 	malloced_shared_area = NULL;
/* No cross-process visibility in this configuration. */
791 mono_shared_area_for_pid (void *pid)
797 mono_shared_area_unload (void *area)
802 mono_shared_area_instances (void **array, int count)
807 #endif // HAVE_SHM_OPEN
811 #ifndef HAVE_VALLOC_ALIGNED
/*
 * mono_valloc_aligned (generic fallback, used when the platform valloc
 * cannot align natively): over-allocate by @alignment so an aligned
 * window is guaranteed to exist, then return the unused head and tail
 * slices to the OS with mono_vfree, keeping only the aligned @size bytes.
 */
813 mono_valloc_aligned (size_t size, size_t alignment, int flags, MonoMemAccountType type)
815 	/* Allocate twice the memory to be able to put the block on an aligned address */
816 	char *mem = (char *) mono_valloc (NULL, size + alignment, flags, type);
822 	aligned = aligned_address (mem, size, alignment);
/* Trim the unaligned prefix... */
825 	mono_vfree (mem, aligned - mem, type);
/* ...and the suffix beyond the aligned block, if any remains. */
826 	if (aligned + size < mem + size + alignment)
827 		mono_vfree (aligned + size, (mem + size + alignment) - (aligned + size), type);
834 mono_pages_not_faulted (void *addr, size_t size)
839 int pagesize = mono_pagesize ();
840 int npages = (size + pagesize - 1) / pagesize;
841 char *faulted = (char *) g_malloc0 (sizeof (char*) * npages);
844 * We cast `faulted` to void* because Linux wants an unsigned
845 * char* while BSD wants a char*.
848 if (mincore (addr, size, (unsigned char *)faulted) != 0) {
850 if (mincore (addr, size, (char *)faulted) != 0) {
855 for (i = 0; i < npages; ++i) {
856 if (faulted [i] != 0)