3 * Support for mapping code into the process address space
6 * Mono Team (mono-list@lists.ximian.com)
8 * Copyright 2001-2008 Novell, Inc.
9 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <sys/types.h>
30 #endif /* !HOST_WIN32 */
32 #include "mono-mmap.h"
33 #include "mono-mmap-internals.h"
34 #include "mono-proclib.h"
35 #include <mono/utils/mono-threads.h>
36 #include <mono/utils/atomic.h>
37 #include <mono/utils/mono-counters.h>
/*
 * BEGIN/END_CRITICAL_SECTION bracket blocking mmap/munmap calls,
 * flagging the current MonoThreadInfo as inside a critical region so
 * the thread-suspend machinery can account for it.
 * NOTE(review): the closing "} while (0)" continuation lines are
 * elided in this excerpt.
 * MAP_ANONYMOUS below is aliased to MAP_ANON for platforms (older
 * BSDs) that only define the latter -- the guarding #ifndef is
 * elided here; confirm against the full file.
 */
39 #define BEGIN_CRITICAL_SECTION do { \
40 MonoThreadInfo *__info = mono_thread_info_current_unchecked (); \
41 if (__info) __info->inside_critical_region = TRUE; \
43 #define END_CRITICAL_SECTION \
44 if (__info) __info->inside_critical_region = FALSE; \
48 #define MAP_ANONYMOUS MAP_ANON
/*
 * malloc_shared_area:
 * Heap-allocated (process-local, not actually shared) fallback for the
 * statistics area: allocate one zeroed page and point the SAreaHeader
 * stats offsets just past the header.  NOTE(review): the return type,
 * remaining header-field initialization and the return statement are
 * elided in this excerpt.
 */
64 malloc_shared_area (int pid)
66 int size = mono_pagesize ();
67 SAreaHeader *sarea = (SAreaHeader *) g_malloc0 (size);
70 sarea->stats_start = sizeof (SAreaHeader);
71 sarea->stats_end = sizeof (SAreaHeader);
/*
 * aligned_address:
 * Round @mem up to the next @alignment boundary (alignment must be a
 * power of two for the mask trick to work) and assert that the aligned
 * block of @size bytes still fits within the @size + @alignment
 * over-allocation.
 */
77 aligned_address (char *mem, size_t size, size_t alignment)
79 char *aligned = (char*)((size_t)(mem + (alignment - 1)) & ~(alignment - 1));
80 g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((size_t)aligned & (alignment - 1)));
/* Per-account-type byte counters, updated atomically in account_mem (). */
84 static volatile size_t allocation_count [MONO_MEM_ACCOUNT_MAX];
/*
 * account_mem:
 * Atomically add @size (negative on free) to the counter for @type,
 * choosing the 32- or 64-bit interlocked add to match the word size.
 */
87 account_mem (MonoMemAccountType type, ssize_t size)
89 #if SIZEOF_VOID_P == 4
90 InterlockedAdd ((volatile gint32*)&allocation_count [type], (gint32)size);
92 InterlockedAdd64 ((volatile gint64*)&allocation_count [type], (gint64)size);
/*
 * mono_mem_account_type_name:
 * Map a MonoMemAccountType to a human-readable name for the counters
 * UI.  NOTE(review): only part of the names[] table and none of the
 * lookup/return code is visible in this excerpt.
 */
97 mono_mem_account_type_name (MonoMemAccountType type)
99 static const char *names[] = {
108 "SGen shadow card table",
110 "SGen binary protocol",
/*
 * mono_mem_account_register_counters:
 * Register one "Valloc <type name>" byte counter per accounting type,
 * backed directly by the corresponding allocation_count slot.
 */
120 mono_mem_account_register_counters (void)
122 for (int i = 0; i < MONO_MEM_ACCOUNT_MAX; ++i) {
123 const char *prefix = "Valloc ";
124 const char *name = mono_mem_account_type_name (i);
/* descr is declared on a line elided from this view; the assert below
 * guarantees the sprintf cannot overflow it. */
126 g_assert (strlen (prefix) + strlen (name) < sizeof (descr));
127 sprintf (descr, "%s%s", prefix, name);
128 mono_counters_register (descr, MONO_COUNTER_WORD | MONO_COUNTER_RUNTIME | MONO_COUNTER_BYTES | MONO_COUNTER_VARIABLE, (void*)&allocation_count [i]);
133 // Windows specific implementation in mono-mmap-windows.c
134 #define HAVE_VALLOC_ALIGNED
138 static void* malloced_shared_area = NULL;
139 #if defined(HAVE_MMAP)
143 * Get the page size in use on the system. Addresses and sizes in the
144 * mono_mmap(), mono_munmap() and mono_mprotect() calls must be pagesize
147 * Returns: the page size in bytes.
/* NOTE(review): the mono_pagesize () signature and the early-return
 * check are elided here; the visible body caches getpagesize () in a
 * function-local static so the syscall runs only once. */
152 static int saved_pagesize = 0;
154 return saved_pagesize;
155 saved_pagesize = getpagesize ();
156 return saved_pagesize;
/*
 * mono_valloc_granule:
 * Allocation granularity for mono_valloc (): one page on POSIX.
 */
160 mono_valloc_granule (void)
162 return mono_pagesize ();
/*
 * prot_from_flags:
 * Translate the MONO_MMAP_READ/WRITE/EXEC bits of @flags into the
 * corresponding PROT_* bits for mmap/mprotect, starting from
 * PROT_NONE.  NOTE(review): the "prot |= PROT_*" lines and the return
 * are elided in this excerpt.
 */
166 prot_from_flags (int flags)
168 int prot = PROT_NONE;
169 /* translate the protection bits */
170 if (flags & MONO_MMAP_READ)
172 if (flags & MONO_MMAP_WRITE)
174 if (flags & MONO_MMAP_EXEC)
181 * @addr: memory address
182 * @length: memory area size
183 * @flags: protection flags
185 * Allocates @length bytes of virtual memory with the @flags
186 * protection. @addr can be a preferred memory address or a
187 * mandatory one if MONO_MMAP_FIXED is set in @flags.
188 * @addr must be pagesize aligned and can be NULL.
189 * @length must be a multiple of pagesize.
191 * Returns: NULL on failure, the address of the memory area otherwise
194 mono_valloc (void *addr, size_t length, int flags, MonoMemAccountType type)
198 int prot = prot_from_flags (flags);
199 /* translate the flags */
200 if (flags & MONO_MMAP_FIXED)
202 if (flags & MONO_MMAP_32BIT)
/* Anonymous private mapping: backed by zero-filled pages, not a file. */
205 mflags |= MAP_ANONYMOUS;
206 mflags |= MAP_PRIVATE;
/* mmap can block; flag the thread as inside a critical region for the
 * suspend machinery. */
208 BEGIN_CRITICAL_SECTION;
209 ptr = mmap (addr, length, prot, mflags, -1, 0);
210 if (ptr == MAP_FAILED) {
/* Fallback for systems where anonymous mapping fails: map /dev/zero
 * instead.  NOTE(review): fd validity check and close() are elided. */
211 int fd = open ("/dev/zero", O_RDONLY);
213 ptr = mmap (addr, length, prot, mflags, fd, 0);
217 END_CRITICAL_SECTION;
219 if (ptr == MAP_FAILED)
/* Record the allocation against the per-type memory counters. */
222 account_mem (type, (ssize_t)length);
229 * @addr: memory address returned by mono_valloc ()
230 * @length: size of memory area
232 * Remove the memory mapping at the address @addr.
234 * Returns: 0 on success.
237 mono_vfree (void *addr, size_t length, MonoMemAccountType type)
240 BEGIN_CRITICAL_SECTION;
241 res = munmap (addr, length);
242 END_CRITICAL_SECTION;
/* Subtract the freed bytes from the per-type counter. */
244 account_mem (type, -(ssize_t)length);
251 * @length: size of data to map
252 * @flags: protection flags
253 * @fd: file descriptor
254 * @offset: offset in the file
255 * @ret_handle: pointer to storage for returning a handle for the map
257 * Map the area of the file pointed to by the file descriptor @fd, at offset
258 * @offset and of size @length in memory according to the protection flags
260 * @offset and @length must be multiples of the page size.
261 * @ret_handle must point to a void*: this value must be used when unmapping
262 * the memory area using mono_file_unmap ().
266 mono_file_map (size_t length, int flags, int fd, guint64 offset, void **ret_handle)
270 int prot = prot_from_flags (flags);
271 /* translate the flags */
272 if (flags & MONO_MMAP_PRIVATE)
273 mflags |= MAP_PRIVATE;
274 if (flags & MONO_MMAP_SHARED)
275 mflags |= MAP_SHARED;
276 if (flags & MONO_MMAP_FIXED)
278 if (flags & MONO_MMAP_32BIT)
281 BEGIN_CRITICAL_SECTION;
282 ptr = mmap (0, length, prot, mflags, fd, offset);
283 END_CRITICAL_SECTION;
284 if (ptr == MAP_FAILED)
/* On POSIX the "handle" is simply the mapped length, which
 * mono_file_unmap () passes back to munmap (). */
286 *ret_handle = (void*)length;
292 * @addr: memory address returned by mono_file_map ()
293 * @handle: handle of memory map
295 * Remove the memory mapping at the address @addr.
296 * @handle must be the value returned in ret_handle by mono_file_map ().
298 * Returns: 0 on success.
301 mono_file_unmap (void *addr, void *handle)
305 BEGIN_CRITICAL_SECTION;
/* @handle encodes the mapping length (see mono_file_map ()). */
306 res = munmap (addr, (size_t)handle);
307 END_CRITICAL_SECTION;
314 * @addr: memory address
315 * @length: size of memory area
316 * @flags: new protection flags
318 * Change the protection for the memory area at @addr for @length bytes
319 * to match the supplied @flags.
320 * If @flags includes MONO_MMAP_DISCARD the pages are discarded from memory
321 * and the area is cleared to zero.
322 * @addr must be aligned to the page size.
323 * @length must be a multiple of the page size.
325 * Returns: 0 on success.
327 #if defined(__native_client__)
/* Native Client variant: emulate a protection change by re-mapping the
 * range in place (MAP_FIXED) with the desired protection; this also
 * discards the old pages, so DISCARD is satisfied by the fresh
 * zero-filled anonymous mapping (the memset handles the pre-remap
 * request).  NOTE(review): return type, new_addr declaration and the
 * failure-path return are elided in this excerpt. */
329 mono_mprotect (void *addr, size_t length, int flags)
331 int prot = prot_from_flags (flags);
334 if (flags & MONO_MMAP_DISCARD) memset (addr, 0, length);
336 new_addr = mmap(addr, length, prot, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
337 if (new_addr == addr) return 0;
342 mono_mprotect (void *addr, size_t length, int flags)
344 int prot = prot_from_flags (flags);
346 if (flags & MONO_MMAP_DISCARD) {
347 /* on non-linux the pages are not guaranteed to be zeroed (*bsd, osx at least) */
/* NOTE(review): the platform #ifdef lines selecting among the madvise/
 * posix_madvise/memset variants below are elided in this excerpt. */
349 if (madvise (addr, length, MADV_DONTNEED))
350 memset (addr, 0, length);
352 memset (addr, 0, length);
354 madvise (addr, length, MADV_DONTNEED);
355 madvise (addr, length, MADV_FREE);
357 posix_madvise (addr, length, POSIX_MADV_DONTNEED);
361 return mprotect (addr, length, prot);
363 #endif // __native_client__
367 /* dummy malloc-based implementation */
/* Fallbacks for builds without mmap: virtual-memory operations are
 * emulated with the plain heap allocator.  NOTE(review): several
 * return-type/body lines are elided from this excerpt. */
375 mono_valloc_granule (void)
377 return mono_pagesize ();
/* mono_valloc fallback: @addr and @flags are ignored; just heap-allocate. */
381 mono_valloc (void *addr, size_t length, int flags, MonoMemAccountType type)
383 return g_malloc (length);
/* Aligned allocation is not supported by the malloc fallback. */
387 mono_valloc_aligned (size_t size, size_t alignment, int flags, MonoMemAccountType type)
389 g_assert_not_reached ();
392 #define HAVE_VALLOC_ALIGNED
/* mono_vfree fallback (body elided; presumably g_free -- confirm). */
395 mono_vfree (void *addr, size_t length, MonoMemAccountType type)
/* mprotect fallback: only MONO_MMAP_DISCARD has an effect (zeroing). */
402 mono_mprotect (void *addr, size_t length, int flags)
404 if (flags & MONO_MMAP_DISCARD) {
405 memset (addr, 0, length);
412 #if defined(HAVE_SHM_OPEN) && !defined (DISABLE_SHARED_PERFCOUNTERS)
/* 0 = not yet checked, -1 = disabled; set once by shared_area_disabled (). */
414 static int use_shared_area;
/*
 * shared_area_disabled:
 * Lazily consult the MONO_DISABLE_SHARED_AREA environment variable
 * (only on the first call) and cache the result in use_shared_area.
 */
417 shared_area_disabled (void)
419 if (!use_shared_area) {
420 if (g_getenv ("MONO_DISABLE_SHARED_AREA"))
421 use_shared_area = -1;
425 return use_shared_area == -1;
/*
 * mono_shared_area_instances_slow:
 * Fallback enumeration: walk the full process list and probe each pid
 * for a mapped shared area; when one is found it is unloaded again and
 * the pid stored into @array.  NOTE(review): declarations, bounds
 * checks against @count and the return are elided in this excerpt.
 */
429 mono_shared_area_instances_slow (void **array, int count, gboolean cleanup)
434 gpointer *processes = mono_process_list (&num);
435 for (i = 0; i < num; ++i) {
436 data = mono_shared_area_for_pid (processes [i]);
439 mono_shared_area_unload (data);
442 array [j++] = processes [i];
/*
 * mono_shared_area_instances_helper:
 * Enumerate "/dev/shm/mono.<pid>" entries, storing live pids into
 * @array; when @cleanup is set, segments belonging to dead processes
 * (kill(pid, 0) failing with ESRCH/ENOMEM) are unlinked.  Falls back
 * to the slow process-list scan when /dev/shm cannot be opened.
 */
452 mono_shared_area_instances_helper (void **array, int count, gboolean cleanup)
456 int curpid = getpid ();
457 GDir *dir = g_dir_open ("/dev/shm/", 0, NULL);
459 return mono_shared_area_instances_slow (array, count, cleanup);
460 while ((name = g_dir_read_name (dir))) {
463 if (strncmp (name, "mono.", 5))
/* Parse the pid out of "mono.<pid>"; reject non-numeric/empty tails. */
465 pid = strtol (name + 5, &nend, 10);
466 if (pid <= 0 || nend == name + 5 || *nend)
470 array [i++] = GINT_TO_POINTER (pid);
/* kill(pid, 0) only probes existence; ESRCH means the owner is gone. */
474 if (curpid != pid && kill (pid, 0) == -1 && (errno == ESRCH || errno == ENOMEM)) {
476 g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
/*
 * mono_shared_area:
 * Create this process's one-page shared statistics area, published as
 * "/mono.<pid>" via shm_open, initialize its SAreaHeader and register
 * an atexit handler to remove it.  Falls back to a process-local
 * malloc'd area when shared areas are disabled or shm setup fails.
 */
485 mono_shared_area (void)
489 /* we should allow the user to configure the size */
490 int size = mono_pagesize ();
495 if (shared_area_disabled ()) {
496 if (!malloced_shared_area)
497 malloced_shared_area = malloc_shared_area (0);
498 /* get the pid here */
499 return malloced_shared_area;
502 /* perform cleanup of segments left over from dead processes */
503 mono_shared_area_instances_helper (NULL, 0, TRUE);
505 g_snprintf (buf, sizeof (buf), "/mono.%d", pid);
507 fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
/* Leftover segment from a previous run with the same pid: unlink it
 * (on a line elided here) and retry the exclusive create. */
508 if (fd == -1 && errno == EEXIST) {
511 fd = shm_open (buf, O_CREAT|O_EXCL|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP);
513 /* in case of failure we try to return a memory area anyway,
514 * even if it means the data can't be read by other processes
517 return malloc_shared_area (pid);
518 if (ftruncate (fd, size) != 0) {
522 BEGIN_CRITICAL_SECTION;
523 res = mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
524 END_CRITICAL_SECTION;
526 if (res == MAP_FAILED) {
529 return malloc_shared_area (pid);
531 /* we don't need the file descriptor anymore */
533 header = (SAreaHeader *) res;
536 header->stats_start = sizeof (SAreaHeader);
537 header->stats_end = sizeof (SAreaHeader);
/* Ensure the shm segment is unlinked when the process exits cleanly. */
539 mono_atexit (mono_shared_area_remove);
/*
 * mono_shared_area_remove:
 * atexit hook: unlink this process's "/mono.<pid>" shm segment (the
 * shm_unlink call is on a line elided here) and free the malloc
 * fallback area if one was used.
 */
544 mono_shared_area_remove (void)
548 if (shared_area_disabled ()) {
549 if (malloced_shared_area)
550 g_free (malloced_shared_area);
554 g_snprintf (buf, sizeof (buf), "/mono.%d", getpid ());
556 if (malloced_shared_area)
557 g_free (malloced_shared_area);
/*
 * mono_shared_area_for_pid:
 * Map another process's "/mono.<pid>" segment read-only.  Returns the
 * mapped page, or NULL (on lines elided here) when shared areas are
 * disabled, the segment does not exist, or the mmap fails.
 */
561 mono_shared_area_for_pid (void *pid)
564 /* we should allow the user to configure the size */
565 int size = mono_pagesize ();
569 if (shared_area_disabled ())
572 g_snprintf (buf, sizeof (buf), "/mono.%d", GPOINTER_TO_INT (pid));
574 fd = shm_open (buf, O_RDONLY, S_IRUSR|S_IRGRP);
577 BEGIN_CRITICAL_SECTION;
578 res = mmap (NULL, size, PROT_READ, MAP_SHARED, fd, 0);
579 END_CRITICAL_SECTION;
581 if (res == MAP_FAILED) {
585 /* FIXME: validate the area */
586 /* we don't need the file descriptor anymore */
/*
 * mono_shared_area_unload:
 * Unmap an area previously returned by mono_shared_area_for_pid ().
 * Only one page is mapped/unmapped (see FIXME below).
 */
592 mono_shared_area_unload (void *area)
594 /* FIXME: currently we load only a page */
595 BEGIN_CRITICAL_SECTION;
596 munmap (area, mono_pagesize ());
597 END_CRITICAL_SECTION;
/*
 * mono_shared_area_instances:
 * Public wrapper: enumerate live shared-area pids without cleanup.
 */
601 mono_shared_area_instances (void **array, int count)
603 return mono_shared_area_instances_helper (array, count, FALSE);
/* Fallback when shm_open is unavailable: a lazily created,
 * process-local malloc'd area stands in for the shared one. */
607 mono_shared_area (void)
609 if (!malloced_shared_area)
610 malloced_shared_area = malloc_shared_area (getpid ());
611 /* get the pid here */
612 return malloced_shared_area;
/* Fallback: free the process-local area on shutdown and clear the
 * pointer to prevent double-free on repeated calls. */
616 mono_shared_area_remove (void)
618 if (malloced_shared_area)
619 g_free (malloced_shared_area);
620 malloced_shared_area = NULL;
/* Stubs for builds without shm_open: cross-process shared areas are
 * unsupported.  NOTE(review): bodies are elided in this excerpt --
 * presumably NULL returns / no-ops; confirm against the full file. */
624 mono_shared_area_for_pid (void *pid)
630 mono_shared_area_unload (void *area)
635 mono_shared_area_instances (void **array, int count)
640 #endif // HAVE_SHM_OPEN
644 #ifndef HAVE_VALLOC_ALIGNED
/*
 * mono_valloc_aligned:
 * Generic fallback: over-allocate @size + @alignment bytes, find the
 * first @alignment-aligned address inside the mapping, then unmap the
 * unused head and tail so exactly the aligned @size bytes stay mapped.
 */
646 mono_valloc_aligned (size_t size, size_t alignment, int flags, MonoMemAccountType type)
648 /* Allocate twice the memory to be able to put the block on an aligned address */
649 char *mem = (char *) mono_valloc (NULL, size + alignment, flags, type);
655 aligned = aligned_address (mem, size, alignment);
/* Trim the leading slack before the aligned start... */
658 mono_vfree (mem, aligned - mem, type);
/* ...and the trailing slack after aligned + size, if any. */
659 if (aligned + size < mem + size + alignment)
660 mono_vfree (aligned + size, (mem + size + alignment) - (aligned + size), type);
667 mono_pages_not_faulted (void *addr, size_t size)
672 int pagesize = mono_pagesize ();
673 int npages = (size + pagesize - 1) / pagesize;
674 char *faulted = (char *) g_malloc0 (sizeof (char*) * npages);
677 * We cast `faulted` to void* because Linux wants an unsigned
678 * char* while BSD wants a char*.
681 if (mincore (addr, size, (unsigned char *)faulted) != 0) {
683 if (mincore (addr, size, (char *)faulted) != 0) {
688 for (i = 0; i < npages; ++i) {
689 if (faulted [i] != 0)