* Author:
* Dietmar Maurer (dietmar@ximian.com)
*
- * (C) 2001 Ximian, Inc.
+ * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
+ * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin Inc. (http://www.xamarin.com)
*/
#include <config.h>
#include <string.h>
#include "mempool.h"
+#include "mempool-internals.h"
#if USE_MALLOC_FOR_MEMPOOLS
#define MALLOC_ALLOCATION
*/
#define MEM_ALIGN 8
+#define ALIGN_SIZE(s) (((s) + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1))
+#define SIZEOF_MEM_POOL (ALIGN_SIZE (sizeof (MonoMemPool)))
+#if MONO_SMALL_CONFIG
+#define MONO_MEMPOOL_PAGESIZE 4096
+#define MONO_MEMPOOL_MINSIZE 256
+#else
#define MONO_MEMPOOL_PAGESIZE 8192
#define MONO_MEMPOOL_MINSIZE 512
+#endif
#ifndef G_LIKELY
#define G_LIKELY(a) (a)
};
#endif
+static long total_bytes_allocated = 0;
+
/**
* mono_mempool_new:
*
pool = g_malloc (initial_size);
pool->next = NULL;
- pool->pos = (guint8*)pool + sizeof (MonoMemPool);
- pool->end = pool->pos + initial_size - sizeof (MonoMemPool);
+ pool->pos = (guint8*)pool + SIZEOF_MEM_POOL;
+ pool->end = pool->pos + initial_size - SIZEOF_MEM_POOL;
pool->d.allocated = pool->size = initial_size;
+ total_bytes_allocated += initial_size;
return pool;
#endif
}
#else
MonoMemPool *p, *n;
+ total_bytes_allocated -= pool->d.allocated;
+
p = pool;
while (p) {
n = p->next;
pool->allocated = 0;
#else
- pool->pos = (guint8*)pool + sizeof (MonoMemPool);
- pool->end = pool->pos + pool->size - sizeof (MonoMemPool);
+ pool->pos = (guint8*)pool + SIZEOF_MEM_POOL;
+ pool->end = pool->pos + pool->size - SIZEOF_MEM_POOL;
#endif
}
#include "metadata/appdomain.h"
#include "metadata/metadata-internals.h"
+static mono_mutex_t mempool_tracing_lock;
+#define BACKTRACE_DEPTH 7
+/*
+ * mono_backtrace:
+ * @size: number of bytes being allocated (printed before the trace)
+ *
+ * TRACE_ALLOCATIONS helper: prints @size and up to BACKTRACE_DEPTH stack
+ * frames, serialized under a recursive mutex so traces from concurrent
+ * threads do not interleave in the output.
+ */
static void
-mono_backtrace (int limit)
+mono_backtrace (int size)
{
- void *array[limit];
+ void *array[BACKTRACE_DEPTH];
char **names;
- int i;
- backtrace (array, limit);
- names = backtrace_symbols (array, limit);
- for (i = 1; i < limit; ++i) {
+ int i, symbols;
+ static gboolean inited;
+
+ /* lazy one-shot init of the tracing lock; NOTE(review): unsynchronized
+ * check — assumes the first traced allocation happens before threads
+ * race here, confirm */
+ if (!inited) {
+ mono_mutex_init_recursive (&mempool_tracing_lock);
+ inited = TRUE;
+ }
+
+ mono_mutex_lock (&mempool_tracing_lock);
+ g_print ("Allocating %d bytes\n", size);
+ symbols = backtrace (array, BACKTRACE_DEPTH);
+ names = backtrace_symbols (array, symbols);
+ /* i = 1: skip the innermost frame (this function's own entry) */
+ for (i = 1; i < symbols; ++i) {
g_print ("\t%s\n", names [i]);
}
- g_free (names);
+ /* backtrace_symbols() returns malloc()ed memory: must be released
+ * with free(), not g_free() */
+ free (names);
+ mono_mutex_unlock (&mempool_tracing_lock);
}
#endif
+/*
+ * get_next_size:
+ * @pool: the pool a new chunk is being sized for
+ * @size: bytes the caller needs to fit into the new chunk
+ *
+ * Picks the allocation size of the next pool chunk: grow geometrically
+ * (x1.5) from the most recent chunk's size until @size plus the chunk
+ * header fits, then cap at MONO_MEMPOOL_PAGESIZE — unless the request
+ * itself is larger than a page, in which case the uncapped target is
+ * returned.
+ */
get_next_size (MonoMemPool *pool, int size)
{
int target = pool->next? pool->next->size: pool->size;
- size += sizeof (MonoMemPool);
+ /* account for the chunk header at the start of every allocation */
+ size += SIZEOF_MEM_POOL;
/* increase the size */
target += target / 2;
while (target < size) {
target += target / 2;
}
- if (target > MONO_MEMPOOL_PAGESIZE)
+ /* cap at one page, but let oversized requests exceed it */
+ if (target > MONO_MEMPOOL_PAGESIZE && size <= MONO_MEMPOOL_PAGESIZE)
target = MONO_MEMPOOL_PAGESIZE;
- /* we are called with size smaller than 4096 */
- g_assert (size <= MONO_MEMPOOL_PAGESIZE);
return target;
}
#endif
/**
* mono_mempool_alloc:
- * @pool: the momory pool to destroy
+ * @pool: the memory pool to use
* @size: size of the momory block
*
* Allocates a new block of memory in @pool.
mono_mempool_alloc (MonoMemPool *pool, guint size)
{
gpointer rval;
-
- size = (size + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1);
+
+ size = ALIGN_SIZE (size);
#ifdef MALLOC_ALLOCATION
{
- Chunk *c = g_malloc (sizeof (Chunk) + size);
+ Chunk *c = g_malloc (size + sizeof (Chunk));
c->next = pool->chunks;
pool->chunks = c;
- c->size = size;
+ c->size = size - sizeof(Chunk);
pool->allocated += size;
#ifdef TRACE_ALLOCATIONS
if (pool == mono_get_corlib ()->mempool) {
- g_print ("Allocating %d bytes\n", size);
- mono_backtrace (7);
+ mono_backtrace (size);
}
#endif
if (G_UNLIKELY (pool->pos >= pool->end)) {
pool->pos -= size;
if (size >= 4096) {
- MonoMemPool *np = g_malloc (sizeof (MonoMemPool) + size);
+ MonoMemPool *np = g_malloc (SIZEOF_MEM_POOL + size);
np->next = pool->next;
pool->next = np;
- np->pos = (guint8*)np + sizeof (MonoMemPool);
- np->size = sizeof (MonoMemPool) + size;
- np->end = np->pos + np->size - sizeof (MonoMemPool);
- pool->d.allocated += sizeof (MonoMemPool) + size;
- return (guint8*)np + sizeof (MonoMemPool);
+ np->pos = (guint8*)np + SIZEOF_MEM_POOL;
+ np->size = SIZEOF_MEM_POOL + size;
+ np->end = np->pos + np->size - SIZEOF_MEM_POOL;
+ pool->d.allocated += SIZEOF_MEM_POOL + size;
+ total_bytes_allocated += SIZEOF_MEM_POOL + size;
+ return (guint8*)np + SIZEOF_MEM_POOL;
} else {
int new_size = get_next_size (pool, size);
MonoMemPool *np = g_malloc (new_size);
np->next = pool->next;
pool->next = np;
- pool->pos = (guint8*)np + sizeof (MonoMemPool);
- np->pos = (guint8*)np + sizeof (MonoMemPool);
+ pool->pos = (guint8*)np + SIZEOF_MEM_POOL;
+ np->pos = (guint8*)np + SIZEOF_MEM_POOL;
np->size = new_size;
np->end = np->pos;
- pool->end = pool->pos + new_size - sizeof (MonoMemPool);
+ pool->end = pool->pos + new_size - SIZEOF_MEM_POOL;
pool->d.allocated += new_size;
+ total_bytes_allocated += new_size;
rval = pool->pos;
pool->pos += size;
}
}
+#endif
return rval;
-#endif
}
/**
#ifdef MALLOC_ALLOCATION
rval = mono_mempool_alloc (pool, size);
#else
- size = (size + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1);
+ size = ALIGN_SIZE (size);
rval = pool->pos;
pool->pos = (guint8*)rval + size;
if (G_UNLIKELY (pool->pos >= pool->end)) {
rval = mono_mempool_alloc (pool, size);
}
+#ifdef TRACE_ALLOCATIONS
+ else if (pool == mono_get_corlib ()->mempool) {
+ mono_backtrace (size);
+ }
+#endif
#endif
memset (rval, 0, size);
return pool->d.allocated;
#endif
}
+
+/**
+ * mono_mempool_get_bytes_allocated:
+ *
+ * Return the number of bytes currently allocated for mempools.
+ */
+long
+mono_mempool_get_bytes_allocated (void)
+{
+ return total_bytes_allocated;
+}