major.start_nursery_collection ();
- gray_object_queue_init (&gray_queue);
+ gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
num_minor_gcs++;
mono_stats.minor_gc_count ++;
binary_protocol_collection (GENERATION_OLD);
check_scan_starts ();
- gray_object_queue_init (&gray_queue);
- gray_object_queue_init (&workers_distribute_gray_queue);
+ gray_object_queue_init (&gray_queue, mono_sgen_get_unmanaged_allocator ());
+ gray_object_queue_init (&workers_distribute_gray_queue, mono_sgen_get_unmanaged_allocator ());
degraded_mode = 0;
DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));
} \
} while (0)
+/* Forward declaration: gray queues now carry a pointer to the internal
+ * allocator that backs their sections, so section alloc/free needs no
+ * global lock.  The full struct definition appears later in this patch. */
+typedef struct _SgenInternalAllocator SgenInternalAllocator;
+
#define SGEN_GRAY_QUEUE_SECTION_SIZE (128 - 3)
/*
typedef void (*GrayQueueAllocPrepareFunc) (SgenGrayQueue*);
struct _SgenGrayQueue {
+ /* allocator used for this queue's sections; set in gray_object_queue_init */
+ SgenInternalAllocator *allocator;
GrayQueueSection *first;
GrayQueueSection *free_list;
int balance;
#define SGEN_INTERNAL_FREELIST_NUM_SLOTS 30
-typedef struct _SgenInternalAllocator SgenInternalAllocator;
struct _SgenInternalAllocator {
SgenPinnedChunk *chunk_list;
SgenPinnedChunk *free_lists [SGEN_INTERNAL_FREELIST_NUM_SLOTS];
+ /* Per-slot lock-free LIFO of blocks freed into this allocator by OTHER
+ * threads (pushed with CAS in mono_sgen_free_internal_delayed); drained
+ * by the owning thread in alloc_from_slot on its next allocation. */
+ void *delayed_free_lists [SGEN_INTERNAL_FREELIST_NUM_SLOTS];
long small_internal_mem_bytes [INTERNAL_MEM_MAX];
};
void mono_sgen_init_internal_allocator (void) MONO_INTERNAL;
+/* Accessor for the global allocator used by non-worker (unmanaged) code. */
+SgenInternalAllocator* mono_sgen_get_unmanaged_allocator (void) MONO_INTERNAL;
+
const char* mono_sgen_internal_mem_type_name (int type) MONO_INTERNAL;
void mono_sgen_report_internal_mem_usage (void) MONO_INTERNAL;
void mono_sgen_report_internal_mem_usage_full (SgenInternalAllocator *alc) MONO_INTERNAL;
void* mono_sgen_alloc_internal_dynamic (size_t size, int type) MONO_INTERNAL;
void mono_sgen_free_internal_dynamic (void *addr, size_t size, int type) MONO_INTERNAL;
+/* Fixed-size-slot alloc/free against an explicit (usually per-thread) allocator. */
+void* mono_sgen_alloc_internal_fixed (SgenInternalAllocator *allocator, int type) MONO_INTERNAL;
+void mono_sgen_free_internal_fixed (SgenInternalAllocator *allocator, void *addr, int type) MONO_INTERNAL;
+
void* mono_sgen_alloc_internal_full (SgenInternalAllocator *allocator, size_t size, int type) MONO_INTERNAL;
void mono_sgen_free_internal_full (SgenInternalAllocator *allocator, void *addr, size_t size, int type) MONO_INTERNAL;
+/* Free ADDR back to whichever allocator owns its chunk; if that owner is not
+ * THREAD_ALLOCATOR the block is pushed onto the owner's delayed free list. */
+void mono_sgen_free_internal_delayed (void *addr, int type, SgenInternalAllocator *thread_allocator) MONO_INTERNAL;
+
void mono_sgen_debug_printf (int level, const char *format, ...) MONO_INTERNAL;
void mono_sgen_internal_scan_objects (SgenInternalAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data) MONO_INTERNAL;
queue->free_list = section->next;
} else {
/* Allocate a new section */
- section = mono_sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
+ section = mono_sgen_alloc_internal_fixed (queue->allocator, INTERNAL_MEM_GRAY_QUEUE);
}
section->end = 0;
}
+/*
+ * Free a gray queue section.  The section may belong to a different
+ * thread's allocator than THREAD_ALLOCATOR, so it goes through the
+ * delayed-free path, which resolves the owning allocator from the
+ * section's pinned chunk and defers the free when the owners differ.
+ */
static void
-gray_object_free_queue_section (GrayQueueSection *section)
+gray_object_free_queue_section (GrayQueueSection *section, SgenInternalAllocator *thread_allocator)
{
- mono_sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
+ mono_sgen_free_internal_delayed (section, INTERNAL_MEM_GRAY_QUEUE, thread_allocator);
}
static inline gboolean
}
+/*
+ * (Re)initialize QUEUE for a collection, binding it to ALLOCATOR, which
+ * will back all section allocations and frees for this queue.  Requires
+ * the queue to be empty; trims the free list down to the length limit,
+ * releasing surplus sections kept from the previous collection.
+ */
static void
-gray_object_queue_init (GrayQueue *queue)
+gray_object_queue_init (GrayQueue *queue, SgenInternalAllocator *allocator)
{
GrayQueueSection *section, *next;
int i;
g_assert (gray_object_queue_is_empty (queue));
DEBUG (9, g_assert (queue->balance == 0));
+ /* remember which allocator backs this queue's sections */
+ queue->allocator = allocator;
+
/* Free the extra sections allocated during the last collection */
i = 0;
for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next)
while (section->next) {
next = section->next;
section->next = next->next;
- gray_object_free_queue_section (next);
+ gray_object_free_queue_section (next, allocator);
}
}
+/*
+ * Like gray_object_queue_init, but also installs FUNC/DATA as the
+ * alloc-prepare callback invoked when the queue needs a new section.
+ */
static void
-gray_object_queue_init_with_alloc_prepare (GrayQueue *queue, GrayQueueAllocPrepareFunc func, void *data)
+gray_object_queue_init_with_alloc_prepare (GrayQueue *queue, SgenInternalAllocator *allocator, GrayQueueAllocPrepareFunc func, void *data)
{
- gray_object_queue_init (queue);
+ gray_object_queue_init (queue, allocator);
queue->alloc_prepare_func = func;
queue->alloc_prepare_data = data;
}
struct _SgenPinnedChunk {
SgenBlock block;
int num_pages;
+ /* Back-pointer to the allocator that owns this chunk (set in
+ * alloc_pinned_chunk).  Lets a foreign thread look up the owner from
+ * an address, so it can defer a free to the owner's delayed list. */
+ SgenInternalAllocator *allocator;
int *page_sizes; /* a 0 means the page is still unused */
void **free_list;
SgenPinnedChunk *free_list_nexts [SGEN_INTERNAL_FREELIST_NUM_SLOTS];
double data[0];
};
+/* The global internal-allocator mutex is removed: allocators are now
+ * per-thread, and cross-thread frees go through the lock-free delayed
+ * free lists instead of taking a lock. */
-#ifdef SGEN_PARALLEL_MARK
-static LOCK_DECLARE (internal_allocator_mutex);
-#define LOCK_INTERNAL_ALLOCATOR pthread_mutex_lock (&internal_allocator_mutex)
-#define UNLOCK_INTERNAL_ALLOCATOR pthread_mutex_unlock (&internal_allocator_mutex)
-#else
-#define LOCK_INTERNAL_ALLOCATOR
-#define UNLOCK_INTERNAL_ALLOCATOR
-#endif
-
static long long pinned_chunk_bytes_alloced = 0;
static long long large_internal_bytes_alloced = 0;
alc->free_lists [slot] = chunk;
}
-/* LOCKING: if !managed, requires the internal allocator lock to be held */
+/* Allocate a new pinned chunk for ALC.  No locking: the caller is the
+ * thread that owns ALC, so the chunk list is only mutated by its owner. */
static SgenPinnedChunk*
alloc_pinned_chunk (SgenInternalAllocator *alc, gboolean managed)
{
int offset;
int size = SGEN_PINNED_CHUNK_SIZE;
- if (managed)
- LOCK_INTERNAL_ALLOCATOR;
-
chunk = mono_sgen_alloc_os_memory_aligned (size, size, TRUE);
chunk->block.role = managed ? MEMORY_ROLE_PINNED : MEMORY_ROLE_INTERNAL;
chunk->block.next = alc->chunk_list;
alc->chunk_list = chunk;
- if (managed)
- UNLOCK_INTERNAL_ALLOCATOR;
+ /* record ownership so delayed frees can find their way back here */
+ chunk->allocator = alc;
return chunk;
}
return FALSE;
}
-/* LOCKING: assumes the internal allocator lock is held */
+/* Allocate one fixed-size block from ALC's SLOT.  Runs lock-free on the
+ * owning thread's allocator. */
static void*
alloc_from_slot (SgenInternalAllocator *alc, int slot, int type)
{
alc->small_internal_mem_bytes [type] += size;
+ /* First drain the delayed free list: blocks freed into this allocator
+ * by other threads.  NOTE(review): the CAS pop dereferences *p to get
+ * the next pointer; this assumes the owning thread is the ONLY popper,
+ * so the head cannot become NULL (or be ABA-recycled) between the
+ * non-NULL check above and the CAS — confirm the single-consumer
+ * invariant holds for every caller. */
+ if (alc->delayed_free_lists [slot]) {
+ void **p;
+ do {
+ p = alc->delayed_free_lists [slot];
+ } while (SGEN_CAS_PTR (&alc->delayed_free_lists [slot], *p, p) != p);
+ /* zero the block, matching the TRUE (zeroed) OS-memory alloc path */
+ memset (p, 0, size);
+ return p;
+ }
+
restart:
pchunk = alc->free_lists [slot];
if (pchunk) {
g_assert (fixed_type_freelist_slots [type] == -1);
- LOCK_INTERNAL_ALLOCATOR;
-
HEAVY_STAT (++stat_internal_alloc);
if (size > freelist_sizes [SGEN_INTERNAL_FREELIST_NUM_SLOTS - 1]) {
LargeInternalMemHeader *mh;
- UNLOCK_INTERNAL_ALLOCATOR;
-
size += sizeof (LargeInternalMemHeader);
mh = mono_sgen_alloc_os_memory (size, TRUE);
mh->magic = LARGE_INTERNAL_MEM_HEADER_MAGIC;
g_assert (size <= freelist_sizes [slot]);
res = alloc_from_slot (alc, slot, type);
- UNLOCK_INTERNAL_ALLOCATOR;
-
return res;
}
+/*
+ * Allocate a fixed-size block of TYPE from ALLOCATOR.  TYPE must map to
+ * a fixed freelist slot (asserted).  Lock-free; the old global-lock
+ * variant is split into this explicit-allocator form plus a
+ * mono_sgen_alloc_internal wrapper over the unmanaged allocator.
+ */
void*
-mono_sgen_alloc_internal (int type)
+mono_sgen_alloc_internal_fixed (SgenInternalAllocator *allocator, int type)
{
- void *res;
int slot = fixed_type_freelist_slots [type];
g_assert (slot >= 0);
+ return alloc_from_slot (allocator, slot, type);
+}
- LOCK_INTERNAL_ALLOCATOR;
- res = alloc_from_slot (&unmanaged_allocator, slot, type);
- UNLOCK_INTERNAL_ALLOCATOR;
-
- return res;
+/* Back-compat entry point: allocate from the global unmanaged allocator. */
+void*
+mono_sgen_alloc_internal (int type)
+{
+ return mono_sgen_alloc_internal_fixed (&unmanaged_allocator, type);
}
void*
void **p = addr;
void *next;
- LOCK_INTERNAL_ALLOCATOR;
-
g_assert (addr >= (void*)pchunk && (char*)addr < (char*)pchunk + pchunk->num_pages * FREELIST_PAGESIZE);
if (type == INTERNAL_MEM_MANAGED)
g_assert (pchunk->block.role == MEMORY_ROLE_PINNED);
}
alc->small_internal_mem_bytes [type] -= freelist_sizes [slot];
-
- UNLOCK_INTERNAL_ALLOCATOR;
}
void
}
+/*
+ * Free a fixed-size block of TYPE back to ALLOCATOR.  The caller must
+ * pass the allocator that owns ADDR; cross-thread frees should use
+ * mono_sgen_free_internal_delayed instead.
+ */
void
-mono_sgen_free_internal (void *addr, int type)
+mono_sgen_free_internal_fixed (SgenInternalAllocator *allocator, void *addr, int type)
{
int slot = fixed_type_freelist_slots [type];
g_assert (slot >= 0);
- free_from_slot (&unmanaged_allocator, addr, slot, type);
+ free_from_slot (allocator, addr, slot, type);
+}
+
+/* Back-compat entry point: free to the global unmanaged allocator. */
+void
+mono_sgen_free_internal (void *addr, int type)
+{
+ mono_sgen_free_internal_fixed (&unmanaged_allocator, addr, type);
}
void
mono_sgen_free_internal_full (&unmanaged_allocator, addr, size, type);
}
+/*
+ * Free ADDR (a fixed-TYPE block) back to the allocator that owns its
+ * pinned chunk, found via the chunk's allocator back-pointer.  If the
+ * owner is the caller's own allocator (THREAD_ALLOCATOR) the block is
+ * freed directly; otherwise it is pushed onto the owner's per-slot
+ * delayed free list with a CAS loop, to be drained by the owner on its
+ * next allocation from that slot.
+ *
+ * NOTE(review): the push is a classic lock-free LIFO insert; it is safe
+ * with multiple concurrent pushers, but the matching pop in
+ * alloc_from_slot relies on a single consumer — see the note there.
+ */
+void
+mono_sgen_free_internal_delayed (void *addr, int type, SgenInternalAllocator *thread_allocator)
+{
+ SgenPinnedChunk *pchunk = (SgenPinnedChunk*)SGEN_PINNED_CHUNK_FOR_PTR (addr);
+ SgenInternalAllocator *alc = pchunk->allocator;
+ int slot;
+ void *next;
+
+ if (alc == thread_allocator) {
+ mono_sgen_free_internal_fixed (alc, addr, type);
+ return;
+ }
+
+ slot = fixed_type_freelist_slots [type];
+ g_assert (slot >= 0);
+
+ /* link the freed block in as the new list head */
+ do {
+ next = alc->delayed_free_lists [slot];
+ *(void**)addr = next;
+ } while (SGEN_CAS_PTR (&alc->delayed_free_lists [slot], addr, next) != next);
+}
+
void
mono_sgen_dump_internal_mem_usage (FILE *heap_dump_file)
{
#endif
}
+/* Expose the global unmanaged allocator so callers (e.g. the main GC
+ * thread's gray queues) can pass it where an explicit allocator is
+ * required. */
+SgenInternalAllocator*
+mono_sgen_get_unmanaged_allocator (void)
+{
+ return &unmanaged_allocator;
+}
+
void
mono_sgen_internal_scan_objects (SgenInternalAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data)
{
workers_thread_func (void *data_untyped)
{
WorkerData *data = data_untyped;
+ /* Per-worker allocator, zero-initialized, living on this thread's
+ * stack; it backs this worker's private gray queue sections.
+ * NOTE(review): chunks record &allocator as their owner, so any
+ * delayed free targeting this allocator after the thread function
+ * returns would write through a dangling stack pointer — confirm the
+ * worker lifetime covers all frees of its sections. */
+ SgenInternalAllocator allocator;
+
+ memset (&allocator, 0, sizeof (allocator));
+
+ gray_object_queue_init_with_alloc_prepare (&data->private_gray_queue, &allocator,
+ workers_gray_queue_share_redirect, data);
for (;;) {
//g_print ("worker waiting for start %d\n", data->start_worker_sem);
workers_change_num_working (1);
}
- gray_object_queue_init (&data->private_gray_queue);
+ gray_object_queue_init (&data->private_gray_queue, &allocator);
MONO_SEM_POST (&workers_done_sem);
MONO_SEM_INIT (&workers_done_sem, 0);
workers_gc_thread_data.shared_buffer_increment = 1;
workers_gc_thread_data.shared_buffer_index = 0;
+ /* the distribute queue runs on the GC thread, so it uses the global
+ * unmanaged allocator rather than a worker-local one */
- gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue,
+ gray_object_queue_init_with_alloc_prepare (&workers_distribute_gray_queue, mono_sgen_get_unmanaged_allocator (),
workers_gray_queue_share_redirect, &workers_gc_thread_data);
g_assert (num_workers <= sizeof (workers_primes) / sizeof (workers_primes [0]));
for (i = 0; i < workers_num; ++i) {
workers_data [i].shared_buffer_increment = workers_primes [i];
workers_data [i].shared_buffer_index = 0;
+ /* per-worker queue init moves into workers_thread_func, where each
+ * worker binds its queue to its own thread-local allocator */
- gray_object_queue_init_with_alloc_prepare (&workers_data [i].private_gray_queue,
- workers_gray_queue_share_redirect, &workers_data [i]);
}
mono_counters_register ("Shared buffer insert tries", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_shared_buffer_insert_tries);