#include <signal.h>
#include <errno.h>
#include <assert.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <time.h>
-#include <fcntl.h>
#include "metadata/metadata-internals.h"
#include "metadata/class-internals.h"
#include "metadata/gc-internal.h"
#include "metadata/monitor.h"
#include "metadata/threadpool-internals.h"
#include "metadata/mempool-internals.h"
+#include "metadata/marshal.h"
#include "utils/mono-mmap.h"
+#include "utils/mono-time.h"
#include "utils/mono-semaphore.h"
#include "utils/mono-counters.h"
-#ifdef HAVE_VALGRIND_MEMCHECK_H
-#include <valgrind/memcheck.h>
-#endif
+#include <mono/utils/memcheck.h>
#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
a = i,
static gboolean xdomain_checks = FALSE;
/* If not null, dump the heap after each collection into this file */
static FILE *heap_dump_file = NULL;
+/* If set, mark stacks conservatively, even if precise marking is possible */
+static gboolean conservative_stack_mark = FALSE;
/*
* Turning on heavy statistics will turn off the managed allocator and
#define MAX_DEBUG_LEVEL 8
#define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
-#define TV_DECLARE(name) struct timeval name
-#define TV_GETTIME(tv) gettimeofday (&(tv), NULL)
-#define TV_ELAPSED(start,end) (int)((((end).tv_sec - (start).tv_sec) * 1000000) + end.tv_usec - start.tv_usec)
+#define TV_DECLARE(name) gint64 name
+#define TV_GETTIME(tv) tv = mono_100ns_ticks ()
+#define TV_ELAPSED(start,end) (int)((end-start) / 10)
+
+#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
#define GC_BITS_PER_WORD (sizeof (mword) * 8)
}
}
-static inline gboolean
-is_maybe_half_constructed (MonoObject *o)
-{
- MonoClass *klass;
-
- klass = ((MonoVTable*)LOAD_VTABLE (o))->klass;
- if ((klass == mono_defaults.string_class && mono_string_length ((MonoString*)o) == 0) ||
- (klass->rank && mono_array_length ((MonoArray*)o) == 0))
- return TRUE;
- else
- return FALSE;
-}
-
/*
* ######################################################################
* ######## Global data.
#define MAJOR_SECTION_SIZE (128*1024)
#define BLOCK_FOR_OBJECT(o) ((Block*)(((mword)(o)) & ~(MAJOR_SECTION_SIZE - 1)))
#define MAJOR_SECTION_FOR_OBJECT(o) ((GCMemSection*)BLOCK_FOR_OBJECT ((o)))
-#define DEFAULT_MINOR_COLLECTION_SECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 3 / MAJOR_SECTION_SIZE)
+#define MIN_MINOR_COLLECTION_SECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 3 / MAJOR_SECTION_SIZE)
#define DEFAULT_LOS_COLLECTION_TARGET (DEFAULT_NURSERY_SIZE * 2)
/* to quickly find the head of an object pinned by a conservative address
* we keep track of the objects allocated for each SCAN_START_SIZE memory
static mword pagesize = 4096;
static mword nursery_size = DEFAULT_NURSERY_SIZE;
-static int section_size_used = 0;
static int degraded_mode = 0;
-static int minor_collection_section_allowance = DEFAULT_MINOR_COLLECTION_SECTION_ALLOWANCE;
+static int minor_collection_section_allowance = MIN_MINOR_COLLECTION_SECTION_ALLOWANCE;
static int minor_collection_sections_alloced = 0;
-static int sections_alloced = 0; /* will be reset frequently */
+static int num_major_sections = 0;
static LOSObject *los_object_list = NULL;
static mword los_memory_usage = 0;
char **tlab_temp_end_addr;
char **tlab_real_end_addr;
gpointer **store_remset_buffer_addr;
- int *store_remset_buffer_index_addr;
+ long *store_remset_buffer_index_addr;
RememberedSet *remset;
gpointer runtime_data;
gpointer stopped_ip; /* only valid if the thread is stopped */
char *tlab_temp_end;
char *tlab_real_end;
gpointer *store_remset_buffer;
- int store_remset_buffer_index;
+ long store_remset_buffer_index;
#endif
};
static __thread char *tlab_temp_end;
static __thread char *tlab_real_end;
static __thread gpointer *store_remset_buffer;
-static __thread int store_remset_buffer_index;
-/* Used by the managed allocator */
+static __thread long store_remset_buffer_index;
+/* Used by the managed allocator/wbarrier */
static __thread char **tlab_next_addr;
static __thread char *stack_end;
+static __thread long *store_remset_buffer_index_addr;
#endif
static char *nursery_next = NULL;
static char *nursery_frag_real_end = NULL;
static void mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end);
static void clear_remsets (void);
static void clear_tlabs (void);
-static char *find_tlab_next_from_address (char *addr);
typedef void (*ScanPinnedObjectCallbackFunc) (PinnedChunk*, char*, size_t, void*);
static void scan_pinned_objects (ScanPinnedObjectCallbackFunc callback, void *callback_data);
static void sweep_pinned_objects (void);
switch (d & 0x7) {
case DESC_TYPE_RUN_LENGTH: {
int first_set = (d >> 16) & 0xff;
- int num_set = (d >> 16) & 0xff;
+ int num_set = (d >> 24) & 0xff;
int i;
bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8);
} \
} while (0)
-static mword new_obj_references = 0;
-static mword obj_references_checked = 0;
-
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
- new_obj_references++; \
- /*printf ("bogus ptr %p found at %p in object %p (%s.%s)\n", *(ptr), (ptr), o, o->vtable->klass->name_space, o->vtable->klass->name);*/ \
- } else { \
- obj_references_checked++; \
- } \
- } while (0)
-
-/*
- * ######################################################################
- * ######## Detecting and removing garbage.
- * ######################################################################
- * This section of code deals with detecting the objects no longer in use
- * and reclaiming the memory.
- */
-
#define COUNT_OBJECT_TYPES do { \
switch (desc & 0x7) { \
case DESC_TYPE_STRING: type_str++; break; \
} \
} while (0)
+
+/*
+ * ######################################################################
+ * ######## Detecting and removing garbage.
+ * ######################################################################
+ * This section of code deals with detecting the objects no longer in use
+ * and reclaiming the memory.
+ */
+
+#if 0
+static mword new_obj_references = 0;
+static mword obj_references_checked = 0;
+
+#undef HANDLE_PTR
+#define HANDLE_PTR(ptr,obj) do { \
+ if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_next) { \
+ new_obj_references++; \
+ /*printf ("bogus ptr %p found at %p in object %p (%s.%s)\n", *(ptr), (ptr), o, o->vtable->klass->name_space, o->vtable->klass->name);*/ \
+ } else { \
+ obj_references_checked++; \
+ } \
+ } while (0)
+
static void __attribute__((noinline))
scan_area (char *start, char *end)
{
printf ("\tstrings: %d, runl: %d, vector: %d, bitmaps: %d, lbitmaps: %d, complex: %d\n",
type_str, type_rlen, type_vector, type_bitmap, type_lbit, type_complex);*/
}
+#endif
static gboolean
is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
if ((MonoObject*)*(ptr) == key) { \
- g_print ("found ref to %p in object %p (%s) at offset %d\n", \
+ g_print ("found ref to %p in object %p (%s) at offset %zd\n", \
key, (obj), safe_name ((obj)), ((char*)(ptr) - (char*)(obj))); \
} \
} while (0)
}
static void
-to_space_expand (void)
+new_to_space_section (void)
{
- if (to_space_section) {
- g_assert (to_space_top == to_space_section->end_data);
- g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_top);
-
- to_space_section->next_data = to_space_bumper;
- }
+	/* FIXME: if the current to_space_section is still empty, reuse
+	   it here instead of allocating a new one */
to_space_section = alloc_major_section ();
to_space_bumper = to_space_section->next_data;
static void
to_space_set_next_data (void)
{
+ g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_section->end_data);
to_space_section->next_data = to_space_bumper;
}
+static void
+to_space_expand (void)
+{
+ if (to_space_section) {
+ g_assert (to_space_top == to_space_section->end_data);
+ to_space_set_next_data ();
+ }
+
+ new_to_space_section ();
+}
+
+static void
+unset_to_space (void)
+{
+ /* between collections the to_space_bumper is invalidated
+ because degraded allocations might occur, so we set it to
+ NULL, just to make it explicit */
+ to_space_bumper = NULL;
+
+	/* NOTE: to_space_section must be left set (not NULLed here) if
+	   the FIXME in new_to_space_section (reusing an empty section)
+	   is ever implemented */
+	to_space_section = NULL;
+}
+
static gboolean
object_is_in_to_space (char *obj)
{
frag_size += ALLOC_ALIGN - 1;
frag_size &= ~(ALLOC_ALIGN - 1);
frag_start = (char*)pin_queue [i] + frag_size;
- /*
- * pin_queue [i] might point to a half-constructed string or vector whose
- * length field is not set. In that case, frag_start points inside the
- * (zero initialized) object. Find the end of the object by scanning forward.
- *
- */
- if (is_maybe_half_constructed (pin_queue [i])) {
- char *tlab_end;
-
- /* This is also hit for zero length arrays/strings */
-
- /* Find the end of the TLAB which contained this allocation */
- tlab_end = find_tlab_next_from_address (pin_queue [i]);
-
- if (tlab_end) {
- while ((frag_start < tlab_end) && *(mword*)frag_start == 0)
- frag_start += sizeof (mword);
- } else {
- /*
- * FIXME: The object is either not allocated in a TLAB, or it isn't a
- * half constructed object.
- */
- }
- }
}
nursery_last_pinned_end = frag_start;
frag_end = nursery_real_end;
static void
dump_occupied (char *start, char *end, char *section_start)
{
- fprintf (heap_dump_file, "<occupied offset=\"%d\" size=\"%d\"/>\n", start - section_start, end - start);
+ fprintf (heap_dump_file, "<occupied offset=\"%zd\" size=\"%zd\"/>\n", start - section_start, end - start);
}
static void
char *start = section->data;
char *end = section->data + section->size;
char *occ_start = NULL;
- int pin_slot = 0;
GCVTable *vt;
char *old_start = NULL; /* just for debugging */
- fprintf (heap_dump_file, "<section type=\"%s\" size=\"%d\">\n", type, section->size);
+ fprintf (heap_dump_file, "<section type=\"%s\" size=\"%zu\">\n", type, section->size);
while (start < end) {
guint size;
static void
dump_heap (const char *type, int num, const char *reason)
{
- static char *internal_mem_names [] = { "pin-queue", "fragment", "section", "scan-starts",
- "fin-table", "finalize-entry", "dislink-table",
- "dislink", "roots-table", "root-record", "statistics",
- "remset", "gray-queue", "store-remset" };
+ static char const *internal_mem_names [] = { "pin-queue", "fragment", "section", "scan-starts",
+ "fin-table", "finalize-entry", "dislink-table",
+ "dislink", "roots-table", "root-record", "statistics",
+ "remset", "gray-queue", "store-remset" };
GCMemSection *section;
LOSObject *bigobj;
fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
for (i = 0; i < INTERNAL_MEM_MAX; ++i)
fprintf (heap_dump_file, "<other-mem-usage type=\"%s\" size=\"%ld\"/>\n", internal_mem_names [i], small_internal_mem_bytes [i]);
- fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STACK]);
+ fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_STACK]);
/* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
- fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);
+ fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);
dump_section (nursery_section, "nursery");
char *orig_nursery_next;
Fragment *frag;
GCMemSection *section;
+ int old_num_major_sections = num_major_sections;
+ int sections_alloced;
TV_DECLARE (all_atv);
TV_DECLARE (all_btv);
TV_DECLARE (atv);
nursery_section->next_data = nursery_next;
- sections_alloced = 0;
+ if (!to_space_section) {
+ new_to_space_section ();
+ } else {
+ /* we might have done degraded allocation since the
+ last collection */
+ g_assert (to_space_bumper <= to_space_section->next_data);
+ to_space_bumper = to_space_section->next_data;
- to_space_expand ();
+ to_space_section->is_to_space = TRUE;
+ }
gray_object_queue_init ();
num_minor_gcs++;
commit_stats (GENERATION_NURSERY);
+ sections_alloced = num_major_sections - old_num_major_sections;
minor_collection_sections_alloced += sections_alloced;
return minor_collection_sections_alloced > minor_collection_section_allowance;
}
-static int
-count_major_sections (void)
-{
- GCMemSection *section;
- int count = 0;
-
- for (section = section_list; section; section = section->block.next)
- if (section != nursery_section)
- ++count;
- return count;
-}
-
static void
-scan_from_pinned_chunk_if_marked (PinnedChunk *chunk, char *obj, void *dummy)
+scan_from_pinned_chunk_if_marked (PinnedChunk *chunk, char *obj, size_t size, void *dummy)
{
if (object_is_pinned (obj))
scan_object (obj, NULL, (char*)-1);
int i;
PinnedChunk *chunk;
Fragment *frag;
- int count;
TV_DECLARE (all_atv);
TV_DECLARE (all_btv);
TV_DECLARE (atv);
char *heap_start = NULL;
char *heap_end = (char*)-1;
size_t copy_space_required = 0;
+ int old_num_major_sections = num_major_sections;
+ int num_major_sections_saved, save_target, allowance_target;
init_stats ();
TV_GETTIME (atv);
init_pinning ();
DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
- pin_from_roots (lowest_heap_address, highest_heap_address);
+ pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address);
optimize_pin_queue (0);
/*
if (start != end) {
pin_object (bigobj->data);
if (heap_dump_file)
- pin_stats_register_object ((char*) bigobj->data, safe_object_get_size (bigobj->data));
+ pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %zd from roots\n", bigobj->data, safe_name (bigobj->data), bigobj->size));
}
}
DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
- to_space_expand ();
+ new_to_space_section ();
gray_object_queue_init ();
/* the old generation doesn't need to be scanned (no remembered sets or card
/* all the objects in the heap */
finish_gray_stack (heap_start, heap_end, GENERATION_OLD);
+ unset_to_space ();
+
/* sweep the big objects list */
prevbo = NULL;
for (bigobj = los_object_list; bigobj;) {
commit_stats (GENERATION_OLD);
+ num_major_sections_saved = MAX (old_num_major_sections - num_major_sections, 1);
+
+ save_target = num_major_sections / 2;
+ allowance_target = save_target * minor_collection_sections_alloced / num_major_sections_saved;
+
+ minor_collection_section_allowance = MAX (MIN (allowance_target, num_major_sections), MIN_MINOR_COLLECTION_SECTION_ALLOWANCE);
+
+ /*
+ printf ("alloced %d saved %d target %d allowance %d\n",
+ minor_collection_sections_alloced, num_major_sections_saved, allowance_target,
+ minor_collection_section_allowance);
+ */
+
minor_collection_sections_alloced = 0;
- minor_collection_section_allowance = MAX (DEFAULT_MINOR_COLLECTION_SECTION_ALLOWANCE, count_major_sections () / 3);
}
/*
section->block.next = section_list;
section_list = section;
- ++sections_alloced;
+ ++num_major_sections;
return section;
}
free_internal_mem (section->scan_starts, INTERNAL_MEM_SCAN_STARTS);
free_os_memory (section, MAJOR_SECTION_SIZE);
total_alloc -= MAJOR_SECTION_SIZE - SIZEOF_GC_MEM_SECTION;
+
+ --num_major_sections;
}
/*
static void
free_os_memory (void *addr, size_t size)
{
- munmap (addr, size);
+ mono_vfree (addr, size);
}
/*
if (*ptr && (*ptr < (void*)chunk->start_data || *ptr > (void*)((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE))) {
pin_object (addr);
if (heap_dump_file)
- pin_stats_register_object ((char*) addr, safe_object_get_size (addr));
+ pin_stats_register_object ((char*) addr, safe_object_get_size ((MonoObject*) addr));
DEBUG (6, fprintf (gc_debug_file, "Marked pinned object %p (%s) from roots\n", addr, safe_name (addr)));
}
}
/* allocate the first page to the freelist */
chunk->page_sizes [0] = PINNED_FIRST_SLOT_SIZE;
build_freelist (chunk, slot_for_size (PINNED_FIRST_SLOT_SIZE), PINNED_FIRST_SLOT_SIZE, chunk->start_data, ((char*)chunk + FREELIST_PAGESIZE));
- DEBUG (4, fprintf (gc_debug_file, "Allocated pinned chunk %p, size: %zd\n", chunk, size));
+ DEBUG (4, fprintf (gc_debug_file, "Allocated pinned chunk %p, size: %d\n", chunk, size));
min_pinned_chunk_addr = MIN (min_pinned_chunk_addr, (char*)chunk->start_data);
max_pinned_chunk_addr = MAX (max_pinned_chunk_addr, ((char*)chunk + size));
return chunk;
return arr;
}
+void*
+mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
+{
+ MonoString *str;
+
+ LOCK_GC;
+
+ str = mono_gc_alloc_obj_nolock (vtable, size);
+ str->length = len;
+
+ UNLOCK_GC;
+
+ return str;
+}
+
/*
* To be used for interned strings and possibly MonoThread, reflection handles.
* We may want to explicitly free these objects.
void
mono_gc_conservatively_scan_area (void *start, void *end)
{
- g_assert_not_reached ();
- conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_OTHER);
+ conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end, PIN_TYPE_STACK);
}
void*
continue;
}
DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
- if (gc_callbacks.thread_mark_func)
+ if (gc_callbacks.thread_mark_func && !conservative_stack_mark)
gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
else if (!precise)
conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery, PIN_TYPE_STACK);
return FALSE;
}
-/* return TRUE if ptr points inside the managed heap */
-static gboolean
-ptr_in_heap (void* ptr)
-{
- mword p = (mword)ptr;
- LOSObject *bigobj;
- GCMemSection *section;
-
- if (!ADDR_IN_HEAP_BOUNDARIES (p))
- return FALSE;
-
- if (ptr_in_nursery (ptr))
- return TRUE;
-
- if (ptr_on_stack (ptr))
- return FALSE;
-
- for (section = section_list; section; section = section->block.next) {
- if (ptr >= (gpointer)section->data && ptr < (gpointer)(section->data + section->size))
- return TRUE;
- }
-
- if (obj_is_from_pinned_alloc (ptr))
- return TRUE;
-
- for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
- if (ptr >= (gpointer)bigobj->data && ptr < (gpointer)(bigobj->data + bigobj->size))
- return TRUE;
- }
-
- return FALSE;
-}
-
static mword*
handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global)
{
}
}
-/*
- * Find the tlab_next value of the TLAB which contains ADDR.
- */
-static char*
-find_tlab_next_from_address (char *addr)
-{
- SgenThreadInfo *info;
- int i;
-
- for (i = 0; i < THREAD_HASH_SIZE; ++i) {
- for (info = thread_table [i]; info; info = info->next) {
- /*
- * The allocator increments tlab_next before
- * checking whether that address is still in
- * the TLAB, so we have to check here.
- */
- char *next_addr = *info->tlab_next_addr;
- char *end_addr = *info->tlab_real_end_addr;
- if (next_addr > end_addr)
- next_addr = end_addr;
- if (addr >= *info->tlab_start_addr && addr < next_addr)
- return next_addr;
- }
- }
-
- return NULL;
-}
-
/* LOCKING: assumes the GC lock is held */
static SgenThreadInfo*
gc_register_current_thread (void *addr)
#ifdef HAVE_KW_THREAD
tlab_next_addr = &tlab_next;
+ store_remset_buffer_index_addr = &store_remset_buffer_index;
#endif
/* try to get it with attributes first */
}
}
-static char*
+/* for use in the debugger */
+char* find_object_for_ptr (char *ptr);
+char*
find_object_for_ptr (char *ptr)
{
GCMemSection *section;
{
GCVTable *vt;
int type_str = 0, type_rlen = 0, type_bitmap = 0, type_vector = 0, type_lbit = 0, type_complex = 0;
- new_obj_references = 0;
- obj_references_checked = 0;
while (start < end) {
if (!*(void**)start) {
start += sizeof (void*); /* should be ALLOC_ALIGN, really */
xdomain_checks = TRUE;
} else if (!strcmp (opt, "clear-at-gc")) {
nursery_clear_policy = CLEAR_AT_GC;
+ } else if (!strcmp (opt, "conservative-stack-mark")) {
+ conservative_stack_mark = TRUE;
} else if (g_str_has_prefix (opt, "heap-dump=")) {
char *filename = strchr (opt, '=') + 1;
nursery_clear_policy = CLEAR_AT_GC;
enum {
ATYPE_NORMAL,
ATYPE_VECTOR,
+ ATYPE_SMALL,
ATYPE_NUM
};
static gboolean registered = FALSE;
int tlab_next_addr_var, new_next_var;
int num_params, i;
+ const char *name = NULL;
+ AllocatorWrapperInfo *info;
#ifdef HAVE_KW_THREAD
int tlab_next_addr_offset = -1;
registered = TRUE;
}
- if (atype == ATYPE_NORMAL)
+ if (atype == ATYPE_SMALL) {
+ num_params = 1;
+ name = "AllocSmall";
+ } else if (atype == ATYPE_NORMAL) {
num_params = 1;
- else if (atype == ATYPE_VECTOR)
+ name = "Alloc";
+ } else if (atype == ATYPE_VECTOR) {
num_params = 2;
- else
+ name = "AllocVector";
+ } else {
g_assert_not_reached ();
+ }
csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
csig->ret = &mono_defaults.object_class->byval_arg;
for (i = 0; i < num_params; ++i)
csig->params [i] = &mono_defaults.int_class->byval_arg;
- mb = mono_mb_new (mono_defaults.object_class, "Alloc", MONO_WRAPPER_ALLOC);
+ mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
size_var = mono_mb_add_local (mb, &mono_defaults.int32_class->byval_arg);
- if (atype == ATYPE_NORMAL) {
+ if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
/* size = vtable->klass->instance_size; */
mono_mb_emit_ldarg (mb, 0);
mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
mono_mb_emit_stloc (mb, size_var);
} else if (atype == ATYPE_VECTOR) {
MonoExceptionClause *clause;
- int pos_leave;
+ int pos, pos_leave;
MonoClass *oom_exc_class;
MonoMethod *ctor;
+ /* n > MONO_ARRAY_MAX_INDEX -> OverflowException */
+ mono_mb_emit_ldarg (mb, 1);
+ mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
+ pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
+ mono_mb_emit_exception (mb, "OverflowException", NULL);
+ mono_mb_patch_short_branch (mb, pos);
+
clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
clause->try_offset = mono_mb_get_label (mb);
ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
g_assert (ctor);
+ mono_mb_emit_byte (mb, CEE_POP);
mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
mono_mb_emit_byte (mb, CEE_THROW);
mono_mb_emit_stloc (mb, size_var);
/* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
- mono_mb_emit_ldloc (mb, size_var);
- mono_mb_emit_icon (mb, MAX_SMALL_OBJ_SIZE);
- max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_S);
+ if (atype != ATYPE_SMALL) {
+ mono_mb_emit_ldloc (mb, size_var);
+ mono_mb_emit_icon (mb, MAX_SMALL_OBJ_SIZE);
+ max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_S);
+ }
/*
* We need to modify tlab_next, but the JIT only supports reading, so we read
slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
/* Slowpath */
-
- mono_mb_patch_short_branch (mb, max_size_branch);
+ if (atype != ATYPE_SMALL)
+ mono_mb_patch_short_branch (mb, max_size_branch);
mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
/* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
mono_mb_emit_ldarg (mb, 0);
mono_mb_emit_ldloc (mb, size_var);
- if (atype == ATYPE_NORMAL) {
+ if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
mono_mb_emit_icall (mb, mono_gc_alloc_obj);
} else if (atype == ATYPE_VECTOR) {
mono_mb_emit_ldarg (mb, 1);
res = mono_mb_create_method (mb, csig, 8);
mono_mb_free (mb);
mono_method_get_header (res)->init_locals = FALSE;
+
+ info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
+ info->alloc_type = atype;
+ mono_marshal_set_wrapper_info (res, info);
+
return res;
}
#endif
if (collect_before_allocs)
return NULL;
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
+ if (ALIGN_TO (klass->instance_size, ALLOC_ALIGN) < MAX_SMALL_OBJ_SIZE)
+ return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
+ else
+ return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
#else
return NULL;
#endif
#endif
}
-int
-mono_gc_get_managed_allocator_type (MonoMethod *managed_alloc)
-{
-#ifdef MANAGED_ALLOCATION
- int i;
-
- for (i = 0; i < ATYPE_NUM; ++i)
- if (managed_alloc == alloc_method_cache [i])
- return i;
-#endif
- g_assert_not_reached ();
- return -1;
-}
-
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype)
{
MonoMethodBuilder *mb;
MonoMethodSignature *sig;
#ifdef MANAGED_WBARRIER
- int label_no_wb, label_need_wb_1, label_need_wb_2, label2;
- int remset_var, next_var, dummy_var;
+ int label_no_wb_1, label_no_wb_2, label_no_wb_3, label_no_wb_4, label_need_wb, label_slow_path;
+ int buffer_var, buffer_index_var, dummy_var;
#ifdef HAVE_KW_THREAD
- int remset_offset = -1, stack_end_offset = -1;
+ int stack_end_offset = -1, store_remset_buffer_offset = -1;
+ int store_remset_buffer_index_offset = -1, store_remset_buffer_index_addr_offset = -1;
- MONO_THREAD_VAR_OFFSET (remembered_set, remset_offset);
MONO_THREAD_VAR_OFFSET (stack_end, stack_end_offset);
- g_assert (remset_offset != -1 && stack_end_offset != -1);
+ g_assert (stack_end_offset != -1);
+ MONO_THREAD_VAR_OFFSET (store_remset_buffer, store_remset_buffer_offset);
+ g_assert (store_remset_buffer_offset != -1);
+ MONO_THREAD_VAR_OFFSET (store_remset_buffer_index, store_remset_buffer_index_offset);
+ g_assert (store_remset_buffer_index_offset != -1);
+ MONO_THREAD_VAR_OFFSET (store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
+ g_assert (store_remset_buffer_index_addr_offset != -1);
#endif
#endif
#ifdef MANAGED_WBARRIER
if (mono_runtime_has_tls_get ()) {
- /* ptr_in_nursery () check */
#ifdef ALIGN_NURSERY
+ // if (ptr_in_nursery (ptr)) return;
/*
* Masking out the bits might be faster, but we would have to use 64 bit
* immediates, which might be slower.
mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
mono_mb_emit_byte (mb, CEE_SHR_UN);
mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
- label_no_wb = mono_mb_emit_branch (mb, CEE_BEQ);
+ label_no_wb_1 = mono_mb_emit_branch (mb, CEE_BEQ);
+
+ // if (!ptr_in_nursery (*ptr)) return;
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_byte (mb, CEE_LDIND_I);
+ mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
+ mono_mb_emit_byte (mb, CEE_SHR_UN);
+ mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
+ label_no_wb_2 = mono_mb_emit_branch (mb, CEE_BNE_UN);
#else
// FIXME:
g_assert_not_reached ();
#endif
- /* Need write barrier if ptr >= stack_end */
+ // if (ptr >= stack_end) goto need_wb;
mono_mb_emit_ldarg (mb, 0);
EMIT_TLS_ACCESS (mb, stack_end, stack_end_offset);
- label_need_wb_1 = mono_mb_emit_branch (mb, CEE_BGE_UN);
+ label_need_wb = mono_mb_emit_branch (mb, CEE_BGE_UN);
- /* Need write barrier if ptr < stack_start */
+ // if (ptr >= stack_start) return;
dummy_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
mono_mb_emit_ldarg (mb, 0);
mono_mb_emit_ldloc_addr (mb, dummy_var);
- label_need_wb_2 = mono_mb_emit_branch (mb, CEE_BLE_UN);
-
- /* Don't need write barrier case */
- mono_mb_patch_branch (mb, label_no_wb);
-
- mono_mb_emit_byte (mb, CEE_RET);
-
- /* Need write barrier case */
- mono_mb_patch_branch (mb, label_need_wb_1);
- mono_mb_patch_branch (mb, label_need_wb_2);
-
- // remset_var = remembered_set;
- remset_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
- EMIT_TLS_ACCESS (mb, remset, remset_offset);
- mono_mb_emit_stloc (mb, remset_var);
-
- // next_var = rs->store_next
- next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
- mono_mb_emit_ldloc (mb, remset_var);
- mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (RememberedSet, store_next));
- mono_mb_emit_byte (mb, CEE_LDIND_I);
- mono_mb_emit_stloc (mb, next_var);
-
- // if (rs->store_next < rs->end_set) {
- mono_mb_emit_ldloc (mb, next_var);
- mono_mb_emit_ldloc (mb, remset_var);
- mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (RememberedSet, end_set));
+ label_no_wb_3 = mono_mb_emit_branch (mb, CEE_BGE_UN);
+
+ // need_wb:
+ mono_mb_patch_branch (mb, label_need_wb);
+
+ // buffer = STORE_REMSET_BUFFER;
+ buffer_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
+ EMIT_TLS_ACCESS (mb, store_remset_buffer, store_remset_buffer_offset);
+ mono_mb_emit_stloc (mb, buffer_var);
+
+ // buffer_index = STORE_REMSET_BUFFER_INDEX;
+ buffer_index_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
+ EMIT_TLS_ACCESS (mb, store_remset_buffer_index, store_remset_buffer_index_offset);
+ mono_mb_emit_stloc (mb, buffer_index_var);
+
+ // if (buffer [buffer_index] == ptr) return;
+ mono_mb_emit_ldloc (mb, buffer_var);
+ mono_mb_emit_ldloc (mb, buffer_index_var);
+ g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
+ mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
+ mono_mb_emit_byte (mb, CEE_SHL);
+ mono_mb_emit_byte (mb, CEE_ADD);
mono_mb_emit_byte (mb, CEE_LDIND_I);
- label2 = mono_mb_emit_branch (mb, CEE_BGE);
-
- /* write barrier fast path */
- // *(rs->store_next++) = (mword)ptr;
- mono_mb_emit_ldloc (mb, next_var);
mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_byte (mb, CEE_STIND_I);
+ label_no_wb_4 = mono_mb_emit_branch (mb, CEE_BEQ);
- mono_mb_emit_ldloc (mb, next_var);
- mono_mb_emit_icon (mb, sizeof (gpointer));
+ // ++buffer_index;
+ mono_mb_emit_ldloc (mb, buffer_index_var);
+ mono_mb_emit_icon (mb, 1);
+ mono_mb_emit_byte (mb, CEE_ADD);
+ mono_mb_emit_stloc (mb, buffer_index_var);
+
+ // if (buffer_index >= STORE_REMSET_BUFFER_SIZE) goto slow_path;
+ mono_mb_emit_ldloc (mb, buffer_index_var);
+ mono_mb_emit_icon (mb, STORE_REMSET_BUFFER_SIZE);
+ label_slow_path = mono_mb_emit_branch (mb, CEE_BGE);
+
+ // buffer [buffer_index] = ptr;
+ mono_mb_emit_ldloc (mb, buffer_var);
+ mono_mb_emit_ldloc (mb, buffer_index_var);
+ g_assert (sizeof (gpointer) == 4 || sizeof (gpointer) == 8);
+ mono_mb_emit_icon (mb, sizeof (gpointer) == 4 ? 2 : 3);
+ mono_mb_emit_byte (mb, CEE_SHL);
mono_mb_emit_byte (mb, CEE_ADD);
- mono_mb_emit_stloc (mb, next_var);
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_byte (mb, CEE_STIND_I);
- mono_mb_emit_ldloc (mb, remset_var);
- mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (RememberedSet, store_next));
- mono_mb_emit_ldloc (mb, next_var);
+ // STORE_REMSET_BUFFER_INDEX = buffer_index;
+ EMIT_TLS_ACCESS (mb, store_remset_buffer_index_addr, store_remset_buffer_index_addr_offset);
+ mono_mb_emit_ldloc (mb, buffer_index_var);
mono_mb_emit_byte (mb, CEE_STIND_I);
- /* write barrier slow path */
- mono_mb_patch_branch (mb, label2);
+ // return;
+ mono_mb_patch_branch (mb, label_no_wb_1);
+ mono_mb_patch_branch (mb, label_no_wb_2);
+ mono_mb_patch_branch (mb, label_no_wb_3);
+ mono_mb_patch_branch (mb, label_no_wb_4);
+ mono_mb_emit_byte (mb, CEE_RET);
+
+ // slow path
+ mono_mb_patch_branch (mb, label_slow_path);
}
#endif