X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Fmetadata%2Fsgen-gc.c;h=3607182b8366f0637c4eaf21e098707866d401c2;hb=7f8a68150cd16aae4e59e49e1524c242da9cdad2;hp=1c1c20fcd9797a06a9d91c01f0fa36cfb90c2dfd;hpb=3d5198bcc65fa2f014d161856c936bb4b67ec609;p=mono.git diff --git a/mono/metadata/sgen-gc.c b/mono/metadata/sgen-gc.c index 1c1c20fcd97..3607182b836 100644 --- a/mono/metadata/sgen-gc.c +++ b/mono/metadata/sgen-gc.c @@ -4,7 +4,7 @@ * Author: * Paolo Molaro (lupus@ximian.com) * - * Copyright (C) 2005-2006 Novell, Inc + * Copyright 2005-2009 Novell, Inc (http://www.novell.com) * * Thread start/stop adapted from Boehm's GC: * Copyright (c) 1994 by Xerox Corporation. All rights reserved. @@ -99,7 +99,7 @@ Multi-dim arrays have the same issue for rank == 1 for the bounds data. *) implement a card table as the write barrier instead of remembered sets? *) some sort of blacklist support? - *) fin_ready_list is part of the root set, too + *) fin_ready_list and critical_fin_list are part of the root set, too *) consider lowering the large object min size to 16/32KB or so and benchmark *) once mark-compact is implemented we could still keep the copying collector for the old generation and use it if we think @@ -144,12 +144,24 @@ #include "metadata/threads.h" #include "metadata/sgen-gc.h" #include "metadata/mono-gc.h" +#include "metadata/method-builder.h" +#include "metadata/profiler-private.h" #include "utils/mono-mmap.h" #ifdef HAVE_VALGRIND_MEMCHECK_H #include #endif +#define OPDEF(a,b,c,d,e,f,g,h,i,j) \ + a = i, + +enum { +#include "mono/cil/opcode.def" + CEE_LAST +}; + +#undef OPDEF + /* * ###################################################################### * ######## Types and constants used by the GC. @@ -169,14 +181,16 @@ static gboolean collect_before_allocs = FALSE; /* If set, do a heap consistency check before each minor collection */ static gboolean consistency_check_at_minor_collection = FALSE; +/* void mono_gc_flush_info (void) { fflush (gc_debug_file); } +*/ -#define MAX_DEBUG_LEVEL 9 -#define DEBUG(level,a) do {if ((level) <= MAX_DEBUG_LEVEL && (level) <= gc_debug_level) a;} while (0) +#define MAX_DEBUG_LEVEL 8 +#define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0) #define TV_DECLARE(name) struct timeval name #define TV_GETTIME(tv) gettimeofday (&(tv), NULL) @@ -281,6 +295,26 @@ struct _PinnedChunk { void *data [1]; /* page sizes and free lists are stored here */ }; +/* The method used to clear the nursery */ +/* Clearing at nursery collections is the safest, but has bad interactions with caches. + * Clearing at TLAB creation is much faster, but more complex and it might expose hard + * to find bugs. + */ +typedef enum { + CLEAR_AT_GC, + CLEAR_AT_TLAB_CREATION +} NurseryClearPolicy; + +static NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION; + +/* + * If this is set, the nursery is aligned to an address aligned to its size, ie. + * a 1MB nursery will be aligned to an address divisible by 1MB. This allows us to + * speed up ptr_in_nursery () checks which are very frequent. This requires the + * nursery size to be a compile time constant. + */ +#define ALIGN_NURSERY 1 + /* * The young generation is divided into fragments. 
This is because * we can hand one fragments to a thread for lock-less fast alloc and @@ -293,6 +327,8 @@ struct _PinnedChunk { * We should start assigning threads very small fragments: if there are many * threads the nursery will be full of reserved space that the threads may not * use at all, slowing down allocation speed. + * Thread local allocation is done from areas of memory Hotspot calls Thread Local + * Allocation Buffers (TLABs). */ typedef struct _Fragment Fragment; @@ -329,14 +365,20 @@ enum { REMSET_LOCATION, /* just a pointer to the exact location */ REMSET_RANGE, /* range of pointer fields */ REMSET_OBJECT, /* mark all the object for scanning */ - REMSET_VTYPE, /* a valuetype described by a gc descriptor */ + REMSET_OTHER, /* all others */ REMSET_TYPE_MASK = 0x3 }; +/* Subtypes of REMSET_OTHER */ +enum { + REMSET_VTYPE, /* a valuetype described by a gc descriptor */ + REMSET_ROOT_LOCATION, /* a location inside a root */ +}; + static __thread RememberedSet *remembered_set MONO_TLS_FAST; static pthread_key_t remembered_set_key; static RememberedSet *global_remset; -static int store_to_global_remset = 0; +//static int store_to_global_remset = 0; /* FIXME: later choose a size that takes into account the RememberedSet struct * and doesn't waste any alloc paddin space. @@ -381,6 +423,11 @@ typedef struct { ((mword*)(obj))[0] &= ~PINNED_BIT; \ } while (0) +#ifdef ALIGN_NURSERY +#define ptr_in_nursery(ptr) (((mword)(ptr) & ~((1 << DEFAULT_NURSERY_BITS) - 1)) == (mword)nursery_start) +#else +#define ptr_in_nursery(ptr) ((char*)(ptr) >= nursery_start && (char*)(ptr) < nursery_real_end) +#endif /* * Since we set bits in the vtable, use the macro to load it from the pointer to @@ -395,7 +442,7 @@ safe_name (void* obj) return vt->klass->name; } -static guint +static inline guint safe_object_get_size (MonoObject* o) { MonoClass *klass = ((MonoVTable*)LOAD_VTABLE (o))->klass; @@ -404,7 +451,7 @@ safe_object_get_size (MonoObject* o) } else if (klass->rank) { MonoArray *array = (MonoArray*)o; size_t size = sizeof (MonoArray) + mono_array_element_size (klass) * mono_array_length (array); - if (array->bounds) { + if (G_UNLIKELY (array->bounds)) { size += 3; size &= ~3; size += sizeof (MonoArrayBounds) * klass->rank; @@ -417,7 +464,7 @@ safe_object_get_size (MonoObject* o) } static inline gboolean -is_half_constructed (MonoObject *o) +is_maybe_half_constructed (MonoObject *o) { MonoClass *klass; @@ -442,6 +489,8 @@ static int num_major_gcs = 0; /* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */ //#define DEFAULT_NURSERY_SIZE (1024*512*125+4096*118) #define DEFAULT_NURSERY_SIZE (1024*512*2) +/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */ +#define DEFAULT_NURSERY_BITS 20 #define DEFAULT_MAX_SECTION (DEFAULT_NURSERY_SIZE * 16) #define DEFAULT_LOS_COLLECTION_TARGET (DEFAULT_NURSERY_SIZE * 2) /* to quickly find the head of an object pinned by a conservative address @@ -479,13 +528,25 @@ typedef struct _FinalizeEntry FinalizeEntry; struct _FinalizeEntry { FinalizeEntry *next; void *object; - void *data; /* can be a disappearing link or the data for the finalizer */ - /* Note we could use just one pointer if we don't support multiple callbacks - * for finalizers and per-finalizer data and if we store the obj pointers - * in the link like libgc does - */ }; +typedef struct _DisappearingLink DisappearingLink; +struct _DisappearingLink { + DisappearingLink *next; + void **link; +}; + +/* + * The link pointer is hidden by negating each bit. 
We use the lowest + * bit of the link (before negation) to store whether it needs + * resurrection tracking. + */ +#define HIDE_POINTER(p,t) ((gpointer)(~((gulong)(p)|((t)?1:0)))) +#define REVEAL_POINTER(p) ((gpointer)((~(gulong)(p))&~3L)) + +#define DISLINK_OBJECT(d) (REVEAL_POINTER (*(d)->link)) +#define DISLINK_TRACK(d) ((~(gulong)(*(d)->link)) & 1) + /* * The finalizable hash has the object as the key, the * disappearing_link hash, has the link address as key. @@ -493,8 +554,8 @@ struct _FinalizeEntry { static FinalizeEntry **finalizable_hash = NULL; /* objects that are ready to be finalized */ static FinalizeEntry *fin_ready_list = NULL; -/* disappearing links use the same structure but a different list */ -static FinalizeEntry **disappearing_link_hash = NULL; +static FinalizeEntry *critical_fin_list = NULL; +static DisappearingLink **disappearing_link_hash = NULL; static mword disappearing_link_hash_size = 0; static mword finalizable_hash_size = 0; @@ -529,37 +590,66 @@ obj_is_from_pinned_alloc (char *p) return FALSE; } +enum { + ROOT_TYPE_NORMAL = 0, /* "normal" roots */ + ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */ + ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */ + ROOT_TYPE_NUM +}; + /* registered roots: the key to the hash is the root start address */ -static RootRecord **roots_hash = NULL; -static int roots_hash_size = 0; +/* + * Different kinds of roots are kept separate to speed up pin_from_roots () for example. + */ +static RootRecord **roots_hash [ROOT_TYPE_NUM] = { NULL, NULL }; +static int roots_hash_size [ROOT_TYPE_NUM] = { 0, 0, 0 }; static mword roots_size = 0; /* amount of memory in the root set */ -static int num_roots_entries = 0; +static int num_roots_entries [ROOT_TYPE_NUM] = { 0, 0, 0 }; /* * The current allocation cursors * We allocate objects in the nursery. * The nursery is the area between nursery_start and nursery_real_end. - * nursery_next is the pointer to the space where the next object will be allocated. - * nursery_temp_end is the pointer to the end of the temporary space reserved for - * the allocation: this allows us to allow allocations inside the fragments of the - * nursery (the empty holes between pinned objects) and it allows us to set the - * scan starts at reasonable intervals. - * nursery_next and nursery_temp_end will become per-thread vars to allow lock-free - * allocations. + * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated + * from nursery fragments. + * tlab_next is the pointer to the space inside the TLAB where the next object will + * be allocated. + * tlab_temp_end is the pointer to the end of the temporary space reserved for + * the allocation: it allows us to set the scan starts at reasonable intervals. + * tlab_real_end points to the end of the TLAB. + * nursery_frag_real_end points to the end of the currently used nursery fragment. * nursery_first_pinned_start points to the start of the first pinned object in the nursery * nursery_last_pinned_end points to the end of the last pinned object in the nursery * At the next allocation, the area of the nursery where objects can be present is * between MIN(nursery_first_pinned_start, first_fragment_start) and - * MAX(nursery_last_pinned_end, nursery_temp_end) + * MAX(nursery_last_pinned_end, nursery_frag_real_end) */ static char *nursery_start = NULL; + +/* + * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS + * variables for next+temp_end ? 
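+ *
+ * Whatever the layout, the thread-local pointers below keep the invariant
+ *
+ *   tlab_start <= tlab_next <= tlab_temp_end <= tlab_real_end
+ *
+ * Objects are bump-allocated at tlab_next; a scan start is recorded each
+ * time tlab_next crosses tlab_temp_end, and the TLAB is retired when an
+ * allocation would push tlab_next past tlab_real_end.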
+ */ +static __thread char *tlab_start; +static __thread char *tlab_next; +static __thread char *tlab_temp_end; +static __thread char *tlab_real_end; +/* Used by the managed allocator */ +static __thread char **tlab_next_addr; static char *nursery_next = NULL; -static char *nursery_temp_end = NULL; -static char *nursery_real_end = NULL; static char *nursery_frag_real_end = NULL; -static char *nursery_first_pinned_start = NULL; +static char *nursery_real_end = NULL; +//static char *nursery_first_pinned_start = NULL; static char *nursery_last_pinned_end = NULL; +/* The size of a TLAB */ +/* The bigger the value, the less often we have to go to the slow path to allocate a new + * one, but the more space is wasted by threads not allocating much memory. + * FIXME: Tune this. + * FIXME: Make this self-tuning for each thread. + */ +static guint32 tlab_size = (1024 * 4); + /* fragments that are free and ready to be used for allocation */ static Fragment *nursery_fragments = NULL; /* freeelist of fragment structures */ @@ -568,7 +658,7 @@ static Fragment *fragment_freelist = NULL; /* * used when moving the objects * When the nursery is collected, objects are copied to to_space. - * The area between to_space and gray_objects is used as a stack + * The area between gray_first and gray_objects is used as a stack * of objects that need their fields checked for more references * to be copied. * We should optimize somehow this mechanism to avoid rescanning @@ -576,6 +666,7 @@ static Fragment *fragment_freelist = NULL; * test cache misses and other graph traversal orders. */ static char *to_space = NULL; +static char *gray_first = NULL; static char *gray_objects = NULL; static char *to_space_end = NULL; static GCMemSection *to_space_section = NULL; @@ -583,6 +674,9 @@ static GCMemSection *to_space_section = NULL; /* objects bigger then this go into the large object space */ #define MAX_SMALL_OBJ_SIZE 0xffff +/* Functions supplied by the runtime to be called by the GC */ +static MonoGCCallbacks gc_callbacks; + /* * ###################################################################### * ######## Macros and function declarations. 
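The ALIGN_NURSERY trick above deserves a standalone illustration: because the nursery base is aligned to the nursery's own power-of-two size, ptr_in_nursery () reduces to one mask and one compare. A minimal sketch of that check, with uintptr_t standing in for the patch's mword and a fabricated base address used only for the demo:

    #include <stdio.h>
    #include <stdint.h>

    #define DEFAULT_NURSERY_BITS 20   /* log2 of the 1MB DEFAULT_NURSERY_SIZE */
    #define DEFAULT_NURSERY_SIZE ((uintptr_t)1 << DEFAULT_NURSERY_BITS)

    static uintptr_t nursery_start;

    /* Same shape as the patch's macro: mask off the offset bits and compare
     * the result against the size-aligned nursery base. */
    #define ptr_in_nursery(ptr) \
            (((uintptr_t)(ptr) & ~(DEFAULT_NURSERY_SIZE - 1)) == nursery_start)

    int
    main (void)
    {
            nursery_start = (uintptr_t)16 << DEFAULT_NURSERY_BITS; /* any size-aligned address */
            printf ("%d\n", ptr_in_nursery (nursery_start + 100));                  /* 1 */
            printf ("%d\n", ptr_in_nursery (nursery_start + DEFAULT_NURSERY_SIZE)); /* 0 */
            return 0;
    }
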
@@ -610,11 +704,11 @@ static void* get_internal_mem (size_t size); static void free_internal_mem (void *addr); static void* get_os_memory (size_t size, int activate); static void free_os_memory (void *addr, size_t size); -static void report_internal_mem_usage (void); +static G_GNUC_UNUSED void report_internal_mem_usage (void); static int stop_world (void); static int restart_world (void); -static void pin_thread_data (void *start_nursery, void *end_nursery); +static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise); static void scan_from_remsets (void *start_nursery, void *end_nursery); static void find_pinning_ref_from_thread (char *obj, size_t size); static void update_current_thread_stack (void *start); @@ -624,11 +718,16 @@ static void null_link_in_range (char *start, char *end); static gboolean search_fragment_for_size (size_t size); static void mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end); static void clear_remsets (void); +static void clear_tlabs (void); +static char *find_tlab_next_from_address (char *addr); static void sweep_pinned_objects (void); +static void scan_from_pinned_objects (char *addr_start, char *addr_end); static void free_large_object (LOSObject *obj); static void free_mem_section (GCMemSection *section); +void describe_ptr (char *ptr); void check_consistency (void); +char* check_object (char *start); /* * ###################################################################### @@ -690,22 +789,29 @@ enum { #define ALLOC_ALIGN 8 -/* Root bitmap descriptors are simpler: the lower two bits describe the type +/* Root bitmap descriptors are simpler: the lower three bits describe the type * and we either have 30/62 bitmap bits or nibble-based run-length, - * or a complex descriptor + * or a complex descriptor, or a user defined marker function. */ enum { ROOT_DESC_CONSERVATIVE, /* 0, so matches NULL value */ ROOT_DESC_BITMAP, - ROOT_DESC_RUN_LEN, - ROOT_DESC_LARGE_BITMAP, - ROOT_DESC_TYPE_MASK = 0x3, - ROOT_DESC_TYPE_SHIFT = 2, + ROOT_DESC_RUN_LEN, + ROOT_DESC_COMPLEX, + ROOT_DESC_USER, + ROOT_DESC_TYPE_MASK = 0x7, + ROOT_DESC_TYPE_SHIFT = 3, }; +#define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT)) + +#define MAX_USER_DESCRIPTORS 16 + static gsize* complex_descriptors = NULL; static int complex_descriptors_size = 0; static int complex_descriptors_next = 0; +static MonoGCMarkFunc user_descriptors [MAX_USER_DESCRIPTORS]; +static int user_descriptors_next = 0; static int alloc_complex_descriptor (gsize *bitmap, int numbits) @@ -770,7 +876,7 @@ mono_gc_make_descr_for_object (gsize *bitmap, int numbits, size_t obj_size) stored_size += ALLOC_ALIGN - 1; stored_size &= ~(ALLOC_ALIGN - 1); for (i = 0; i < numbits; ++i) { - if (bitmap [i / GC_BITS_PER_WORD] & (1 << (i % GC_BITS_PER_WORD))) { + if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) { if (first_set < 0) first_set = i; last_set = i; @@ -816,7 +922,7 @@ mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_ int first_set = -1, num_set = 0, last_set = -1, i; mword desc = vector? 
DESC_TYPE_VECTOR: DESC_TYPE_ARRAY; for (i = 0; i < numbits; ++i) { - if (elem_bitmap [i / GC_BITS_PER_WORD] & (1 << (i % GC_BITS_PER_WORD))) { + if (elem_bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) { if (first_set < 0) first_set = i; last_set = i; @@ -844,6 +950,41 @@ mono_gc_make_descr_for_array (int vector, gsize *elem_bitmap, int numbits, size_ return (void*) desc; } +/* Return the bitmap encoded by a descriptor */ +gsize* +mono_gc_get_bitmap_for_descr (void *descr, int *numbits) +{ + mword d = (mword)descr; + gsize *bitmap; + + switch (d & 0x7) { + case DESC_TYPE_RUN_LENGTH: { + int first_set = (d >> 16) & 0xff; + int num_set = (d >> 16) & 0xff; + int i; + + bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8); + + for (i = first_set; i < first_set + num_set; ++i) + bitmap [i / GC_BITS_PER_WORD] |= ((gsize)1 << (i % GC_BITS_PER_WORD)); + + *numbits = first_set + num_set; + + return bitmap; + } + case DESC_TYPE_SMALL_BITMAP: + bitmap = g_new0 (gsize, 1); + + bitmap [0] = (d >> SMALL_BITMAP_SHIFT) << OBJECT_HEADER_WORDS; + + *numbits = GC_BITS_PER_WORD; + + return bitmap; + default: + g_assert_not_reached (); + } +} + /* helper macros to scan and traverse objects, macros because we resue them in many functions */ #define STRING_SIZE(size,str) do { \ (size) = sizeof (MonoString) + 2 * (mono_string_length ((MonoString*)(str)) + 1); \ @@ -1035,6 +1176,7 @@ static mword obj_references_checked = 0; * This section of code deals with detecting the objects no longer in use * and reclaiming the memory. */ +#if 0 static void __attribute__((noinline)) scan_area (char *start, char *end) { @@ -1236,20 +1378,52 @@ mono_gc_clear_domain (MonoDomain * domain) /* FIXME: handle big and fixed objects (we remove, don't clear in this case) */ UNLOCK_GC; } +#endif +/* + * add_to_global_remset: + * + * The global remset contains locations which point into newspace after + * a minor collection. This can happen if the objects they point to are pinned. + */ static void -add_to_global_remset (gpointer ptr) +add_to_global_remset (gpointer ptr, gboolean root) { RememberedSet *rs; + DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr)); - if (global_remset->store_next < global_remset->end_set) { - *(global_remset->store_next++) = (mword)ptr; + + /* + * FIXME: If an object remains pinned, we need to add it at every minor collection. + * To avoid uncontrolled growth of the global remset, only add each pointer once. 
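+ *
+ * Encoding note: a plain location is stored as a single tagged word, while
+ * a root location takes two words -- (ptr | REMSET_OTHER) followed by
+ * REMSET_ROOT_LOCATION -- which is why the bounds check below leaves room
+ * for more than one slot.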
+ */ + if (global_remset->store_next + 3 < global_remset->end_set) { + if (root) { + *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER; + *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION; + } else { + *(global_remset->store_next++) = (mword)ptr; + } return; } rs = alloc_remset (global_remset->end_set - global_remset->data, NULL); rs->next = global_remset; global_remset = rs; - *(global_remset->store_next++) = (mword)ptr; + if (root) { + *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER; + *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION; + } else { + *(global_remset->store_next++) = (mword)ptr; + } + + { + int global_rs_size = 0; + + for (rs = global_remset; rs; rs = rs->next) { + global_rs_size += rs->store_next - rs->data; + } + DEBUG (4, fprintf (gc_debug_file, "Global remset now has size %d\n", global_rs_size)); + } } /* @@ -1275,6 +1449,12 @@ add_to_global_remset (gpointer ptr) static char* __attribute__((noinline)) copy_object (char *obj, char *from_space_start, char *from_space_end) { + static void *copy_labels [] = { &&LAB_0, &&LAB_1, &&LAB_2, &&LAB_3, &&LAB_4, &&LAB_5, &&LAB_6, &&LAB_7, &&LAB_8 }; + + /* + * FIXME: The second set of checks is only needed if we are called for tospace + * objects too. + */ if (obj >= from_space_start && obj < from_space_end && (obj < to_space || obj >= to_space_end)) { MonoVTable *vt; char *forwarded; @@ -1300,13 +1480,35 @@ copy_object (char *obj, char *from_space_start, char *from_space_end) * At the end of major collections, we walk the los list and if * the object is pinned, it is marked, otherwise it can be freed. */ - if (objsize >= MAX_SMALL_OBJ_SIZE || (obj >= min_pinned_chunk_addr && obj < max_pinned_chunk_addr && obj_is_from_pinned_alloc (obj))) { + if (G_UNLIKELY (objsize >= MAX_SMALL_OBJ_SIZE || (obj >= min_pinned_chunk_addr && obj < max_pinned_chunk_addr && obj_is_from_pinned_alloc (obj)))) { DEBUG (9, fprintf (gc_debug_file, "Marked LOS/Pinned %p (%s), size: %zd\n", obj, safe_name (obj), objsize)); pin_object (obj); return obj; } /* ok, the object is not pinned, we can move it */ /* use a optimized memcpy here */ + if (objsize <= sizeof (gpointer) * 8) { + mword *dest = (mword*)gray_objects; + goto *copy_labels [objsize / sizeof (gpointer)]; + LAB_8: + (dest) [7] = ((mword*)obj) [7]; + LAB_7: + (dest) [6] = ((mword*)obj) [6]; + LAB_6: + (dest) [5] = ((mword*)obj) [5]; + LAB_5: + (dest) [4] = ((mword*)obj) [4]; + LAB_4: + (dest) [3] = ((mword*)obj) [3]; + LAB_3: + (dest) [2] = ((mword*)obj) [2]; + LAB_2: + (dest) [1] = ((mword*)obj) [1]; + LAB_1: + (dest) [0] = ((mword*)obj) [0]; + LAB_0: + ; + } else { #if 0 { int ecx; @@ -1322,10 +1524,11 @@ copy_object (char *obj, char *from_space_start, char *from_space_end) #else memcpy (gray_objects, obj, objsize); #endif + } /* adjust array->bounds */ vt = ((MonoObject*)obj)->vtable; g_assert (vt->gc_descr); - if (vt->rank && ((MonoArray*)obj)->bounds) { + if (G_UNLIKELY (vt->rank && ((MonoArray*)obj)->bounds)) { MonoArray *array = (MonoArray*)gray_objects; array->bounds = (MonoArrayBounds*)((char*)gray_objects + ((char*)((MonoArray*)obj)->bounds - (char*)obj)); DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %zd, rank: %d, length: %d\n", array, objsize, vt->rank, mono_array_length (array))); @@ -1343,12 +1546,12 @@ copy_object (char *obj, char *from_space_start, char *from_space_end) #undef HANDLE_PTR #define HANDLE_PTR(ptr,obj) do { \ - if (*(ptr)) { \ - void *__old = *(ptr); \ - *(ptr) = copy_object (*(ptr), from_start, from_end); \ + void 
*__old = *(ptr); \ + if (__old) { \ + *(ptr) = copy_object (__old, from_start, from_end); \ DEBUG (9, if (__old != *(ptr)) fprintf (gc_debug_file, "Overwrote field at %p with %p (was: %p)\n", (ptr), *(ptr), __old)); \ - if (*(ptr) >= (void*)from_start && *(ptr) < (void*)from_end) \ - add_to_global_remset ((ptr)); \ + if (G_UNLIKELY (*(ptr) >= (void*)from_start && *(ptr) < (void*)from_end) && !ptr_in_nursery (ptr)) \ + add_to_global_remset ((ptr), FALSE); \ } \ } while (0) @@ -1432,6 +1635,26 @@ scan_object (char *start, char* from_start, char* from_end) return NULL; } +/* + * drain_gray_stack: + * + * Scan objects in the gray stack until the stack is empty. This should be called + * frequently after each object is copied, to achieve better locality and cache + * usage. + */ +static void inline +drain_gray_stack (char *start_addr, char *end_addr) +{ + char *gray_start = gray_first; + + while (gray_start < gray_objects) { + DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", gray_start, safe_name (gray_start))); + gray_start = scan_object (gray_start, start_addr, end_addr); + } + + gray_first = gray_start; +} + /* * scan_vtype: * @@ -1601,7 +1824,7 @@ sort_addresses (void **array, int size) } } -static void +static G_GNUC_UNUSED void print_nursery_gaps (void* start_nursery, void *end_nursery) { int i; @@ -1716,7 +1939,7 @@ conservatively_pin_objects_from (void **start, void **end, void *start_nursery, * is no write in the old generation area where the pinned object is referenced * and we may not consider it as reachable. */ -static void +static G_GNUC_UNUSED void mark_pinned_objects (int generation) { } @@ -1724,14 +1947,14 @@ mark_pinned_objects (int generation) /* * Debugging function: find in the conservative roots where @obj is being pinned. 
*/ -static void +static G_GNUC_UNUSED void find_pinning_reference (char *obj, size_t size) { RootRecord *root; int i; char *endobj = obj + size; - for (i = 0; i < roots_hash_size; ++i) { - for (root = roots_hash [i]; root; root = root->next) { + for (i = 0; i < roots_hash_size [0]; ++i) { + for (root = roots_hash [0][i]; root; root = root->next) { /* if desc is non-null it has precise info */ if (!root->root_desc) { char ** start = (char**)root->start_root; @@ -1757,13 +1980,10 @@ pin_from_roots (void *start_nursery, void *end_nursery) { RootRecord *root; int i; - DEBUG (3, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d entries)\n", (int)roots_size, num_roots_entries)); + DEBUG (2, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d/%d entries)\n", (int)roots_size, num_roots_entries [ROOT_TYPE_NORMAL], num_roots_entries [ROOT_TYPE_PINNED])); /* objects pinned from the API are inside these roots */ - for (i = 0; i < roots_hash_size; ++i) { - for (root = roots_hash [i]; root; root = root->next) { - /* if desc is non-null it has precise info */ - if (root->root_desc) - continue; + for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) { + for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) { DEBUG (6, fprintf (gc_debug_file, "Pinned roots %p-%p\n", root->start_root, root->end_root)); conservatively_pin_objects_from ((void**)root->start_root, (void**)root->end_root, start_nursery, end_nursery); } @@ -1775,7 +1995,20 @@ pin_from_roots (void *start_nursery, void *end_nursery) * *) the _last_ managed stack frame * *) pointers slots in managed frames */ - pin_thread_data (start_nursery, end_nursery); + scan_thread_data (start_nursery, end_nursery, FALSE); +} + +/* Copy function called from user defined mark functions */ +static char *user_copy_n_start; +static char *user_copy_n_end; + +static void* +user_copy (void *addr) +{ + if (addr) + return copy_object (addr, user_copy_n_start, user_copy_n_end); + else + return NULL; } /* @@ -1793,14 +2026,45 @@ precisely_scan_objects_from (void** start_root, void** end_root, char* n_start, while (desc) { if ((desc & 1) && *start_root) { *start_root = copy_object (*start_root, n_start, n_end); - DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root)); \ + DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root)); + drain_gray_stack (n_start, n_end); } desc >>= 1; start_root++; } return; + case ROOT_DESC_COMPLEX: { + gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT); + int bwords = (*bitmap_data) - 1; + void **start_run = start_root; + bitmap_data++; + while (bwords-- > 0) { + gsize bmap = *bitmap_data++; + void **objptr = start_run; + while (bmap) { + if ((bmap & 1) && *objptr) { + *objptr = copy_object (*objptr, n_start, n_end); + DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr)); + drain_gray_stack (n_start, n_end); + } + bmap >>= 1; + ++objptr; + } + start_run += GC_BITS_PER_WORD; + } + break; + } + case ROOT_DESC_USER: { + MonoGCMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT]; + + user_copy_n_start = n_start; + user_copy_n_end = n_end; + marker (start_root, user_copy); + break; + } case ROOT_DESC_RUN_LEN: - case ROOT_DESC_LARGE_BITMAP: + g_assert_not_reached (); + default: g_assert_not_reached (); } } @@ -1830,6 +2094,7 @@ alloc_nursery (void) char *data; int scan_starts; Fragment *frag; + int alloc_size; if (nursery_section) return; @@ -1840,17 +2105,30 @@ 
alloc_nursery (void) */ /* FIXME: handle OOM */ section = get_internal_mem (sizeof (GCMemSection)); - data = get_os_memory (nursery_size, TRUE); - nursery_start = nursery_next = data; - nursery_real_end = data + nursery_size; - nursery_temp_end = data + SCAN_START_SIZE; + +#ifdef ALIGN_NURSERY + /* Allocate twice the memory to be able to put the nursery at an aligned address */ + g_assert (nursery_size == DEFAULT_NURSERY_SIZE); + + alloc_size = nursery_size * 2; + data = get_os_memory (alloc_size, TRUE); + nursery_start = (void*)(((mword)data + (1 << DEFAULT_NURSERY_BITS) - 1) & ~((1 << DEFAULT_NURSERY_BITS) - 1)); + g_assert ((char*)nursery_start + nursery_size <= ((char*)data + alloc_size)); + /* FIXME: Use the remaining size for something else, if it is big enough */ +#else + alloc_size = nursery_size; + data = get_os_memory (alloc_size, TRUE); + nursery_start = data; +#endif + nursery_real_end = nursery_start + nursery_size; UPDATE_HEAP_BOUNDARIES (nursery_start, nursery_real_end); - total_alloc += nursery_size; + nursery_next = nursery_start; + total_alloc += alloc_size; DEBUG (4, fprintf (gc_debug_file, "Expanding heap size: %zd, total: %zd\n", nursery_size, total_alloc)); section->data = section->next_data = data; - section->size = nursery_size; + section->size = alloc_size; section->end_data = nursery_real_end; - scan_starts = nursery_size / SCAN_START_SIZE; + scan_starts = alloc_size / SCAN_START_SIZE; section->scan_starts = get_internal_mem (sizeof (char*) * scan_starts); section->num_scan_start = scan_starts; section->role = MEMORY_ROLE_GEN0; @@ -1870,15 +2148,26 @@ alloc_nursery (void) /* FIXME: frag here is lost */ } +static void +scan_finalizer_entries (FinalizeEntry *list, char *start, char *end) { + FinalizeEntry *fin; + + for (fin = list; fin; fin = fin->next) { + if (!fin->object) + continue; + DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object))); + fin->object = copy_object (fin->object, start, end); + } +} + /* * Update roots in the old generation. Since we currently don't have the * info from the write barriers, we just scan all the objects. 
*/ -static void +static G_GNUC_UNUSED void scan_old_generation (char *start, char* end) { GCMemSection *section; - FinalizeEntry *fin; LOSObject *big_object; char *p; @@ -1903,10 +2192,8 @@ scan_old_generation (char *start, char* end) scan_object (big_object->data, start, end); } /* scan the list of objects ready for finalization */ - for (fin = fin_ready_list; fin; fin = fin->next) { - DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object))); - fin->object = copy_object (fin->object, start, end); - } + scan_finalizer_entries (fin_ready_list, start, end); + scan_finalizer_entries (critical_fin_list, start, end); } static mword fragment_total = 0; @@ -1921,7 +2208,8 @@ add_nursery_frag (size_t frag_size, char* frag_start, char* frag_end) Fragment *fragment; DEBUG (4, fprintf (gc_debug_file, "Found empty fragment: %p-%p, size: %zd\n", frag_start, frag_end, frag_size)); /* memsetting just the first chunk start is bound to provide better cache locality */ - memset (frag_start, 0, frag_size); + if (nursery_clear_policy == CLEAR_AT_GC) + memset (frag_start, 0, frag_size); /* Not worth dealing with smaller fragments: need to tune */ if (frag_size >= FRAGMENT_MIN_SIZE) { fragment = alloc_fragment (); @@ -1931,6 +2219,9 @@ add_nursery_frag (size_t frag_size, char* frag_start, char* frag_end) fragment->next = nursery_fragments; nursery_fragments = fragment; fragment_total += frag_size; + } else { + /* Clear unused fragments, pinning depends on this */ + memset (frag_start, 0, frag_size); } } @@ -1951,7 +2242,7 @@ scan_needed_big_objects (char *start_addr, char *end_addr) } static void -drain_gray_stack (char *start_addr, char *end_addr) +finish_gray_stack (char *start_addr, char *end_addr) { TV_DECLARE (atv); TV_DECLARE (btv); @@ -1968,16 +2259,15 @@ drain_gray_stack (char *start_addr, char *end_addr) * We need to walk the LO list as well in search of marked big objects * (use a flag since this is needed only on major collections). We need to loop * here as well, so keep a counter of marked LO (increasing it in copy_object). + * To achieve better cache locality and cache usage, we drain the gray stack + * frequently, after each object is copied, and just finish the work here. */ - TV_GETTIME (btv); - gray_start = to_space; - DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area: %p-%p, size: %d\n", gray_start, gray_objects, (int)(gray_objects - gray_start))); + gray_start = gray_first; while (gray_start < gray_objects) { DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", gray_start, safe_name (gray_start))); gray_start = scan_object (gray_start, start_addr, end_addr); } TV_GETTIME (atv); - DEBUG (2, fprintf (gc_debug_file, "Gray stack scan: %d usecs\n", TV_ELAPSED (btv, atv))); //scan_old_generation (start_addr, end_addr); DEBUG (2, fprintf (gc_debug_file, "Old generation done\n")); /* walk the finalization queue and move also the objects that need to be @@ -2020,7 +2310,7 @@ drain_gray_stack (char *start_addr, char *end_addr) static int last_num_pinned = 0; static void -build_nursery_fragments (int start_pin, int end_pin, char *nursery_last_allocated) +build_nursery_fragments (int start_pin, int end_pin) { char *frag_start, *frag_end; size_t frag_size; @@ -2051,10 +2341,23 @@ build_nursery_fragments (int start_pin, int end_pin, char *nursery_last_allocate * (zero initialized) object. Find the end of the object by scanning forward. 
* */ - if (is_half_constructed (pin_queue [i])) { - /* Can't use nursery_next as the limit as it is modified in collect_nursery () */ - while ((frag_start < nursery_last_allocated) && *(mword*)frag_start == 0) - frag_start += sizeof (mword); + if (is_maybe_half_constructed (pin_queue [i])) { + char *tlab_end; + + /* This is also hit for zero length arrays/strings */ + + /* Find the end of the TLAB which contained this allocation */ + tlab_end = find_tlab_next_from_address (pin_queue [i]); + + if (tlab_end) { + while ((frag_start < tlab_end) && *(mword*)frag_start == 0) + frag_start += sizeof (mword); + } else { + /* + * FIXME: The object is either not allocated in a TLAB, or it isn't a + * half constructed object. + */ + } } } nursery_last_pinned_end = frag_start; @@ -2069,6 +2372,11 @@ build_nursery_fragments (int start_pin, int end_pin, char *nursery_last_allocate } degraded_mode = 1; } + + nursery_next = nursery_frag_real_end = NULL; + + /* Clear TLABs for all threads */ + clear_tlabs (); } /* FIXME: later reduce code duplication here with the above @@ -2111,15 +2419,12 @@ build_section_fragments (GCMemSection *section) } static void -scan_from_registered_roots (char *addr_start, char *addr_end) +scan_from_registered_roots (char *addr_start, char *addr_end, int root_type) { int i; RootRecord *root; - for (i = 0; i < roots_hash_size; ++i) { - for (root = roots_hash [i]; root; root = root->next) { - /* if desc is non-null it has precise info */ - if (!root->root_desc) - continue; + for (i = 0; i < roots_hash_size [root_type]; ++i) { + for (root = roots_hash [root_type][i]; root; root = root->next) { DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc)); precisely_scan_objects_from ((void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc); } @@ -2135,14 +2440,15 @@ collect_nursery (size_t requested_size) GCMemSection *section; size_t max_garbage_amount; int i; - char *nursery_last_allocated; + char *orig_nursery_next; + Fragment *frag; TV_DECLARE (all_atv); TV_DECLARE (all_btv); TV_DECLARE (atv); TV_DECLARE (btv); degraded_mode = 0; - nursery_last_allocated = nursery_next; + orig_nursery_next = nursery_next; nursery_next = MAX (nursery_next, nursery_last_pinned_end); /* FIXME: optimize later to use the higher address where an object can be present */ nursery_next = MAX (nursery_next, nursery_real_end); @@ -2152,17 +2458,27 @@ collect_nursery (size_t requested_size) DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", num_minor_gcs, nursery_start, nursery_next, (int)(nursery_next - nursery_start))); max_garbage_amount = nursery_next - nursery_start; + + /* Clear all remaining nursery fragments, pinning depends on this */ + if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) { + g_assert (orig_nursery_next <= nursery_frag_real_end); + memset (orig_nursery_next, 0, nursery_frag_real_end - orig_nursery_next); + for (frag = nursery_fragments; frag; frag = frag->next) { + memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start); + } + } + /* * not enough room in the old generation to store all the possible data from * the nursery in a single continuous space. * We reset to_space if we allocated objects in degraded mode. 
*/ if (to_space_section) - to_space = gray_objects = to_space_section->next_data; + to_space = gray_objects = gray_first = to_space_section->next_data; if ((to_space_end - to_space) < max_garbage_amount) { section = alloc_section (nursery_section->size * 4); g_assert (nursery_section->size >= max_garbage_amount); - to_space = gray_objects = section->next_data; + to_space = gray_objects = gray_first = section->next_data; to_space_end = section->end_data; to_space_section = section; } @@ -2192,7 +2508,6 @@ collect_nursery (size_t requested_size) /* we don't have complete write barrier yet, so we scan all the old generation sections */ TV_GETTIME (atv); DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (btv, atv))); - /* FIXME: later scan also alloc_pinned objects */ /* the pinned objects are roots */ for (i = 0; i < next_pin_slot; ++i) { @@ -2200,17 +2515,20 @@ collect_nursery (size_t requested_size) scan_object (pin_queue [i], nursery_start, nursery_next); } /* registered roots, this includes static fields */ - scan_from_registered_roots (nursery_start, nursery_next); + scan_from_registered_roots (nursery_start, nursery_next, ROOT_TYPE_NORMAL); + scan_thread_data (nursery_start, nursery_next, TRUE); + /* alloc_pinned objects */ + scan_from_pinned_objects (nursery_start, nursery_next); TV_GETTIME (btv); DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (atv, btv))); - drain_gray_stack (nursery_start, nursery_next); + finish_gray_stack (nursery_start, nursery_next); /* walk the pin_queue, build up the fragment list of free memory, unmark * pinned objects as we go, memzero() the empty fragments so they are ready for the * next allocations. */ - build_nursery_fragments (0, next_pin_slot, nursery_last_allocated); + build_nursery_fragments (0, next_pin_slot); TV_GETTIME (atv); DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %zd bytes available\n", TV_ELAPSED (btv, atv), fragment_total)); @@ -2220,7 +2538,7 @@ collect_nursery (size_t requested_size) /* prepare the pin queue for the next collection */ last_num_pinned = next_pin_slot; next_pin_slot = 0; - if (fin_ready_list) { + if (fin_ready_list || critical_fin_list) { DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers)); mono_gc_finalize_notify (); } @@ -2233,7 +2551,7 @@ major_collection (void) LOSObject *bigobj, *prevbo; int i; PinnedChunk *chunk; - FinalizeEntry *fin; + Fragment *frag; int count; TV_DECLARE (all_atv); TV_DECLARE (all_btv); @@ -2250,6 +2568,16 @@ major_collection (void) DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs)); num_major_gcs++; mono_stats.major_gc_count ++; + + /* Clear all remaining nursery fragments, pinning depends on this */ + if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) { + g_assert (nursery_next <= nursery_frag_real_end); + memset (nursery_next, 0, nursery_frag_real_end - nursery_next); + for (frag = nursery_fragments; frag; frag = frag->next) { + memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start); + } + } + /* * FIXME: implement Mark/Compact * Until that is done, we can just apply mostly the same alg as for the nursery: @@ -2314,7 +2642,7 @@ major_collection (void) /* allocate the big to space */ DEBUG (4, fprintf (gc_debug_file, "Allocate tospace for size: %zd\n", copy_space_required)); section = alloc_section (copy_space_required); - to_space = gray_objects = section->next_data; + to_space = gray_objects = gray_first = section->next_data; 
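+ /* Invariant while copying: [to_space, gray_first) has already been scanned,
+  * [gray_first, gray_objects) holds copied-but-unscanned "gray" objects, and
+  * drain_gray_stack () advances gray_first through them. */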
to_space_end = section->end_data; to_space_section = section; @@ -2330,23 +2658,25 @@ major_collection (void) scan_object (pin_queue [i], heap_start, heap_end); } /* registered roots, this includes static fields */ - scan_from_registered_roots (heap_start, heap_end); - + scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_NORMAL); + scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_WBARRIER); + /* Threads */ + scan_thread_data (heap_start, heap_end, TRUE); + /* alloc_pinned objects */ + scan_from_pinned_objects (heap_start, heap_end); /* scan the list of objects ready for finalization */ - for (fin = fin_ready_list; fin; fin = fin->next) { - DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object))); - fin->object = copy_object (fin->object, heap_start, heap_end); - } + scan_finalizer_entries (fin_ready_list, heap_start, heap_end); + scan_finalizer_entries (critical_fin_list, heap_start, heap_end); TV_GETTIME (atv); DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv))); /* we need to go over the big object list to see if any was marked and scan it * And we need to make this in a loop, considering that objects referenced by finalizable - * objects could reference big objects (this happens in drain_gray_stack ()) + * objects could reference big objects (this happens in finish_gray_stack ()) */ scan_needed_big_objects (heap_start, heap_end); /* all the objects in the heap */ - drain_gray_stack (heap_start, heap_end); + finish_gray_stack (heap_start, heap_end); /* sweep the big objects list */ prevbo = NULL; @@ -2404,13 +2734,13 @@ major_collection (void) * pinned objects as we go, memzero() the empty fragments so they are ready for the * next allocations. */ - build_nursery_fragments (nursery_section->pin_queue_start, nursery_section->pin_queue_end, nursery_next); + build_nursery_fragments (nursery_section->pin_queue_start, nursery_section->pin_queue_end); TV_GETTIME (all_btv); mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv); /* prepare the pin queue for the next collection */ next_pin_slot = 0; - if (fin_ready_list) { + if (fin_ready_list || critical_fin_list) { DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers)); mono_gc_finalize_notify (); } @@ -2496,7 +2826,7 @@ minor_collect_or_expand_inner (size_t size) if (!search_fragment_for_size (size)) { int i; /* TypeBuilder and MonoMethod are killing mcs with fragmentation */ - DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)", size, last_num_pinned)); + DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", size, last_num_pinned)); for (i = 0; i < last_num_pinned; ++i) { DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", pin_queue [i], safe_name (pin_queue [i]), safe_object_get_size (pin_queue [i]))); } @@ -2573,7 +2903,7 @@ report_pinned_chunk (PinnedChunk *chunk, int seq) { /* * Debug reporting. 
*/ -static void +static G_GNUC_UNUSED void report_internal_mem_usage (void) { PinnedChunk *chunk; int i; @@ -2642,7 +2972,7 @@ sweep_pinned_objects (void) void *end_chunk; for (chunk = pinned_chunk_list; chunk; chunk = chunk->next) { end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE; - DEBUG (6, fprintf (gc_debug_file, "Sweeping pinned chunk %p (ranhe: %p-%p)\n", chunk, chunk->start_data, end_chunk)); + DEBUG (6, fprintf (gc_debug_file, "Sweeping pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk)); for (i = 0; i < chunk->num_pages; ++i) { obj_size = chunk->page_sizes [i]; if (!obj_size) @@ -2669,6 +2999,40 @@ sweep_pinned_objects (void) } } +static void +scan_from_pinned_objects (char *addr_start, char *addr_end) +{ + PinnedChunk *chunk; + int i, obj_size; + char *p, *endp; + void **ptr; + void *end_chunk; + for (chunk = pinned_chunk_list; chunk; chunk = chunk->next) { + end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE; + DEBUG (6, fprintf (gc_debug_file, "Scanning pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk)); + for (i = 0; i < chunk->num_pages; ++i) { + obj_size = chunk->page_sizes [i]; + if (!obj_size) + continue; + p = i? (char*)chunk + i * FREELIST_PAGESIZE: chunk->start_data; + endp = i? p + FREELIST_PAGESIZE: (char*)chunk + FREELIST_PAGESIZE; + DEBUG (6, fprintf (gc_debug_file, "Page %d (size: %d, range: %p-%p)\n", i, obj_size, p, endp)); + while (p + obj_size <= endp) { + ptr = (void**)p; + DEBUG (9, fprintf (gc_debug_file, "Considering %p (vtable: %p)\n", ptr, *ptr)); + /* if the first word (the vtable) is outside the chunk we have an object */ + if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk)) { + DEBUG (6, fprintf (gc_debug_file, "Precise object scan %d of alloc_pinned %p (%s)\n", i, ptr, safe_name (ptr))); + // FIXME: Put objects without references into separate chunks + // which do not need to be scanned + scan_object ((char*)ptr, addr_start, addr_end); + } + p += obj_size; + } + } + } +} + /* * Find the slot number in the freelist for memory chunks that * can contain @size objects. 
@@ -2941,6 +3305,11 @@ search_fragment_for_size (size_t size) { Fragment *frag, *prev; DEBUG (4, fprintf (gc_debug_file, "Searching nursery fragment %p, size: %zd\n", nursery_frag_real_end, size)); + + if (nursery_frag_real_end > nursery_next && nursery_clear_policy == CLEAR_AT_TLAB_CREATION) + /* Clear the remaining space, pinning depends on this */ + memset (nursery_next, 0, nursery_frag_real_end - nursery_next); + prev = NULL; for (frag = nursery_fragments; frag; frag = frag->next) { if (size <= (frag->fragment_end - frag->fragment_start)) { @@ -2951,7 +3320,6 @@ search_fragment_for_size (size_t size) nursery_fragments = frag->next; nursery_next = frag->fragment_start; nursery_frag_real_end = frag->fragment_end; - nursery_temp_end = MIN (nursery_frag_real_end, nursery_next + size + SCAN_START_SIZE); DEBUG (4, fprintf (gc_debug_file, "Using nursery fragment %p-%p, size: %zd (req: %zd)\n", nursery_next, nursery_frag_real_end, nursery_frag_real_end - nursery_next, size)); frag->next = fragment_freelist; @@ -3002,17 +3370,20 @@ mono_gc_alloc_obj (MonoVTable *vtable, size_t size) { /* FIXME: handle OOM */ void **p; + char *new_next; int dummy; + gboolean res; size += ALLOC_ALIGN - 1; size &= ~(ALLOC_ALIGN - 1); g_assert (vtable->gc_descr); - LOCK_GC; - if (collect_before_allocs) { + if (G_UNLIKELY (collect_before_allocs)) { int dummy; if (nursery_section) { + LOCK_GC; + update_current_thread_stack (&dummy); stop_world (); collect_nursery (0); @@ -3021,62 +3392,137 @@ mono_gc_alloc_obj (MonoVTable *vtable, size_t size) // FIXME: g_assert_not_reached (); } + UNLOCK_GC; } } - p = (void**)nursery_next; + /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */ + + p = (void**)tlab_next; /* FIXME: handle overflow */ - nursery_next += size; - if (nursery_next >= nursery_temp_end) { - /* there are two cases: the object is too big or we need to collect */ - /* there can be another case (from ORP), if we cooperate with the runtime a bit: - * objects that need finalizers can have the high bit set in their size - * so the above check fails and we can readily add the object to the queue. - * This avoids taking again the GC lock when registering, but this is moot when - * doing thread-local allocation, so it may not be a good idea. + new_next = (char*)p + size; + tlab_next = new_next; + + if (G_LIKELY (new_next < tlab_temp_end)) { + /* Fast path */ + + /* + * FIXME: We might need a memory barrier here so the change to tlab_next is + * visible before the vtable store. */ - if (size > MAX_SMALL_OBJ_SIZE) { - /* get ready for possible collection */ - update_current_thread_stack (&dummy); - nursery_next -= size; - p = alloc_large_inner (vtable, size); - } else { - if (nursery_next >= nursery_frag_real_end) { - nursery_next -= size; - /* when running in degraded mode, we continue allocing that way - * for a while, to decrease the number of useless nursery collections. 
- */ - if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) { - p = alloc_degraded (vtable, size); - UNLOCK_GC; - return p; - } - if (!search_fragment_for_size (size)) { - /* get ready for possible collection */ - update_current_thread_stack (&dummy); - minor_collect_or_expand_inner (size); - if (degraded_mode) { - p = alloc_degraded (vtable, size); - UNLOCK_GC; - return p; + + DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size)); + *p = vtable; + + return p; + } + + /* Slow path */ + + /* there are two cases: the object is too big or we run out of space in the TLAB */ + /* we also reach here when the thread does its first allocation after a minor + * collection, since the tlab_ variables are initialized to NULL. + * there can be another case (from ORP), if we cooperate with the runtime a bit: + * objects that need finalizers can have the high bit set in their size + * so the above check fails and we can readily add the object to the queue. + * This avoids taking again the GC lock when registering, but this is moot when + * doing thread-local allocation, so it may not be a good idea. + */ + LOCK_GC; + if (size > MAX_SMALL_OBJ_SIZE) { + /* get ready for possible collection */ + update_current_thread_stack (&dummy); + tlab_next -= size; + p = alloc_large_inner (vtable, size); + } else { + if (tlab_next >= tlab_real_end) { + /* + * Run out of space in the TLAB. When this happens, some amount of space + * remains in the TLAB, but not enough to satisfy the current allocation + * request. Currently, we retire the TLAB in all cases, later we could + * keep it if the remaining space is above a treshold, and satisfy the + * allocation directly from the nursery. + */ + tlab_next -= size; + /* when running in degraded mode, we continue allocing that way + * for a while, to decrease the number of useless nursery collections. 
+ */ + if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) { + p = alloc_degraded (vtable, size); + UNLOCK_GC; + return p; + } + + if (size > tlab_size) { + /* Allocate directly from the nursery */ + if (nursery_next + size >= nursery_frag_real_end) { + if (!search_fragment_for_size (size)) { + /* get ready for possible collection */ + update_current_thread_stack (&dummy); + minor_collect_or_expand_inner (size); + if (degraded_mode) { + p = alloc_degraded (vtable, size); + UNLOCK_GC; + return p; + } } } - /* nursery_next changed by minor_collect_or_expand_inner () */ + p = (void*)nursery_next; nursery_next += size; - if (nursery_next > nursery_temp_end) { + if (nursery_next > nursery_frag_real_end) { // no space left g_assert (0); } + + if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) + memset (p, 0, size); } else { - /* record the scan start so we can find pinned objects more easily */ + if (tlab_start) + DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", tlab_start, tlab_real_end, (long)(tlab_real_end - tlab_next - size))); + + if (nursery_next + tlab_size >= nursery_frag_real_end) { + res = search_fragment_for_size (tlab_size); + if (!res) { + /* get ready for possible collection */ + update_current_thread_stack (&dummy); + minor_collect_or_expand_inner (tlab_size); + if (degraded_mode) { + p = alloc_degraded (vtable, size); + UNLOCK_GC; + return p; + } + } + } + + /* Allocate a new TLAB from the current nursery fragment */ + tlab_start = nursery_next; + nursery_next += tlab_size; + tlab_next = tlab_start; + tlab_real_end = tlab_start + tlab_size; + tlab_temp_end = tlab_start + MIN (SCAN_START_SIZE, tlab_size); + + if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) + memset (tlab_start, 0, tlab_size); + + /* Allocate from the TLAB */ + p = (void*)tlab_next; + tlab_next += size; + g_assert (tlab_next <= tlab_real_end); + nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p; - /* we just bump nursery_temp_end as well */ - nursery_temp_end = MIN (nursery_frag_real_end, nursery_next + SCAN_START_SIZE); - DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", nursery_next, nursery_temp_end)); } + } else { + /* Reached tlab_temp_end */ + + /* record the scan start so we can find pinned objects more easily */ + nursery_section->scan_starts [((char*)p - (char*)nursery_section->data)/SCAN_START_SIZE] = (char*)p; + /* we just bump tlab_temp_end as well */ + tlab_temp_end = MIN (tlab_real_end, tlab_next + SCAN_START_SIZE); + DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", tlab_next, tlab_temp_end)); } } + DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size)); *p = vtable; @@ -3124,6 +3570,32 @@ mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size) */ #define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj)) +static gboolean +is_critical_finalizer (FinalizeEntry *entry) +{ + MonoObject *obj; + MonoClass *class; + + if (!mono_defaults.critical_finalizer_object) + return FALSE; + + obj = entry->object; + class = ((MonoVTable*)LOAD_VTABLE (obj))->klass; + + return mono_class_has_parent (class, mono_defaults.critical_finalizer_object); +} + +static void +queue_finalization_entry (FinalizeEntry *entry) { + if (is_critical_finalizer (entry)) { + entry->next = critical_fin_list; + critical_fin_list = entry; + } else { + entry->next = fin_ready_list; + fin_ready_list = entry; + } +} + static void 
finalize_in_range (char *start, char *end) { @@ -3146,8 +3618,7 @@ finalize_in_range (char *start, char *end) next = entry->next; num_ready_finalizers++; num_registered_finalizers--; - entry->next = fin_ready_list; - fin_ready_list = entry; + queue_finalization_entry (entry); /* Make it survive */ from = entry->object; entry->object = copy_object (entry->object, start, end); @@ -3169,36 +3640,41 @@ finalize_in_range (char *start, char *end) static void null_link_in_range (char *start, char *end) { - FinalizeEntry *entry, *prev; + DisappearingLink *entry, *prev; int i; for (i = 0; i < disappearing_link_hash_size; ++i) { prev = NULL; for (entry = disappearing_link_hash [i]; entry;) { - if ((char*)entry->object >= start && (char*)entry->object < end && ((char*)entry->object < to_space || (char*)entry->object >= to_space_end)) { - if (object_is_fin_ready (entry->object)) { - void **p = entry->data; - FinalizeEntry *old; + char *object = DISLINK_OBJECT (entry); + if (object >= start && object < end && (object < to_space || object >= to_space_end)) { + if (!DISLINK_TRACK (entry) && object_is_fin_ready (object)) { + void **p = entry->link; + DisappearingLink *old; *p = NULL; /* remove from list */ if (prev) prev->next = entry->next; else disappearing_link_hash [i] = entry->next; - DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", p, entry->object)); + DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", p, object)); old = entry->next; free_internal_mem (entry); entry = old; num_disappearing_links--; continue; } else { - void **link; /* update pointer if it's moved * FIXME: what if an object is moved earlier? */ - entry->object = copy_object (entry->object, start, end); - DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", entry->data, entry->object)); - link = entry->data; - *link = entry->object; + /* We set the track + * resurrection bit to FALSE + * here so that the object can + * be collected in the next + * cycle (i.e. after it was + * finalized). 
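+ * Concretely, with the
+ * HIDE_POINTER/REVEAL_POINTER
+ * encoding defined above:
+ *   *link == ~((gulong)obj | track)
+ *   DISLINK_OBJECT (entry) == obj
+ *   DISLINK_TRACK (entry) == track
+ * so passing FALSE makes
+ * DISLINK_TRACK () read back 0
+ * for this link from now on.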
+ */ + *entry->link = HIDE_POINTER (copy_object (object, start, end), FALSE); + DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", entry->link, DISLINK_OBJECT (entry))); } } prev = entry; @@ -3288,6 +3764,7 @@ mono_gc_register_for_finalization (MonoObject *obj, void *user_data) unsigned int hash; if (no_finalize) return; + g_assert (user_data == NULL || user_data == mono_gc_run_finalize); hash = mono_object_hash (obj); LOCK_GC; if (num_registered_finalizers >= finalizable_hash_size * 2) @@ -3296,9 +3773,7 @@ mono_gc_register_for_finalization (MonoObject *obj, void *user_data) prev = NULL; for (entry = finalizable_hash [hash]; entry; entry = entry->next) { if (entry->object == obj) { - if (user_data) { - entry->data = user_data; - } else { + if (!user_data) { /* remove from the list */ if (prev) prev->next = entry->next; @@ -3320,7 +3795,6 @@ mono_gc_register_for_finalization (MonoObject *obj, void *user_data) } entry = get_internal_mem (sizeof (FinalizeEntry)); entry->object = obj; - entry->data = user_data; entry->next = finalizable_hash [hash]; finalizable_hash [hash] = entry; num_registered_finalizers++; @@ -3333,14 +3807,14 @@ rehash_dislink (void) { int i; unsigned int hash; - FinalizeEntry **new_hash; - FinalizeEntry *entry, *next; + DisappearingLink **new_hash; + DisappearingLink *entry, *next; int new_size = g_spaced_primes_closest (num_disappearing_links); - new_hash = get_internal_mem (new_size * sizeof (FinalizeEntry*)); + new_hash = get_internal_mem (new_size * sizeof (DisappearingLink*)); for (i = 0; i < disappearing_link_hash_size; ++i) { for (entry = disappearing_link_hash [i]; entry; entry = next) { - hash = mono_aligned_addr_hash (entry->data) % new_size; + hash = mono_aligned_addr_hash (entry->link) % new_size; next = entry->next; entry->next = new_hash [hash]; new_hash [hash] = entry; @@ -3352,9 +3826,9 @@ rehash_dislink (void) } static void -mono_gc_register_disappearing_link (MonoObject *obj, void *link) +mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track) { - FinalizeEntry *entry, *prev; + DisappearingLink *entry, *prev; unsigned int hash; LOCK_GC; @@ -3366,7 +3840,7 @@ mono_gc_register_disappearing_link (MonoObject *obj, void *link) prev = NULL; for (; entry; entry = entry->next) { /* link already added */ - if (link == entry->data) { + if (link == entry->link) { /* NULL obj means remove */ if (obj == NULL) { if (prev) @@ -3376,17 +3850,18 @@ mono_gc_register_disappearing_link (MonoObject *obj, void *link) num_disappearing_links--; DEBUG (5, fprintf (gc_debug_file, "Removed dislink %p (%d)\n", entry, num_disappearing_links)); free_internal_mem (entry); + *link = NULL; } else { - entry->object = obj; /* we allow the change of object */ + *link = HIDE_POINTER (obj, track); /* we allow the change of object */ } UNLOCK_GC; return; } prev = entry; } - entry = get_internal_mem (sizeof (FinalizeEntry)); - entry->object = obj; - entry->data = link; + entry = get_internal_mem (sizeof (DisappearingLink)); + *link = HIDE_POINTER (obj, track); + entry->link = link; entry->next = disappearing_link_hash [hash]; disappearing_link_hash [hash] = entry; num_disappearing_links++; @@ -3397,38 +3872,70 @@ mono_gc_register_disappearing_link (MonoObject *obj, void *link) int mono_gc_invoke_finalizers (void) { - FinalizeEntry *entry; + FinalizeEntry *entry = NULL; + gboolean entry_is_critical; int count = 0; void *obj; /* FIXME: batch to reduce lock contention */ - while (fin_ready_list) { + while (fin_ready_list || critical_fin_list) { LOCK_GC; 
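+ /* Unlink protocol: an entry is removed from its list only on the iteration
+  * after its finalizer has run; while the finalizer runs, the entry stays
+  * queued with object == NULL and the object itself stays alive through the
+  * on-stack reference passed to mono_gc_run_finalize (). */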
- entry = fin_ready_list;
+
+ if (entry) {
+ FinalizeEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
+
+ /* We have finalized the entry in the last
+ iteration, so now we need to remove it from
+ the list. */
+ if (*list == entry)
+ *list = entry->next;
+ else {
+ FinalizeEntry *e = *list;
+ while (e->next != entry)
+ e = e->next;
+ e->next = entry->next;
+ }
+ free_internal_mem (entry);
+ entry = NULL;
+ }
+
+ /* Now look for the first non-null entry. */
+ for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
+ ;
 if (entry) {
- fin_ready_list = entry->next;
+ entry_is_critical = FALSE;
+ } else {
+ entry_is_critical = TRUE;
+ for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
+ ;
+ }
+
+ if (entry) {
+ g_assert (entry->object);
 num_ready_finalizers--;
 obj = entry->object;
+ entry->object = NULL;
 DEBUG (7, fprintf (gc_debug_file, "Finalizing object %p (%s)\n", obj, safe_name (obj)));
 }
+
 UNLOCK_GC;
- if (entry) {
- void (*callback)(void *, void*) = entry->data;
- entry->next = NULL;
- obj = entry->object;
- count++;
- /* the object is on the stack so it is pinned */
- /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
- callback (obj, NULL);
- free_internal_mem (entry);
- }
+
+ if (!entry)
+ break;
+
+ g_assert (entry->object == NULL);
+ count++;
+ /* the object is on the stack so it is pinned */
+ /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
+ mono_gc_run_finalize (obj, NULL);
 }
+ g_assert (!entry);
 return count;
}

gboolean
mono_gc_pending_finalizers (void)
{
- return fin_ready_list != NULL;
+ return fin_ready_list || critical_fin_list;
}

/* Negative value to remove */
@@ -3448,45 +3955,67 @@ mono_gc_add_memory_pressure (gint64 value)
 */

static void
-rehash_roots (void)
+rehash_roots (gboolean pinned)
{
 int i;
 unsigned int hash;
 RootRecord **new_hash;
 RootRecord *entry, *next;
- int new_size = g_spaced_primes_closest (num_roots_entries);
+ int new_size;

+ new_size = g_spaced_primes_closest (num_roots_entries [pinned]);
 new_hash = get_internal_mem (new_size * sizeof (RootRecord*));
- for (i = 0; i < roots_hash_size; ++i) {
- for (entry = roots_hash [i]; entry; entry = next) {
+ for (i = 0; i < roots_hash_size [pinned]; ++i) {
+ for (entry = roots_hash [pinned][i]; entry; entry = next) {
 hash = mono_aligned_addr_hash (entry->start_root) % new_size;
 next = entry->next;
 entry->next = new_hash [hash];
 new_hash [hash] = entry;
 }
 }
- free_internal_mem (roots_hash);
- roots_hash = new_hash;
- roots_hash_size = new_size;
+ free_internal_mem (roots_hash [pinned]);
+ roots_hash [pinned] = new_hash;
+ roots_hash_size [pinned] = new_size;
+}
+
+static RootRecord*
+find_root (int root_type, char *start, guint32 addr_hash)
+{
+ RootRecord *new_root;
+
+ guint32 hash = addr_hash % roots_hash_size [root_type];
+ for (new_root = roots_hash [root_type][hash]; new_root; new_root = new_root->next) {
+ /* we allow changing the size and the descriptor (for thread statics etc) */
+ if (new_root->start_root == start) {
+ return new_root;
+ }
+ }
+
+ return NULL;
}

/*
 * We do not coalesce roots.
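 * Overlapping ranges are therefore kept as separate RootRecords; only an
 * exact match on the start address updates an existing record's size and
 * descriptor (see find_root () above).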
*/ -int -mono_gc_register_root (char *start, size_t size, void *descr) +static int +mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type) { RootRecord *new_root; - unsigned int hash = mono_aligned_addr_hash (start); + unsigned int hash, addr_hash = mono_aligned_addr_hash (start); + int i; LOCK_GC; - if (num_roots_entries >= roots_hash_size * 2) - rehash_roots (); - hash %= roots_hash_size; - for (new_root = roots_hash [hash]; new_root; new_root = new_root->next) { + for (i = 0; i < ROOT_TYPE_NUM; ++i) { + if (num_roots_entries [i] >= roots_hash_size [i] * 2) + rehash_roots (i); + } + for (i = 0; i < ROOT_TYPE_NUM; ++i) { + new_root = find_root (i, start, addr_hash); /* we allow changing the size and the descriptor (for thread statics etc) */ - if (new_root->start_root == start) { + if (new_root) { size_t old_size = new_root->end_root - new_root->start_root; new_root->end_root = new_root->start_root + size; + g_assert (((new_root->root_desc != 0) && (descr != NULL)) || + ((new_root->root_desc == 0) && (descr == NULL))); new_root->root_desc = (mword)descr; roots_size += size; roots_size -= old_size; @@ -3500,9 +4029,10 @@ mono_gc_register_root (char *start, size_t size, void *descr) new_root->end_root = new_root->start_root + size; new_root->root_desc = (mword)descr; roots_size += size; - num_roots_entries++; - new_root->next = roots_hash [hash]; - roots_hash [hash] = new_root; + hash = addr_hash % roots_hash_size [root_type]; + num_roots_entries [root_type]++; + new_root->next = roots_hash [root_type] [hash]; + roots_hash [root_type][hash] = new_root; DEBUG (3, fprintf (gc_debug_file, "Added root %p for range: %p-%p, descr: %p (%d/%d bytes)\n", new_root, new_root->start_root, new_root->end_root, descr, (int)size, (int)roots_size)); } else { UNLOCK_GC; @@ -3512,28 +4042,45 @@ mono_gc_register_root (char *start, size_t size, void *descr) return TRUE; } +int +mono_gc_register_root (char *start, size_t size, void *descr) +{ + return mono_gc_register_root_inner (start, size, descr, descr ? 
ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED); +} + +int +mono_gc_register_root_wbarrier (char *start, size_t size, void *descr) +{ + return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER); +} + void mono_gc_deregister_root (char* addr) { - RootRecord *tmp, *prev = NULL; - unsigned int hash = mono_aligned_addr_hash (addr); + RootRecord *tmp, *prev; + unsigned int hash, addr_hash = mono_aligned_addr_hash (addr); + int root_type; + LOCK_GC; - hash %= roots_hash_size; - tmp = roots_hash [hash]; - while (tmp) { - if (tmp->start_root == (char*)addr) { - if (prev) - prev->next = tmp->next; - else - roots_hash [hash] = tmp->next; - roots_size -= (tmp->end_root - tmp->start_root); - num_roots_entries--; - DEBUG (3, fprintf (gc_debug_file, "Removed root %p for range: %p-%p\n", tmp, tmp->start_root, tmp->end_root)); - free_internal_mem (tmp); - break; + for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) { + hash = addr_hash % roots_hash_size [root_type]; + tmp = roots_hash [root_type][hash]; + prev = NULL; + while (tmp) { + if (tmp->start_root == (char*)addr) { + if (prev) + prev->next = tmp->next; + else + roots_hash [root_type][hash] = tmp->next; + roots_size -= (tmp->end_root - tmp->start_root); + num_roots_entries [root_type]--; + DEBUG (3, fprintf (gc_debug_file, "Removed root %p for range: %p-%p\n", tmp, tmp->start_root, tmp->end_root)); + free_internal_mem (tmp); + break; + } + prev = tmp; + tmp = tmp->next; } - prev = tmp; - tmp = tmp->next; } UNLOCK_GC; } @@ -3555,7 +4102,12 @@ struct _SgenThreadInfo { int skip; void *stack_end; void *stack_start; + char **tlab_next_addr; + char **tlab_start_addr; + char **tlab_temp_end_addr; + char **tlab_real_end_addr; RememberedSet *remset; + gpointer runtime_data; }; /* FIXME: handle large/small config */ @@ -3594,6 +4146,8 @@ update_current_thread_stack (void *start) SgenThreadInfo *info = thread_info_lookup (ARCH_GET_THREAD ()); info->stack_start = align_pointer (&ptr); ARCH_STORE_REGS (ptr); + if (gc_callbacks.thread_suspend_func) + gc_callbacks.thread_suspend_func (info->runtime_data, NULL); } static const char* @@ -3647,7 +4201,7 @@ thread_handshake (int signum) /* LOCKING: assumes the GC lock is held (by the stopping thread) */ static void -suspend_handler (int sig) +suspend_handler (int sig, siginfo_t *siginfo, void *context) { SgenThreadInfo *info; pthread_t id; @@ -3671,6 +4225,10 @@ suspend_handler (int sig) */ info->stack_start = align_pointer (&id); + /* Notify the JIT */ + if (gc_callbacks.thread_suspend_func) + gc_callbacks.thread_suspend_func (info->runtime_data, context); + /* notify the waiting thread */ sem_post (&suspend_ack_semaphore); info->stop_count = stop_count; @@ -3734,27 +4292,55 @@ restart_world (void) #endif /* USE_SIGNAL_BASED_START_STOP_WORLD */ +void +mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks) +{ + gc_callbacks = *callbacks; +} + +/* Variables holding start/end nursery so it won't have to be passed at every call */ +static void *scan_area_arg_start, *scan_area_arg_end; + +void +mono_gc_conservatively_scan_area (void *start, void *end) +{ + conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end); +} + +void* +mono_gc_scan_object (void *obj) +{ + return copy_object (obj, scan_area_arg_start, scan_area_arg_end); +} + /* - * Identify objects pinned in a thread stack and its registers. + * Mark from thread stacks and registers. 
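+ * If the runtime registered a thread_mark_func callback, it is invoked for
+ * each stack with the PRECISE flag, so the runtime can scan its own stacks
+ * exactly; otherwise the stacks, and in the non-precise pass the saved
+ * register contents, are pinned conservatively.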
*/ static void -pin_thread_data (void *start_nursery, void *end_nursery) +scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise) { int i; SgenThreadInfo *info; + scan_area_arg_start = start_nursery; + scan_area_arg_end = end_nursery; + for (i = 0; i < THREAD_HASH_SIZE; ++i) { for (info = thread_table [i]; info; info = info->next) { if (info->skip) { DEBUG (2, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start)); continue; } - DEBUG (2, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start)); - conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery); + DEBUG (2, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot)); + if (gc_callbacks.thread_mark_func) + gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise); + else if (!precise) + conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery); } } - DEBUG (2, fprintf (gc_debug_file, "Scanning current thread registers\n")); - conservatively_pin_objects_from ((void*)cur_thread_regs, (void*)(cur_thread_regs + ARCH_NUM_REGS), start_nursery, end_nursery); + DEBUG (2, fprintf (gc_debug_file, "Scanning current thread registers, pinned=%d\n", next_pin_slot)); + if (!precise) + conservatively_pin_objects_from ((void*)cur_thread_regs, (void*)(cur_thread_regs + ARCH_NUM_REGS), start_nursery, end_nursery); } static void @@ -3802,11 +4388,18 @@ handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global switch ((*p) & REMSET_TYPE_MASK) { case REMSET_LOCATION: ptr = (void**)(*p); + //__builtin_prefetch (ptr); if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery) && ptr_in_heap (ptr)) { *ptr = copy_object (*ptr, start_nursery, end_nursery); DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr)); - if (!global && *ptr >= start_nursery && *ptr < end_nursery) - add_to_global_remset (ptr); + if (!global && *ptr >= start_nursery && *ptr < end_nursery) { + /* + * If the object is pinned, each reference to it from nonpinned objects + * becomes part of the global remset, which can grow very large. 
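+ * Keeping the location in the global remset ensures it is rescanned
+ * on later minor collections, for as long as the pinned target stays
+ * in the nursery.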
+ */ + DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr))); + add_to_global_remset (ptr, FALSE); + } } else { DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr)); } @@ -3820,7 +4413,7 @@ handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global *ptr = copy_object (*ptr, start_nursery, end_nursery); DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count)); if (!global && *ptr >= start_nursery && *ptr < end_nursery) - add_to_global_remset (ptr); + add_to_global_remset (ptr, FALSE); ++ptr; } return p + 2; @@ -3830,13 +4423,34 @@ handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global return p + 1; scan_object (*ptr, start_nursery, end_nursery); return p + 1; - case REMSET_VTYPE: + case REMSET_OTHER: { ptr = (void**)(*p & ~REMSET_TYPE_MASK); - if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery) || !ptr_in_heap (ptr)) + + switch (p [1]) { + case REMSET_VTYPE: + if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery) || !ptr_in_heap (ptr)) + return p + 3; + desc = p [2]; + scan_vtype ((char*)ptr, desc, start_nursery, end_nursery); + return p + 3; + case REMSET_ROOT_LOCATION: + /* Same as REMSET_LOCATION, but the address is not required to be in the heap */ + *ptr = copy_object (*ptr, start_nursery, end_nursery); + DEBUG (9, fprintf (gc_debug_file, "Overwrote root location remset at %p with %p\n", ptr, *ptr)); + if (!global && *ptr >= start_nursery && *ptr < end_nursery) { + /* + * If the object is pinned, each reference to it from nonpinned objects + * becomes part of the global remset, which can grow very large. + */ + DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr))); + add_to_global_remset (ptr, TRUE); + } return p + 2; - desc = p [1]; - scan_vtype ((char*)ptr, desc, start_nursery, end_nursery); - return p + 2; + default: + g_assert_not_reached (); + } + break; + } default: g_assert_not_reached (); } @@ -3849,15 +4463,40 @@ scan_from_remsets (void *start_nursery, void *end_nursery) int i; SgenThreadInfo *info; RememberedSet *remset, *next; - mword *p; + mword *p, *next_p, *store_pos; /* the global one */ for (remset = global_remset; remset; remset = remset->next) { DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data)); - for (p = remset->data; p < remset->store_next;) { - p = handle_remset (p, start_nursery, end_nursery, TRUE); + store_pos = remset->data; + for (p = remset->data; p < remset->store_next; p = next_p) { + mword ptr; + + next_p = handle_remset (p, start_nursery, end_nursery, TRUE); + + /* + * Clear global remsets of locations which no longer point to the + * nursery. Otherwise, they could grow indefinitely between major + * collections. 
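+ * The loop below compacts the buffer in place: entries whose target
+ * is still in the nursery (i.e. still pinned) are copied back through
+ * store_pos, and store_next is truncated to the compacted end.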
+ */ + ptr = (p [0] & ~REMSET_TYPE_MASK); + if ((p [0] & REMSET_TYPE_MASK) == REMSET_LOCATION) { + if (ptr_in_nursery (*(void**)ptr)) + *store_pos ++ = p [0]; + } else { + g_assert ((p [0] & REMSET_TYPE_MASK) == REMSET_OTHER); + g_assert (p [1] == REMSET_ROOT_LOCATION); + if (ptr_in_nursery (*(void**)ptr)) { + *store_pos ++ = p [0]; + *store_pos ++ = p [1]; + } + } } + + /* Truncate the remset */ + remset->store_next = store_pos; } + /* the per-thread ones */ for (i = 0; i < THREAD_HASH_SIZE; ++i) { for (info = thread_table [i]; info; info = info->next) { @@ -3916,6 +4555,45 @@ clear_remsets (void) } } +/* + * Clear the thread local TLAB variables for all threads. + */ +static void +clear_tlabs (void) +{ + SgenThreadInfo *info; + int i; + + for (i = 0; i < THREAD_HASH_SIZE; ++i) { + for (info = thread_table [i]; info; info = info->next) { + /* A new TLAB will be allocated when the thread does its first allocation */ + *info->tlab_start_addr = NULL; + *info->tlab_next_addr = NULL; + *info->tlab_temp_end_addr = NULL; + *info->tlab_real_end_addr = NULL; + } + } +} + +/* + * Find the tlab_next value of the TLAB which contains ADDR. + */ +static char* +find_tlab_next_from_address (char *addr) +{ + SgenThreadInfo *info; + int i; + + for (i = 0; i < THREAD_HASH_SIZE; ++i) { + for (info = thread_table [i]; info; info = info->next) { + if (addr >= *info->tlab_start_addr && addr < *info->tlab_next_addr) + return *info->tlab_next_addr; + } + } + + return NULL; +} + /* LOCKING: assumes the GC lock is held */ static SgenThreadInfo* gc_register_current_thread (void *addr) @@ -3929,6 +4607,12 @@ gc_register_current_thread (void *addr) info->skip = 0; info->signal = 0; info->stack_start = NULL; + info->tlab_start_addr = &tlab_start; + info->tlab_next_addr = &tlab_next; + info->tlab_temp_end_addr = &tlab_temp_end; + info->tlab_real_end_addr = &tlab_real_end; + + tlab_next_addr = &tlab_next; /* try to get it with attributes first */ #if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK) @@ -3961,6 +4645,10 @@ gc_register_current_thread (void *addr) remembered_set = info->remset = alloc_remset (DEFAULT_REMSET_SIZE, info); pthread_setspecific (remembered_set_key, remembered_set); DEBUG (3, fprintf (gc_debug_file, "registered thread %p (%p) (hash: %d)\n", info, (gpointer)info->id, hash)); + + if (gc_callbacks.thread_attach_func) + info->runtime_data = gc_callbacks.thread_attach_func (); + return info; } @@ -4119,7 +4807,7 @@ void mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value) { RememberedSet *rs; - if ((char*)field_ptr >= nursery_start && (char*)field_ptr < nursery_real_end) { + if (ptr_in_nursery (field_ptr)) { *(void**)field_ptr = value; return; } @@ -4142,7 +4830,7 @@ void mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value) { RememberedSet *rs = remembered_set; - if ((char*)slot_ptr >= nursery_start && (char*)slot_ptr < nursery_real_end) { + if (ptr_in_nursery (slot_ptr)) { *(void**)slot_ptr = value; return; } @@ -4164,7 +4852,7 @@ void mono_gc_wbarrier_arrayref_copy (MonoArray *arr, gpointer slot_ptr, int count) { RememberedSet *rs = remembered_set; - if ((char*)slot_ptr >= nursery_start && (char*)slot_ptr < nursery_real_end) + if (ptr_in_nursery (slot_ptr)) return; DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p, %d\n", slot_ptr, count)); if (rs->store_next + 1 < rs->end_set) { @@ -4183,13 +4871,14 @@ mono_gc_wbarrier_arrayref_copy (MonoArray *arr, gpointer slot_ptr, int count) void 
mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
{
- RememberedSet *rs = remembered_set;
- if ((char*)ptr >= nursery_start && (char*)ptr < nursery_real_end) {
+ RememberedSet *rs;
+ if (ptr_in_nursery (ptr)) {
 DEBUG (8, fprintf (gc_debug_file, "Skipping remset at %p\n", ptr));
 *(void**)ptr = value;
 return;
 }
- DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", ptr));
+ rs = remembered_set;
+ DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p (%s)\n", ptr, value ? safe_name (value) : "null"));
 /* FIXME: ensure it is on the heap */
 if (rs->store_next < rs->end_set) {
 *(rs->store_next++) = (mword)ptr;
@@ -4205,16 +4894,40 @@ mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
}

void
-mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
+mono_gc_wbarrier_set_root (gpointer ptr, MonoObject *value)
{
 RememberedSet *rs = remembered_set;
- if ((char*)dest >= nursery_start && (char*)dest < nursery_real_end) {
+ if (ptr_in_nursery (ptr))
+ return;
+ DEBUG (8, fprintf (gc_debug_file, "Adding root remset at %p (%s)\n", ptr, value ? safe_name (value) : "null"));
+
+ if (rs->store_next + 2 < rs->end_set) {
+ *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;
+ *(void**)ptr = value;
 return;
 }
- DEBUG (1, fprintf (gc_debug_file, "Adding value remset at %p, count %d for class %s\n", dest, count, klass->name));
+ rs = alloc_remset (rs->end_set - rs->data, (void*)1);
+ rs->next = remembered_set;
+ remembered_set = rs;
+ thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;

- if (rs->store_next + 1 < rs->end_set) {
- *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
+ *(void**)ptr = value;
+}
+
+void
+mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
+{
+ RememberedSet *rs = remembered_set;
+ if (ptr_in_nursery (dest))
+ return;
+ DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d for class %s\n", dest, count, klass->name));
+
+ if (rs->store_next + 2 < rs->end_set) {
+ *(rs->store_next++) = (mword)dest | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_VTYPE;
 *(rs->store_next++) = (mword)klass->gc_descr;
 return;
 }
@@ -4222,7 +4935,8 @@ mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *
 rs->next = remembered_set;
 remembered_set = rs;
 thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
- *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
+ *(rs->store_next++) = (mword)dest | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_VTYPE;
 *(rs->store_next++) = (mword)klass->gc_descr;
}

@@ -4272,7 +4986,7 @@ describe_ptr (char *ptr)
 mword desc;
 int type;

- if ((ptr >= nursery_start) && (ptr < nursery_real_end)) {
+ if (ptr_in_nursery (ptr)) {
 printf ("Pointer inside nursery.\n");
 } else {
 for (section = section_list; section;) {
@@ -4283,16 +4997,32 @@
 if (section) {
 printf ("Pointer inside oldspace.\n");
+ } else if (obj_is_from_pinned_alloc (ptr)) {
+ printf ("Pointer is inside a pinned chunk.\n");
 } else {
 printf ("Pointer unknown.\n");
 return;
 }
 }

+ if (object_is_pinned (ptr))
+ printf ("Object is pinned.\n");
+
+ if (object_is_forwarded (ptr))
+ printf ("Object is forwarded.\n");
+
 // FIXME: Handle pointers to the inside of objects
- vtable = (MonoVTable*)((mword*)ptr) [0];
+ vtable = (MonoVTable*)LOAD_VTABLE (ptr);

 printf ("VTable: %p\n", vtable);
+ if (vtable == NULL) {
printf ("VTable is invalid (empty).\n"); + return; + } + if (ptr_in_nursery (vtable)) { + printf ("VTable is invalid (points inside nursery).\n"); + return; + } printf ("Class: %s\n", vtable->klass->name); desc = ((GCVTable*)vtable)->desc; @@ -4329,24 +5059,33 @@ find_in_remset_loc (mword *p, char *addr, gboolean *found) if ((void**)addr >= ptr && (void**)addr < ptr + count) *found = TRUE; return p + 1; - case REMSET_VTYPE: - ptr = (void**)(*p & ~REMSET_TYPE_MASK); - desc = p [1]; - - switch (desc & 0x7) { - case DESC_TYPE_RUN_LENGTH: - OBJ_RUN_LEN_SIZE (skip_size, desc, ptr); - /* The descriptor includes the size of MonoObject */ - skip_size -= sizeof (MonoObject); - if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer))) - *found = TRUE; - break; + case REMSET_OTHER: { + switch (p [1]) { + case REMSET_VTYPE: + ptr = (void**)(*p & ~REMSET_TYPE_MASK); + desc = p [2]; + + switch (desc & 0x7) { + case DESC_TYPE_RUN_LENGTH: + OBJ_RUN_LEN_SIZE (skip_size, desc, ptr); + /* The descriptor includes the size of MonoObject */ + skip_size -= sizeof (MonoObject); + if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer))) + *found = TRUE; + break; + default: + // FIXME: + g_assert_not_reached (); + } + + return p + 3; + case REMSET_ROOT_LOCATION: + return p + 2; default: - // FIXME: g_assert_not_reached (); } - - return p + 2; + break; + } default: g_assert_not_reached (); } @@ -4525,6 +5264,79 @@ check_consistency (void) DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n")); } +/* Check that the reference is valid */ +#undef HANDLE_PTR +#define HANDLE_PTR(ptr,obj) do { \ + if (*(ptr)) { \ + g_assert (safe_name (*(ptr)) != NULL); \ + } \ + } while (0) + +/* + * check_object: + * + * Perform consistency check on an object. Currently we only check that the + * reference fields are valid. 
+ */ +char* +check_object (char *start) +{ + GCVTable *vt; + size_t skip_size; + mword desc; + + if (!start) + return NULL; + + vt = (GCVTable*)LOAD_VTABLE (start); + //type = vt->desc & 0x7; + + desc = vt->desc; + switch (desc & 0x7) { + case DESC_TYPE_STRING: + STRING_SIZE (skip_size, start); + return start + skip_size; + case DESC_TYPE_RUN_LENGTH: + OBJ_RUN_LEN_FOREACH_PTR (desc,start); + OBJ_RUN_LEN_SIZE (skip_size, desc, start); + g_assert (skip_size); + return start + skip_size; + case DESC_TYPE_ARRAY: + case DESC_TYPE_VECTOR: + OBJ_VECTOR_FOREACH_PTR (vt, start); + skip_size = safe_object_get_size ((MonoObject*)start); + skip_size += (ALLOC_ALIGN - 1); + skip_size &= ~(ALLOC_ALIGN - 1); + return start + skip_size; + case DESC_TYPE_SMALL_BITMAP: + OBJ_BITMAP_FOREACH_PTR (desc,start); + OBJ_BITMAP_SIZE (skip_size, desc, start); + return start + skip_size; + case DESC_TYPE_LARGE_BITMAP: + OBJ_LARGE_BITMAP_FOREACH_PTR (vt,start); + skip_size = safe_object_get_size ((MonoObject*)start); + skip_size += (ALLOC_ALIGN - 1); + skip_size &= ~(ALLOC_ALIGN - 1); + return start + skip_size; + case DESC_TYPE_COMPLEX: + OBJ_COMPLEX_FOREACH_PTR (vt, start); + /* this is a complex object */ + skip_size = safe_object_get_size ((MonoObject*)start); + skip_size += (ALLOC_ALIGN - 1); + skip_size &= ~(ALLOC_ALIGN - 1); + return start + skip_size; + case DESC_TYPE_COMPLEX_ARR: + OBJ_COMPLEX_ARR_FOREACH_PTR (vt, start); + /* this is an array of complex structs */ + skip_size = safe_object_get_size ((MonoObject*)start); + skip_size += (ALLOC_ALIGN - 1); + skip_size &= ~(ALLOC_ALIGN - 1); + return start + skip_size; + } + g_assert_not_reached (); + return NULL; +} + /* * ###################################################################### * ######## Other mono public interface functions. 
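/*
 * Illustrative sketch: the weak link code in the hunk below stores object
 * pointers in hidden form with HIDE_POINTER and reads them back with
 * REVEAL_POINTER. Setting aside the 'track' flag that the two-argument
 * HIDE_POINTER used here also encodes, the classic hiding scheme (as in the
 * Boehm GC) is a bitwise complement of the address:
 *
 *     static gpointer hide_pointer (gpointer p) { return (gpointer) ~(gsize) p; }
 *     static gpointer reveal_pointer (gpointer p) { return (gpointer) ~(gsize) p; }
 *
 * A hidden value no longer looks like a heap address, so a scan of the memory
 * holding the link cannot mistake it for a strong reference and keep the
 * pointed-to object alive; mono_gc_weak_link_get below must therefore reveal
 * the pointer before returning it.
 */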
@@ -4607,7 +5419,7 @@ mono_object_is_alive (MonoObject* o) int mono_gc_get_generation (MonoObject *obj) { - if ((char*)obj >= nursery_start && (char*)obj < nursery_real_end) + if (ptr_in_nursery (obj)) return 0; return 1; } @@ -4618,35 +5430,46 @@ mono_gc_enable_events (void) } void -mono_gc_weak_link_add (void **link_addr, MonoObject *obj) +mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track) { - mono_gc_register_disappearing_link (obj, link_addr); - *link_addr = obj; + mono_gc_register_disappearing_link (obj, link_addr, track); } void mono_gc_weak_link_remove (void **link_addr) { - mono_gc_register_disappearing_link (NULL, link_addr); - *link_addr = NULL; + mono_gc_register_disappearing_link (NULL, link_addr, FALSE); } MonoObject* mono_gc_weak_link_get (void **link_addr) { - return *link_addr; + if (!*link_addr) + return NULL; + return (MonoObject*) REVEAL_POINTER (*link_addr); } void* mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits) { if (numbits < ((sizeof (*bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) { - mword desc = ROOT_DESC_BITMAP | (bitmap [0] << ROOT_DESC_TYPE_SHIFT); - return (void*)desc; + return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, bitmap [0]); + } else { + mword complex = alloc_complex_descriptor (bitmap, numbits + 1); + return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX, complex); } - /* conservative scanning */ - DEBUG (3, fprintf (gc_debug_file, "Conservative root descr for size: %d\n", numbits)); - return NULL; +} + +void* +mono_gc_make_root_descr_user (MonoGCMarkFunc marker) +{ + void *descr; + + g_assert (user_descriptors_next < MAX_USER_DESCRIPTORS); + descr = (void*)MAKE_ROOT_DESC (ROOT_DESC_USER, (mword)user_descriptors_next); + user_descriptors [user_descriptors_next ++] = marker; + + return descr; } void* @@ -4695,7 +5518,6 @@ mono_gc_base_init (void) } pagesize = mono_pagesize (); gc_debug_file = stderr; - /* format: MONO_GC_DEBUG=[l[:filename]|