#include "metadata/object-internals.h"
#include "metadata/threads.h"
#include "metadata/sgen-cardtable.h"
+#include "metadata/sgen-ssb.h"
#include "metadata/sgen-protocol.h"
#include "metadata/sgen-archdep.h"
#include "metadata/sgen-bridge.h"
#include "metadata/mempool-internals.h"
#include "metadata/marshal.h"
#include "metadata/runtime.h"
+#include "metadata/sgen-cardtable.h"
+#include "metadata/sgen-pinning.h"
+#include "metadata/sgen-workers.h"
#include "utils/mono-mmap.h"
#include "utils/mono-time.h"
#include "utils/mono-semaphore.h"
/* 0 means not initialized, 1 is initialized, -1 means in progress */
static gint32 gc_initialized = 0;
/* If set, do a minor collection before every X allocation */
-static guint32 collect_before_allocs = 0;
+guint32 collect_before_allocs = 0;
/* If set, do a heap consistency check before each minor collection */
static gboolean consistency_check_at_minor_collection = FALSE;
/* If set, check that there are no references to the domain left at domain unload */
static gboolean nursery_collection_is_parallel = FALSE;
static gboolean disable_minor_collections = FALSE;
static gboolean disable_major_collections = FALSE;
-static gboolean do_pin_stats = FALSE;
+gboolean do_pin_stats = FALSE;
static gboolean do_verify_nursery = FALSE;
static gboolean do_dump_nursery_content = FALSE;
#ifdef HEAVY_STATISTICS
-static long long stat_objects_alloced = 0;
-static long long stat_bytes_alloced = 0;
long long stat_objects_alloced_degraded = 0;
long long stat_bytes_alloced_degraded = 0;
-static long long stat_bytes_alloced_los = 0;
long long stat_copy_object_called_nursery = 0;
long long stat_objects_copied_nursery = 0;
long long stat_nursery_copy_object_failed_forwarded = 0;
long long stat_nursery_copy_object_failed_pinned = 0;
-static long long stat_store_remsets = 0;
-static long long stat_store_remsets_unique = 0;
-static long long stat_saved_remsets_1 = 0;
-static long long stat_saved_remsets_2 = 0;
-static long long stat_local_remsets_processed = 0;
-static long long stat_global_remsets_added = 0;
-static long long stat_global_remsets_readded = 0;
-static long long stat_global_remsets_processed = 0;
-static long long stat_global_remsets_discarded = 0;
-
static int stat_wbarrier_set_field = 0;
static int stat_wbarrier_set_arrayref = 0;
static int stat_wbarrier_arrayref_copy = 0;
static int stat_wbarrier_generic_store = 0;
-static int stat_wbarrier_generic_store_remset = 0;
static int stat_wbarrier_set_root = 0;
static int stat_wbarrier_value_copy = 0;
static int stat_wbarrier_object_copy = 0;
#endif
+int stat_minor_gcs = 0;
+int stat_major_gcs = 0;
+
static long long stat_pinned_objects = 0;
static long long time_minor_pre_collection_fragment_clear = 0;
static long long time_minor_pinning = 0;
static long long time_minor_scan_remsets = 0;
-static long long time_minor_scan_card_table = 0;
static long long time_minor_scan_pinned = 0;
static long long time_minor_scan_registered_roots = 0;
static long long time_minor_scan_thread_data = 0;
}
*/
-/*
- * Define this to allow the user to change the nursery size by
- * specifying its value in the MONO_GC_PARAMS environmental
- * variable. See mono_gc_base_init for details.
- */
-#define USER_CONFIG 1
-
#define TV_DECLARE SGEN_TV_DECLARE
#define TV_GETTIME SGEN_TV_GETTIME
#define TV_ELAPSED SGEN_TV_ELAPSED
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
-static NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
+NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
/* the runtime can register areas of memory as roots: we keep two lists of roots,
* a pinned root set for conservatively scanned roots and a normal one for
mword root_desc;
};
-/*
- * We're never actually using the first element. It's always set to
- * NULL to simplify the elimination of consecutive duplicate
- * entries.
- */
-#define STORE_REMSET_BUFFER_SIZE 1023
-
-typedef struct _GenericStoreRememberedSet GenericStoreRememberedSet;
-struct _GenericStoreRememberedSet {
- GenericStoreRememberedSet *next;
- /* We need one entry less because the first entry of store
- remset buffers is always a dummy and we don't copy it. */
- gpointer data [STORE_REMSET_BUFFER_SIZE - 1];
-};
-
-/* we have 4 possible values in the low 2 bits */
-enum {
- REMSET_LOCATION, /* just a pointer to the exact location */
- REMSET_RANGE, /* range of pointer fields */
- REMSET_OBJECT, /* mark all the object for scanning */
- REMSET_VTYPE, /* a valuetype array described by a gc descriptor, a count and a size */
- REMSET_TYPE_MASK = 0x3
-};
-
-#ifdef HAVE_KW_THREAD
-static __thread RememberedSet *remembered_set MONO_TLS_FAST;
-#endif
-static MonoNativeTlsKey remembered_set_key;
-static RememberedSet *global_remset;
-static RememberedSet *freed_thread_remsets;
-static GenericStoreRememberedSet *generic_store_remsets = NULL;
-
-/*A two slots cache for recently inserted remsets */
-static gpointer global_remset_cache [2];
-
-/* FIXME: later choose a size that takes into account the RememberedSet struct
- * and doesn't waste any alloc paddin space.
- */
-#define DEFAULT_REMSET_SIZE 1024
-static RememberedSet* alloc_remset (int size, gpointer id, gboolean global);
-
#define object_is_forwarded SGEN_OBJECT_IS_FORWARDED
#define object_is_pinned SGEN_OBJECT_IS_PINNED
#define pin_object SGEN_PIN_OBJECT
#define unpin_object SGEN_UNPIN_OBJECT
-#define ptr_in_nursery(p) (SGEN_PTR_IN_NURSERY ((p), DEFAULT_NURSERY_BITS, nursery_start, nursery_end))
+#define ptr_in_nursery mono_sgen_ptr_in_nursery
#define LOAD_VTABLE SGEN_LOAD_VTABLE
* ######## Global data.
* ######################################################################
*/
-static LOCK_DECLARE (gc_mutex);
+LOCK_DECLARE (gc_mutex);
static int gc_disabled = 0;
-static int num_minor_gcs = 0;
-static int num_major_gcs = 0;
static gboolean use_cardtable;
-#ifdef USER_CONFIG
-
-/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
-#define DEFAULT_NURSERY_SIZE (default_nursery_size)
-static int default_nursery_size = (1 << 22);
-#ifdef SGEN_ALIGN_NURSERY
-/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
-#define DEFAULT_NURSERY_BITS (default_nursery_bits)
-static int default_nursery_bits = 22;
-#endif
-
-#else
-
-#define DEFAULT_NURSERY_SIZE (4*1024*1024)
-#ifdef SGEN_ALIGN_NURSERY
-#define DEFAULT_NURSERY_BITS 22
-#endif
-
-#endif
-
-#ifndef SGEN_ALIGN_NURSERY
-#define DEFAULT_NURSERY_BITS -1
-#endif
-
#define MIN_MINOR_COLLECTION_ALLOWANCE (DEFAULT_NURSERY_SIZE * 4)
#define SCAN_START_SIZE SGEN_SCAN_START_SIZE
static mword pagesize = 4096;
static mword nursery_size;
-static int degraded_mode = 0;
+int degraded_mode = 0;
static mword bytes_pinned_from_failed_allocation = 0;
static int last_los_memory_usage = 0;
static gboolean major_collection_happened = FALSE;
-static GCMemSection *nursery_section = NULL;
+GCMemSection *nursery_section = NULL;
static mword lowest_heap_address = ~(mword)0;
static mword highest_heap_address = 0;
static LOCK_DECLARE (interruption_mutex);
-static LOCK_DECLARE (global_remset_mutex);
static LOCK_DECLARE (pin_queue_mutex);
-#define LOCK_GLOBAL_REMSET mono_mutex_lock (&global_remset_mutex)
-#define UNLOCK_GLOBAL_REMSET mono_mutex_unlock (&global_remset_mutex)
-
#define LOCK_PIN_QUEUE mono_mutex_lock (&pin_queue_mutex)
#define UNLOCK_PIN_QUEUE mono_mutex_unlock (&pin_queue_mutex)
report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)LOAD_VTABLE (object))->klass;
}
-/*
- * The current allocation cursors
- * We allocate objects in the nursery.
- * The nursery is the area between nursery_start and nursery_end.
- * Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
- * from nursery fragments.
- * tlab_next is the pointer to the space inside the TLAB where the next object will
- * be allocated.
- * tlab_temp_end is the pointer to the end of the temporary space reserved for
- * the allocation: it allows us to set the scan starts at reasonable intervals.
- * tlab_real_end points to the end of the TLAB.
- * nursery_frag_real_end points to the end of the currently used nursery fragment.
- * nursery_first_pinned_start points to the start of the first pinned object in the nursery
- * nursery_last_pinned_end points to the end of the last pinned object in the nursery
- * At the next allocation, the area of the nursery where objects can be present is
- * between MIN(nursery_first_pinned_start, first_fragment_start) and
- * MAX(nursery_last_pinned_end, nursery_frag_real_end)
- */
-static char *nursery_start = NULL;
-static char *nursery_end = NULL;
-static char *nursery_alloc_bound = NULL;
-
-#ifdef HAVE_KW_THREAD
-#define TLAB_ACCESS_INIT
-#define TLAB_START tlab_start
-#define TLAB_NEXT tlab_next
-#define TLAB_TEMP_END tlab_temp_end
-#define TLAB_REAL_END tlab_real_end
-#define REMEMBERED_SET remembered_set
-#define STORE_REMSET_BUFFER store_remset_buffer
-#define STORE_REMSET_BUFFER_INDEX store_remset_buffer_index
-#define IN_CRITICAL_REGION thread_info->in_critical_region
-#else
-static MonoNativeTlsKey thread_info_key;
-#define TLAB_ACCESS_INIT SgenThreadInfo *__thread_info__ = mono_native_tls_get_value (thread_info_key)
-#define TLAB_START (__thread_info__->tlab_start)
-#define TLAB_NEXT (__thread_info__->tlab_next)
-#define TLAB_TEMP_END (__thread_info__->tlab_temp_end)
-#define TLAB_REAL_END (__thread_info__->tlab_real_end)
-#define REMEMBERED_SET (__thread_info__->remset)
-#define STORE_REMSET_BUFFER (__thread_info__->store_remset_buffer)
-#define STORE_REMSET_BUFFER_INDEX (__thread_info__->store_remset_buffer_index)
-#define IN_CRITICAL_REGION (__thread_info__->in_critical_region)
-#endif
-
-#ifndef DISABLE_CRITICAL_REGION
-
-/* Enter must be visible before anything is done in the critical region. */
-#define ENTER_CRITICAL_REGION do { mono_atomic_store_acquire (&IN_CRITICAL_REGION, 1); } while (0)
-
-/* Exit must make sure all critical regions stores are visible before it signal the end of the region.
- * We don't need to emit a full barrier since we
- */
-#define EXIT_CRITICAL_REGION do { mono_atomic_store_release (&IN_CRITICAL_REGION, 0); } while (0)
-
-
-#endif
+MonoNativeTlsKey thread_info_key;
-/*
- * FIXME: What is faster, a TLS variable pointing to a structure, or separate TLS
- * variables for next+temp_end ?
- */
#ifdef HAVE_KW_THREAD
-static __thread SgenThreadInfo *thread_info;
-static __thread char *tlab_start;
-static __thread char *tlab_next;
-static __thread char *tlab_temp_end;
-static __thread char *tlab_real_end;
-static __thread gpointer *store_remset_buffer;
-static __thread long store_remset_buffer_index;
-/* Used by the managed allocator/wbarrier */
-static __thread char **tlab_next_addr;
-static __thread char *stack_end;
-static __thread long *store_remset_buffer_index_addr;
+__thread SgenThreadInfo *thread_info;
+__thread gpointer *store_remset_buffer;
+__thread long store_remset_buffer_index;
+__thread char *stack_end;
+__thread long *store_remset_buffer_index_addr;
#endif
/* The size of a TLAB */
* FIXME: Tune this.
* FIXME: Make this self-tuning for each thread.
*/
-static guint32 tlab_size = (1024 * 4);
+guint32 tlab_size = (1024 * 4);
#define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
static int stop_world (int generation);
static int restart_world (int generation);
static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise, GrayQueue *queue);
-static void scan_from_global_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue);
-static void scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue);
static void scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue);
static void scan_finalizer_entries (CopyOrMarkObjectFunc copy_func, FinalizeReadyEntry *list, GrayQueue *queue);
static void report_finalizer_roots (void);
static void pin_from_roots (void *start_nursery, void *end_nursery, GrayQueue *queue);
static int pin_objects_from_addresses (GCMemSection *section, void **start, void **end, void *start_nursery, void *end_nursery, GrayQueue *queue);
-static void optimize_pin_queue (int start_slot);
-static void clear_remsets (void);
-static void clear_tlabs (void);
-static void sort_addresses (void **array, int size);
-static gboolean drain_gray_stack (GrayQueue *queue, int max_objs);
static void finish_gray_stack (char *start_addr, char *end_addr, int generation, GrayQueue *queue);
static gboolean need_major_collection (mword space_needed);
static void major_collection (const char *reason);
-static gboolean collection_is_parallel (void);
-
static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track, gboolean in_gc);
static gboolean mono_gc_is_critical_method (MonoMethod *method);
-void describe_ptr (char *ptr);
-void check_object (char *start);
-
-static void check_consistency (void);
-static void check_major_refs (void);
-static void check_scan_starts (void);
-static void check_for_xdomain_refs (void);
-static void dump_heap (const char *type, int num, const char *reason);
-
void mono_gc_scan_for_specific_ref (MonoObject *key, gboolean precise);
static void init_stats (void);
static void null_ephemerons_for_domain (MonoDomain *domain);
SgenMajorCollector major_collector;
+static GrayQueue gray_queue;
-#include "sgen-pinning.c"
-#include "sgen-pinning-stats.c"
-#include "sgen-gray.c"
-#include "sgen-workers.c"
-#include "sgen-cardtable.c"
+static SgenRemeberedSet remset;
+
+
+#define WORKERS_DISTRIBUTE_GRAY_QUEUE (mono_sgen_collection_is_parallel () ? mono_sgen_workers_get_distribute_gray_queue () : &gray_queue)
+
+static SgenGrayQueue*
+mono_sgen_workers_get_job_gray_queue (WorkerData *worker_data)
+{
+ return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
+}
static gboolean
is_xdomain_ref_allowed (gpointer *ptr, char *obj, MonoDomain *domain)
return TRUE;
if (o->vtable->klass == mono_defaults.internal_thread_class && offset == G_STRUCT_OFFSET (MonoInternalThread, current_appcontext))
return TRUE;
- if (mono_class_has_parent (o->vtable->klass, mono_defaults.real_proxy_class) &&
+ if (mono_class_has_parent_fast (o->vtable->klass, mono_defaults.real_proxy_class) &&
offset == G_STRUCT_OFFSET (MonoRealProxy, unwrapped_server))
return TRUE;
/* Thread.cached_culture_info */
size = ALIGN_UP (safe_object_get_size ((MonoObject*)obj));
- callback (obj, size, data);
+ if (SGEN_LOAD_VTABLE (obj) != array_fill_vtable)
+ callback (obj, size, data);
start += size;
}
g_assert (mono_object_domain (start) == mono_get_root_domain ());
/* The object could be a proxy for an object in the domain
we're deleting. */
- if (mono_class_has_parent (vt->klass, mono_defaults.real_proxy_class)) {
+ if (mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
/* The server could already have been zeroed out, so
UNLOCK_GC;
}
-static void
-global_remset_cache_clear (void)
-{
- memset (global_remset_cache, 0, sizeof (global_remset_cache));
-}
-
-/*
- * Tries to check if a given remset location was already added to the global remset.
- * It can
- *
- * A 2 entry, LRU cache of recently saw location remsets.
- *
- * It's hand-coded instead of done using loops to reduce the number of memory references on cache hit.
- *
- * Returns TRUE is the element was added..
- */
-static gboolean
-global_remset_location_was_not_added (gpointer ptr)
-{
-
- gpointer first = global_remset_cache [0], second;
- if (first == ptr) {
- HEAVY_STAT (++stat_global_remsets_discarded);
- return FALSE;
- }
-
- second = global_remset_cache [1];
-
- if (second == ptr) {
- /*Move the second to the front*/
- global_remset_cache [0] = second;
- global_remset_cache [1] = first;
-
- HEAVY_STAT (++stat_global_remsets_discarded);
- return FALSE;
- }
-
- global_remset_cache [0] = second;
- global_remset_cache [1] = ptr;
- return TRUE;
-}
-
/*
* mono_sgen_add_to_global_remset:
*
void
mono_sgen_add_to_global_remset (gpointer ptr)
{
- RememberedSet *rs;
- gboolean lock = collection_is_parallel ();
- gpointer obj = *(gpointer*)ptr;
-
- if (use_cardtable) {
- sgen_card_table_mark_address ((mword)ptr);
- return;
- }
-
- g_assert (!ptr_in_nursery (ptr) && ptr_in_nursery (obj));
-
- if (lock)
- LOCK_GLOBAL_REMSET;
-
- if (!global_remset_location_was_not_added (ptr))
- goto done;
-
- if (G_UNLIKELY (do_pin_stats))
- mono_sgen_pin_stats_register_global_remset (obj);
-
- DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr));
- binary_protocol_global_remset (ptr, *(gpointer*)ptr, (gpointer)LOAD_VTABLE (obj));
-
- HEAVY_STAT (++stat_global_remsets_added);
-
- /*
- * FIXME: If an object remains pinned, we need to add it at every minor collection.
- * To avoid uncontrolled growth of the global remset, only add each pointer once.
- */
- if (global_remset->store_next + 3 < global_remset->end_set) {
- *(global_remset->store_next++) = (mword)ptr;
- goto done;
- }
- rs = alloc_remset (global_remset->end_set - global_remset->data, NULL, TRUE);
- rs->next = global_remset;
- global_remset = rs;
- *(global_remset->store_next++) = (mword)ptr;
-
- {
- int global_rs_size = 0;
-
- for (rs = global_remset; rs; rs = rs->next) {
- global_rs_size += rs->store_next - rs->data;
- }
- DEBUG (4, fprintf (gc_debug_file, "Global remset now has size %d\n", global_rs_size));
- }
-
- done:
- if (lock)
- UNLOCK_GLOBAL_REMSET;
+ remset.record_pointer (ptr);
}
/*
- * drain_gray_stack:
+ * mono_sgen_drain_gray_stack:
*
* Scan objects in the gray stack until the stack is empty. This should be called
* frequently after each object is copied, to achieve better locality and cache
* usage.
*/
-static gboolean
-drain_gray_stack (GrayQueue *queue, int max_objs)
+gboolean
+mono_sgen_drain_gray_stack (GrayQueue *queue, int max_objs)
{
char *obj;
} else {
int i;
- if (collection_is_parallel () && queue == &workers_distribute_gray_queue)
+ if (mono_sgen_collection_is_parallel () && mono_sgen_workers_is_distributed_queue (queue))
return TRUE;
do {
void
mono_sgen_pin_object (void *object, GrayQueue *queue)
{
- if (collection_is_parallel ()) {
+ if (mono_sgen_collection_is_parallel ()) {
LOCK_PIN_QUEUE;
/*object arrives pinned*/
- pin_stage_ptr (object);
+ mono_sgen_pin_stage_ptr (object);
++objects_pinned ;
UNLOCK_PIN_QUEUE;
} else {
SGEN_PIN_OBJECT (object);
- pin_stage_ptr (object);
+ mono_sgen_pin_stage_ptr (object);
++objects_pinned;
if (G_UNLIKELY (do_pin_stats))
mono_sgen_pin_stats_register_object (object, safe_object_get_size (object));
/* Sort the addresses in array in increasing order.
* Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
*/
-static void
-sort_addresses (void **array, int size)
+void
+mono_sgen_sort_addresses (void **array, int size)
{
int i;
void *tmp;
}
}
-static G_GNUC_UNUSED void
-print_nursery_gaps (void* start_nursery, void *end_nursery)
-{
- int i;
- gpointer first = start_nursery;
- gpointer next;
- for (i = 0; i < next_pin_slot; ++i) {
- next = pin_queue [i];
- fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
- first = next;
- }
- next = end_nursery;
- fprintf (gc_debug_file, "Nursery range: %p-%p, size: %td\n", first, next, (char*)next-(char*)first);
-}
-
-/* reduce the info in the pin queue, removing duplicate pointers and sorting them */
-static void
-optimize_pin_queue (int start_slot)
-{
- void **start, **cur, **end;
- /* sort and uniq pin_queue: we just sort and we let the rest discard multiple values */
- /* it may be better to keep ranges of pinned memory instead of individually pinning objects */
- DEBUG (5, fprintf (gc_debug_file, "Sorting pin queue, size: %d\n", next_pin_slot));
- if ((next_pin_slot - start_slot) > 1)
- sort_addresses (pin_queue + start_slot, next_pin_slot - start_slot);
- start = cur = pin_queue + start_slot;
- end = pin_queue + next_pin_slot;
- while (cur < end) {
- *start = *cur++;
- while (*start == *cur && cur < end)
- cur++;
- start++;
- };
- next_pin_slot = start - pin_queue;
- DEBUG (5, fprintf (gc_debug_file, "Pin queue reduced to size: %d\n", next_pin_slot));
- //DEBUG (6, print_nursery_gaps (start_nursery, end_nursery));
-
-}
-
/*
* Scan the memory between start and end and queue values which could be pointers
* to the area between start_nursery and end_nursery for later consideration.
mword addr = (mword)*start;
addr &= ~(ALLOC_ALIGN - 1);
if (addr >= (mword)start_nursery && addr < (mword)end_nursery)
- pin_stage_ptr ((void*)addr);
+ mono_sgen_pin_stage_ptr ((void*)addr);
if (G_UNLIKELY (do_pin_stats)) {
- if (ptr_in_nursery (addr))
- pin_stats_register_address ((char*)addr, pin_type);
+ if (ptr_in_nursery ((void*)addr))
+ mono_sgen_pin_stats_register_address ((char*)addr, pin_type);
}
DEBUG (6, if (count) fprintf (gc_debug_file, "Pinning address %p from %p\n", (void*)addr, start));
count++;
if ((desc & 1) && *start_root) {
copy_func (start_root, queue);
DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
- drain_gray_stack (queue, -1);
+ mono_sgen_drain_gray_stack (queue, -1);
}
desc >>= 1;
start_root++;
if ((bmap & 1) && *objptr) {
copy_func (objptr, queue);
DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
- drain_gray_stack (queue, -1);
+ mono_sgen_drain_gray_stack (queue, -1);
}
bmap >>= 1;
++objptr;
#else
data = major_collector.alloc_heap (alloc_size, 0, DEFAULT_NURSERY_BITS);
#endif
- nursery_start = data;
- nursery_end = nursery_start + nursery_size;
- mono_sgen_update_heap_boundaries ((mword)nursery_start, (mword)nursery_end);
+ mono_sgen_update_heap_boundaries ((mword)data, (mword)(data + nursery_size));
DEBUG (4, fprintf (gc_debug_file, "Expanding nursery size (%p-%p): %lu, total: %lu\n", data, data + alloc_size, (unsigned long)nursery_size, (unsigned long)total_alloc));
section->data = section->next_data = data;
section->size = alloc_size;
- section->end_data = nursery_end;
+ section->end_data = data + nursery_size;
scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
section->scan_starts = mono_sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS);
section->num_scan_start = scan_starts;
nursery_section = section;
- mono_sgen_nursery_allocator_set_nursery_bounds (nursery_start, nursery_end);
+ mono_sgen_nursery_allocator_set_nursery_bounds (data, data + nursery_size);
}
void*
#else
*shift_bits = -1;
#endif
- return nursery_start;
+ return mono_sgen_get_nursery_start ();
}
void
}
}
-static MonoObject **finalized_array = NULL;
-static int finalized_array_capacity = 0;
-static int finalized_array_entries = 0;
-
-static void
-bridge_register_finalized_object (MonoObject *object)
-{
- if (!finalized_array)
- return;
-
- if (finalized_array_entries >= finalized_array_capacity) {
- MonoObject **new_array;
- g_assert (finalized_array_entries == finalized_array_capacity);
- finalized_array_capacity *= 2;
- new_array = mono_sgen_alloc_internal_dynamic (sizeof (MonoObject*) * finalized_array_capacity, INTERNAL_MEM_BRIDGE_DATA);
- memcpy (new_array, finalized_array, sizeof (MonoObject*) * finalized_array_entries);
- mono_sgen_free_internal_dynamic (finalized_array, sizeof (MonoObject*) * finalized_array_entries, INTERNAL_MEM_BRIDGE_DATA);
- finalized_array = new_array;
- }
- finalized_array [finalized_array_entries++] = object;
-}
static void
stw_bridge_process (void)
mono_sgen_get_copy_object (void)
{
if (current_collection_generation == GENERATION_NURSERY) {
- if (collection_is_parallel ())
+ if (mono_sgen_collection_is_parallel ())
return major_collector.copy_object;
else
return major_collector.nopar_copy_object;
{
g_assert (current_collection_generation == GENERATION_NURSERY);
- if (collection_is_parallel ())
+ if (mono_sgen_collection_is_parallel ())
return major_collector.minor_scan_object;
else
return major_collector.nopar_minor_scan_object;
{
g_assert (current_collection_generation == GENERATION_NURSERY);
- if (collection_is_parallel ())
+ if (mono_sgen_collection_is_parallel ())
return major_collector.minor_scan_vtype;
else
return major_collector.nopar_minor_scan_vtype;
* To achieve better cache locality and cache usage, we drain the gray stack
* frequently, after each object is copied, and just finish the work here.
*/
- drain_gray_stack (queue, -1);
+ mono_sgen_drain_gray_stack (queue, -1);
TV_GETTIME (atv);
DEBUG (2, fprintf (gc_debug_file, "%s generation done\n", generation_name (generation)));
+ /*
+ Reset bridge data: we might have lingering data from a previous collection if this is a major
+ collection triggered by minor overflow.
+
+ We must reset the gathered bridges since their original blocks might be evacuated due to major
+ fragmentation in the meantime, and the bridge code should not have to deal with that.
+ */
+ mono_sgen_bridge_reset_data ();
+
/*
* Walk the ephemeron tables marking all values with reachable keys. This must be completely done
* before processing finalizable objects or non-tracking weak hamdle to avoid finalizing/clearing
done_with_ephemerons = 0;
do {
done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
- drain_gray_stack (queue, -1);
+ mono_sgen_drain_gray_stack (queue, -1);
++ephemeron_rounds;
} while (!done_with_ephemerons);
mono_sgen_scan_togglerefs (copy_func, start_addr, end_addr, queue);
if (generation == GENERATION_OLD)
- mono_sgen_scan_togglerefs (copy_func, nursery_start, nursery_end, queue);
+ mono_sgen_scan_togglerefs (copy_func, mono_sgen_get_nursery_start (), mono_sgen_get_nursery_end (), queue);
if (mono_sgen_need_bridge_processing ()) {
- if (finalized_array == NULL) {
- finalized_array_capacity = 32;
- finalized_array = mono_sgen_alloc_internal_dynamic (sizeof (MonoObject*) * finalized_array_capacity, INTERNAL_MEM_BRIDGE_DATA);
- }
- finalized_array_entries = 0;
-
collect_bridge_objects (copy_func, start_addr, end_addr, generation, queue);
if (generation == GENERATION_OLD)
- collect_bridge_objects (copy_func, nursery_start, nursery_end, GENERATION_NURSERY, queue);
-
- if (finalized_array_entries > 0) {
- mono_sgen_bridge_processing_register_objects (finalized_array_entries, finalized_array);
- finalized_array_entries = 0;
- }
- drain_gray_stack (queue, -1);
+ collect_bridge_objects (copy_func, mono_sgen_get_nursery_start (), mono_sgen_get_nursery_end (), GENERATION_NURSERY, queue);
+ mono_sgen_drain_gray_stack (queue, -1);
}
/*
fin_ready = num_ready_finalizers;
finalize_in_range (copy_func, start_addr, end_addr, generation, queue);
if (generation == GENERATION_OLD)
- finalize_in_range (copy_func, nursery_start, nursery_end, GENERATION_NURSERY, queue);
+ finalize_in_range (copy_func, mono_sgen_get_nursery_start (), mono_sgen_get_nursery_end (), GENERATION_NURSERY, queue);
if (fin_ready != num_ready_finalizers)
++num_loops;
/* drain the new stack that might have been created */
DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area post fin\n"));
- drain_gray_stack (queue, -1);
+ mono_sgen_drain_gray_stack (queue, -1);
} while (fin_ready != num_ready_finalizers);
if (mono_sgen_need_bridge_processing ())
done_with_ephemerons = 0;
do {
done_with_ephemerons = mark_ephemerons_in_range (copy_func, start_addr, end_addr, queue);
- drain_gray_stack (queue, -1);
+ mono_sgen_drain_gray_stack (queue, -1);
++ephemeron_rounds;
} while (!done_with_ephemerons);
* GC a finalized object my lose the monitor because it is cleared before the finalizer is
* called.
*/
- g_assert (gray_object_queue_is_empty (queue));
+ g_assert (mono_sgen_gray_object_queue_is_empty (queue));
for (;;) {
null_link_in_range (copy_func, start_addr, end_addr, generation, FALSE, queue);
if (generation == GENERATION_OLD)
null_link_in_range (copy_func, start_addr, end_addr, GENERATION_NURSERY, FALSE, queue);
- if (gray_object_queue_is_empty (queue))
+ if (mono_sgen_gray_object_queue_is_empty (queue))
break;
- drain_gray_stack (queue, -1);
+ mono_sgen_drain_gray_stack (queue, -1);
}
- g_assert (gray_object_queue_is_empty (queue));
+ g_assert (mono_sgen_gray_object_queue_is_empty (queue));
}
void
major_collector.check_scan_starts ();
}
-static int last_num_pinned = 0;
-
static void
scan_from_registered_roots (CopyOrMarkObjectFunc copy_func, char *addr_start, char *addr_end, int root_type, GrayQueue *queue)
{
fprintf (heap_dump_file, ">\n");
fprintf (heap_dump_file, "<other-mem-usage type=\"mempools\" size=\"%ld\"/>\n", mono_mempool_get_bytes_allocated ());
mono_sgen_dump_internal_mem_usage (heap_dump_file);
- fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_STACK]);
+ fprintf (heap_dump_file, "<pinned type=\"stack\" bytes=\"%zu\"/>\n", mono_sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_STACK));
/* fprintf (heap_dump_file, "<pinned type=\"static-data\" bytes=\"%d\"/>\n", pinned_byte_counts [PIN_TYPE_STATIC_DATA]); */
- fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", pinned_byte_counts [PIN_TYPE_OTHER]);
+ fprintf (heap_dump_file, "<pinned type=\"other\" bytes=\"%zu\"/>\n", mono_sgen_pin_stats_get_pinned_byte_count (PIN_TYPE_OTHER));
fprintf (heap_dump_file, "<pinned-objects>\n");
- for (list = pinned_objects; list; list = list->next)
+ for (list = mono_sgen_pin_stats_get_object_list (); list; list = list->next)
dump_object (list->obj, TRUE);
fprintf (heap_dump_file, "</pinned-objects>\n");
g_assert (mono_profiler_events & MONO_PROFILE_GC_MOVES);
/* FIXME: handle this for parallel collector */
- g_assert (!collection_is_parallel ());
+ g_assert (!mono_sgen_collection_is_parallel ());
if (moved_objects_idx == MOVED_OBJECTS_NUM) {
mono_profiler_gc_moves (moved_objects, moved_objects_idx);
if (inited)
return;
- mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pre_collection_fragment_clear);
- mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_pinning);
- mono_counters_register ("Minor scan remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_remsets);
- mono_counters_register ("Minor scan cardtables", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_card_table);
- mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_pinned);
- mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_registered_roots);
- mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_scan_thread_data);
- mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_finish_gray_stack);
- mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_minor_fragment_creation);
-
- mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pre_collection_fragment_clear);
- mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_pinning);
- mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_pinned);
- mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_registered_roots);
- mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_thread_data);
- mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_alloc_pinned);
- mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_finalized);
- mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_scan_big_objects);
- mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_finish_gray_stack);
- mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_free_bigobjs);
- mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_los_sweep);
- mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_sweep);
- mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_LONG, &time_major_fragment_creation);
+ mono_counters_register ("Minor fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pre_collection_fragment_clear);
+ mono_counters_register ("Minor pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_pinning);
+ mono_counters_register ("Minor scan remembered set", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_remsets);
+ mono_counters_register ("Minor scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_pinned);
+ mono_counters_register ("Minor scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_registered_roots);
+ mono_counters_register ("Minor scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_scan_thread_data);
+ mono_counters_register ("Minor finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_finish_gray_stack);
+ mono_counters_register ("Minor fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_minor_fragment_creation);
+
+ mono_counters_register ("Major fragment clear", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pre_collection_fragment_clear);
+ mono_counters_register ("Major pinning", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_pinning);
+ mono_counters_register ("Major scan pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_pinned);
+ mono_counters_register ("Major scan registered roots", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_registered_roots);
+ mono_counters_register ("Major scan thread data", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_thread_data);
+ mono_counters_register ("Major scan alloc_pinned", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_alloc_pinned);
+ mono_counters_register ("Major scan finalized", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_finalized);
+ mono_counters_register ("Major scan big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_scan_big_objects);
+ mono_counters_register ("Major finish gray stack", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_finish_gray_stack);
+ mono_counters_register ("Major free big objects", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_free_bigobjs);
+ mono_counters_register ("Major LOS sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_los_sweep);
+ mono_counters_register ("Major sweep", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_sweep);
+ mono_counters_register ("Major fragment creation", MONO_COUNTER_GC | MONO_COUNTER_TIME_INTERVAL, &time_major_fragment_creation);
mono_counters_register ("Number of pinned objects", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_objects);
mono_counters_register ("WBarrier set arrayref", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_arrayref);
mono_counters_register ("WBarrier arrayref copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_arrayref_copy);
mono_counters_register ("WBarrier generic store called", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store);
- mono_counters_register ("WBarrier generic store stored", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_generic_store_remset);
mono_counters_register ("WBarrier set root", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_set_root);
mono_counters_register ("WBarrier value copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_value_copy);
mono_counters_register ("WBarrier object copy", MONO_COUNTER_GC | MONO_COUNTER_INT, &stat_wbarrier_object_copy);
- mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
- mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
mono_counters_register ("# objects allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced_degraded);
mono_counters_register ("bytes allocated degraded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_degraded);
- mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
mono_counters_register ("# copy_object() called (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_copy_object_called_nursery);
mono_counters_register ("# objects copied (nursery)", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_copied_nursery);
mono_counters_register ("# nursery copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_nursery_copy_object_failed_pinned);
mono_sgen_nursery_allocator_init_heavy_stats ();
-
- mono_counters_register ("Store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets);
- mono_counters_register ("Unique store remsets", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_store_remsets_unique);
- mono_counters_register ("Saved remsets 1", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_1);
- mono_counters_register ("Saved remsets 2", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_saved_remsets_2);
- mono_counters_register ("Non-global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_local_remsets_processed);
- mono_counters_register ("Global remsets added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_added);
- mono_counters_register ("Global remsets re-added", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_readded);
- mono_counters_register ("Global remsets processed", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_processed);
- mono_counters_register ("Global remsets discarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_global_remsets_discarded);
+ mono_sgen_alloc_init_heavy_stats ();
#endif
inited = TRUE;
bytes_pinned_from_failed_allocation += objsize;
}
-static gboolean
-collection_is_parallel (void)
+gboolean
+mono_sgen_collection_is_parallel (void)
{
switch (current_collection_generation) {
case GENERATION_NURSERY:
return nursery_collection_is_parallel;
}
-static GrayQueue*
-job_gray_queue (WorkerData *worker_data)
-{
- return worker_data ? &worker_data->private_gray_queue : WORKERS_DISTRIBUTE_GRAY_QUEUE;
-}
-
typedef struct
{
char *heap_start;
char *heap_end;
-} ScanFromRemsetsJobData;
+} FinishRememberedSetScanJobData;
static void
-job_scan_from_remsets (WorkerData *worker_data, void *job_data_untyped)
+job_finish_remembered_set_scan (WorkerData *worker_data, void *job_data_untyped)
{
- ScanFromRemsetsJobData *job_data = job_data_untyped;
+ FinishRememberedSetScanJobData *job_data = job_data_untyped;
- scan_from_remsets (job_data->heap_start, job_data->heap_end, job_gray_queue (worker_data));
+ remset.finish_scan_remsets (job_data->heap_start, job_data->heap_end, mono_sgen_workers_get_job_gray_queue (worker_data));
}
typedef struct
scan_from_registered_roots (job_data->func,
job_data->heap_start, job_data->heap_end,
job_data->root_type,
- job_gray_queue (worker_data));
+ mono_sgen_workers_get_job_gray_queue (worker_data));
}
typedef struct
ScanThreadDataJobData *job_data = job_data_untyped;
scan_thread_data (job_data->heap_start, job_data->heap_end, TRUE,
- job_gray_queue (worker_data));
+ mono_sgen_workers_get_job_gray_queue (worker_data));
}
static void
/*This cleans up unused fragments */
mono_sgen_nursery_allocator_prepare_for_pinning ();
- hole_start = start = cur = nursery_start;
- end = nursery_end;
+ hole_start = start = cur = mono_sgen_get_nursery_start ();
+ end = mono_sgen_get_nursery_end ();
while (cur < end) {
size_t ss, size;
gboolean needs_major;
size_t max_garbage_amount;
char *nursery_next;
- ScanFromRemsetsJobData sfrjd;
+ FinishRememberedSetScanJobData frssjd;
ScanFromRegisteredRootsJobData scrrjd_normal, scrrjd_wbarrier;
ScanThreadDataJobData stdjd;
mword fragment_total;
objects_pinned = 0;
nursery_next = mono_sgen_nursery_alloc_get_upper_alloc_bound ();
/* FIXME: optimize later to use the higher address where an object can be present */
- nursery_next = MAX (nursery_next, nursery_end);
-
- nursery_alloc_bound = nursery_next;
+ nursery_next = MAX (nursery_next, mono_sgen_get_nursery_end ());
- DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", num_minor_gcs, nursery_start, nursery_next, (int)(nursery_next - nursery_start)));
- max_garbage_amount = nursery_next - nursery_start;
+ DEBUG (1, fprintf (gc_debug_file, "Start nursery collection %d %p-%p, size: %d\n", stat_minor_gcs, mono_sgen_get_nursery_start (), nursery_next, (int)(nursery_next - mono_sgen_get_nursery_start ())));
+ max_garbage_amount = nursery_next - mono_sgen_get_nursery_start ();
g_assert (nursery_section->size >= max_garbage_amount);
/* world must be stopped already */
mono_sgen_clear_current_nursery_fragment ();
TV_GETTIME (btv);
- time_minor_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
+ time_minor_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
if (xdomain_checks)
check_for_xdomain_refs ();
try_calculate_minor_collection_allowance (FALSE);
- gray_object_queue_init (&gray_queue);
- workers_init_distribute_gray_queue ();
+ mono_sgen_gray_object_queue_init (&gray_queue);
+ mono_sgen_workers_init_distribute_gray_queue ();
- num_minor_gcs++;
+ stat_minor_gcs++;
mono_stats.minor_gc_count ++;
- global_remset_cache_clear ();
+ if (remset.prepare_for_minor_collection)
+ remset.prepare_for_minor_collection ();
process_fin_stage_entries ();
process_dislink_stage_entries ();
/* pin from pinned handles */
- init_pinning ();
+ mono_sgen_init_pinning ();
mono_profiler_gc_event (MONO_GC_EVENT_MARK_START, 0);
- pin_from_roots (nursery_start, nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ pin_from_roots (mono_sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
/* identify pinned objects */
- optimize_pin_queue (0);
- next_pin_slot = pin_objects_from_addresses (nursery_section, pin_queue, pin_queue + next_pin_slot, nursery_start, nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
- nursery_section->pin_queue_start = pin_queue;
- nursery_section->pin_queue_num_entries = next_pin_slot;
+ mono_sgen_optimize_pin_queue (0);
+ mono_sgen_pinning_setup_section (nursery_section);
+ mono_sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
+
TV_GETTIME (atv);
- time_minor_pinning += TV_ELAPSED_MS (btv, atv);
- DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (btv, atv)));
- DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
+ time_minor_pinning += TV_ELAPSED (btv, atv);
+ DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", mono_sgen_get_pinned_count (), TV_ELAPSED (btv, atv)));
+ DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", mono_sgen_get_pinned_count ()));
if (consistency_check_at_minor_collection)
- check_consistency ();
+ mono_sgen_check_consistency ();
- workers_start_all_workers ();
+ mono_sgen_workers_start_all_workers ();
/*
- * Walk all the roots and copy the young objects to the old
- * generation, starting from to_space.
- *
- * The global remsets must be processed before the workers start
- * marking because they might add global remsets.
+ * Perform the sequential part of remembered set scanning.
+ * This usually involves scanning global information that might later be produced by evacuation.
*/
- scan_from_global_remsets (nursery_start, nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ if (remset.begin_scan_remsets)
+ remset.begin_scan_remsets (mono_sgen_get_nursery_start (), nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
- workers_start_marking ();
+ mono_sgen_workers_start_marking ();
- sfrjd.heap_start = nursery_start;
- sfrjd.heap_end = nursery_next;
- workers_enqueue_job (job_scan_from_remsets, &sfrjd);
+ frssjd.heap_start = mono_sgen_get_nursery_start ();
+ frssjd.heap_end = nursery_next;
+ mono_sgen_workers_enqueue_job (job_finish_remembered_set_scan, &frssjd);
/* we don't have complete write barrier yet, so we scan all the old generation sections */
TV_GETTIME (btv);
- time_minor_scan_remsets += TV_ELAPSED_MS (atv, btv);
+ time_minor_scan_remsets += TV_ELAPSED (atv, btv);
DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (atv, btv)));
- if (use_cardtable) {
- atv = btv;
- card_tables_collect_stats (TRUE);
- scan_from_card_tables (nursery_start, nursery_next, WORKERS_DISTRIBUTE_GRAY_QUEUE);
- TV_GETTIME (btv);
- time_minor_scan_card_table += TV_ELAPSED_MS (atv, btv);
- }
-
- if (!collection_is_parallel ())
- drain_gray_stack (&gray_queue, -1);
+ if (!mono_sgen_collection_is_parallel ())
+ mono_sgen_drain_gray_stack (&gray_queue, -1);
if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
report_registered_roots ();
if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
report_finalizer_roots ();
TV_GETTIME (atv);
- time_minor_scan_pinned += TV_ELAPSED_MS (btv, atv);
+ time_minor_scan_pinned += TV_ELAPSED (btv, atv);
/* registered roots, this includes static fields */
- scrrjd_normal.func = collection_is_parallel () ? major_collector.copy_object : major_collector.nopar_copy_object;
- scrrjd_normal.heap_start = nursery_start;
+ scrrjd_normal.func = mono_sgen_collection_is_parallel () ? major_collector.copy_object : major_collector.nopar_copy_object;
+ scrrjd_normal.heap_start = mono_sgen_get_nursery_start ();
scrrjd_normal.heap_end = nursery_next;
scrrjd_normal.root_type = ROOT_TYPE_NORMAL;
- workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
+ mono_sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
- scrrjd_wbarrier.func = collection_is_parallel () ? major_collector.copy_object : major_collector.nopar_copy_object;
- scrrjd_wbarrier.heap_start = nursery_start;
+ scrrjd_wbarrier.func = mono_sgen_collection_is_parallel () ? major_collector.copy_object : major_collector.nopar_copy_object;
+ scrrjd_wbarrier.heap_start = mono_sgen_get_nursery_start ();
scrrjd_wbarrier.heap_end = nursery_next;
scrrjd_wbarrier.root_type = ROOT_TYPE_WBARRIER;
- workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
+ mono_sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
TV_GETTIME (btv);
- time_minor_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
+ time_minor_scan_registered_roots += TV_ELAPSED (atv, btv);
/* thread data */
- stdjd.heap_start = nursery_start;
+ stdjd.heap_start = mono_sgen_get_nursery_start ();
stdjd.heap_end = nursery_next;
- workers_enqueue_job (job_scan_thread_data, &stdjd);
+ mono_sgen_workers_enqueue_job (job_scan_thread_data, &stdjd);
TV_GETTIME (atv);
- time_minor_scan_thread_data += TV_ELAPSED_MS (btv, atv);
+ time_minor_scan_thread_data += TV_ELAPSED (btv, atv);
btv = atv;
- if (collection_is_parallel ()) {
- while (!gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
- workers_distribute_gray_queue_sections ();
+ if (mono_sgen_collection_is_parallel ()) {
+ while (!mono_sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
+ mono_sgen_workers_distribute_gray_queue_sections ();
g_usleep (1000);
}
}
- workers_join ();
+ mono_sgen_workers_join ();
- if (collection_is_parallel ())
- g_assert (gray_object_queue_is_empty (&gray_queue));
+ if (mono_sgen_collection_is_parallel ())
+ g_assert (mono_sgen_gray_object_queue_is_empty (&gray_queue));
- finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY, &gray_queue);
+ finish_gray_stack (mono_sgen_get_nursery_start (), nursery_next, GENERATION_NURSERY, &gray_queue);
TV_GETTIME (atv);
- time_minor_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
+ time_minor_finish_gray_stack += TV_ELAPSED (btv, atv);
mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
/*
* worker data here instead of earlier when we joined the
* workers.
*/
- if (major_collector.reset_worker_data)
- major_collector.reset_worker_data (workers_gc_thread_data.major_collector_data);
+ mono_sgen_workers_reset_data ();
if (objects_pinned) {
- optimize_pin_queue (0);
- nursery_section->pin_queue_start = pin_queue;
- nursery_section->pin_queue_num_entries = next_pin_slot;
+ mono_sgen_optimize_pin_queue (0);
+ mono_sgen_pinning_setup_section (nursery_section);
}
/* walk the pin_queue, build up the fragment list of free memory, unmark
* next allocations.
*/
mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_START, 0);
- fragment_total = mono_sgen_build_nursery_fragments (nursery_section, pin_queue, next_pin_slot);
+ fragment_total = mono_sgen_build_nursery_fragments (nursery_section, nursery_section->pin_queue_start, nursery_section->pin_queue_num_entries);
if (!fragment_total)
degraded_mode = 1;
/* Clear TLABs for all threads */
- clear_tlabs ();
+ mono_sgen_clear_tlabs ();
mono_profiler_gc_event (MONO_GC_EVENT_RECLAIM_END, 0);
TV_GETTIME (btv);
- time_minor_fragment_creation += TV_ELAPSED_MS (atv, btv);
+ time_minor_fragment_creation += TV_ELAPSED (atv, btv);
DEBUG (2, fprintf (gc_debug_file, "Fragment creation: %d usecs, %lu bytes available\n", TV_ELAPSED (atv, btv), (unsigned long)fragment_total));
if (consistency_check_at_minor_collection)
- check_major_refs ();
+ mono_sgen_check_major_refs ();
major_collector.finish_nursery_collection ();
mono_stats.minor_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
if (heap_dump_file)
- dump_heap ("minor", num_minor_gcs - 1, NULL);
+ dump_heap ("minor", stat_minor_gcs - 1, NULL);
/* prepare the pin queue for the next collection */
- last_num_pinned = next_pin_slot;
- next_pin_slot = 0;
+ mono_sgen_finish_pinning ();
if (fin_ready_list || critical_fin_list) {
DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
mono_gc_finalize_notify ();
}
- pin_stats_reset ();
+ mono_sgen_pin_stats_reset ();
- g_assert (gray_object_queue_is_empty (&gray_queue));
+ g_assert (mono_sgen_gray_object_queue_is_empty (&gray_queue));
- if (use_cardtable)
- card_tables_collect_stats (FALSE);
+ if (remset.finish_minor_collection)
+ remset.finish_minor_collection ();
check_scan_starts ();
return needs_major;
}
+/*
+ * Perform a minor (nursery) collection with the world stopped.
+ * Presumably the GC lock is already held by the caller (the `_no_lock`
+ * suffix suggests so) -- TODO confirm against callers.
+ * Fires the profiler GC start/end events and reports elapsed time
+ * (100ns ticks converted to usecs) through MONO_TRACE_GC.
+ */
+void
+mono_sgen_collect_nursery_no_lock (size_t requested_size)
+{
+	gint64 gc_start_time;
+
+	mono_profiler_gc_event (MONO_GC_EVENT_START, 0);
+	gc_start_time = mono_100ns_ticks ();
+
+	stop_world (0);
+	collect_nursery (requested_size);
+	restart_world (0);
+
+	/* mono_100ns_ticks () yields 100ns units, hence the /10 to get usecs. */
+	mono_trace_message (MONO_TRACE_GC, "minor gc took %d usecs", (mono_100ns_ticks () - gc_start_time) / 10);
+	mono_profiler_gc_event (MONO_GC_EVENT_END, 0);
+}
+
+
typedef struct
{
FinalizeReadyEntry *list;
scan_finalizer_entries (major_collector.copy_or_mark_object,
job_data->list,
- job_gray_queue (worker_data));
+ mono_sgen_workers_get_job_gray_queue (worker_data));
}
static gboolean
binary_protocol_collection (GENERATION_OLD);
check_scan_starts ();
- gray_object_queue_init (&gray_queue);
- workers_init_distribute_gray_queue ();
+ mono_sgen_gray_object_queue_init (&gray_queue);
+ mono_sgen_workers_init_distribute_gray_queue ();
degraded_mode = 0;
- DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", num_major_gcs));
- num_major_gcs++;
+ DEBUG (1, fprintf (gc_debug_file, "Start major collection %d\n", stat_major_gcs));
+ stat_major_gcs++;
mono_stats.major_gc_count ++;
/* world must be stopped already */
mono_sgen_clear_nursery_fragments ();
TV_GETTIME (btv);
- time_major_pre_collection_fragment_clear += TV_ELAPSED_MS (atv, btv);
+ time_major_pre_collection_fragment_clear += TV_ELAPSED (atv, btv);
- nursery_section->next_data = nursery_end;
+ nursery_section->next_data = mono_sgen_get_nursery_end ();
/* we should also coalesce scanning from sections close to each other
* and deal with pointers outside of the sections later.
*/
if (xdomain_checks)
check_for_xdomain_refs ();
- /* The remsets are not useful for a major collection */
- clear_remsets ();
- global_remset_cache_clear ();
- if (use_cardtable)
- card_table_clear ();
+ /* Remsets are not useful for a major collection */
+ remset.prepare_for_major_collection ();
process_fin_stage_entries ();
process_dislink_stage_entries ();
TV_GETTIME (atv);
- init_pinning ();
+ mono_sgen_init_pinning ();
DEBUG (6, fprintf (gc_debug_file, "Collecting pinned addresses\n"));
pin_from_roots ((void*)lowest_heap_address, (void*)highest_heap_address, WORKERS_DISTRIBUTE_GRAY_QUEUE);
- optimize_pin_queue (0);
+ mono_sgen_optimize_pin_queue (0);
/*
* pin_queue now contains all candidate pointers, sorted and
DEBUG (6, fprintf (gc_debug_file, "Pinning from large objects\n"));
for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) {
int dummy;
+ gboolean profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
+ GCRootReport report;
+ report.count = 0;
if (mono_sgen_find_optimized_pin_queue_area (bigobj->data, (char*)bigobj->data + bigobj->size, &dummy)) {
binary_protocol_pin (bigobj->data, (gpointer)LOAD_VTABLE (bigobj->data), safe_object_get_size (bigobj->data));
pin_object (bigobj->data);
if (G_UNLIKELY (do_pin_stats))
mono_sgen_pin_stats_register_object ((char*) bigobj->data, safe_object_get_size ((MonoObject*) bigobj->data));
DEBUG (6, fprintf (gc_debug_file, "Marked large object %p (%s) size: %lu from roots\n", bigobj->data, safe_name (bigobj->data), (unsigned long)bigobj->size));
+
+ if (profile_roots)
+ add_profile_gc_root (&report, bigobj->data, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
}
+ if (profile_roots)
+ notify_gc_roots (&report);
}
/* second pass for the sections */
mono_sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
- old_next_pin_slot = next_pin_slot;
+ old_next_pin_slot = mono_sgen_get_pinned_count ();
TV_GETTIME (btv);
- time_major_pinning += TV_ELAPSED_MS (atv, btv);
- DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", next_pin_slot, TV_ELAPSED (atv, btv)));
- DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", next_pin_slot));
+ time_major_pinning += TV_ELAPSED (atv, btv);
+ DEBUG (2, fprintf (gc_debug_file, "Finding pinned pointers: %d in %d usecs\n", mono_sgen_get_pinned_count (), TV_ELAPSED (atv, btv)));
+ DEBUG (4, fprintf (gc_debug_file, "Start scan with %d pinned objects\n", mono_sgen_get_pinned_count ()));
major_collector.init_to_space ();
main_gc_thread = mono_native_thread_self ();
#endif
- workers_start_all_workers ();
- workers_start_marking ();
+ mono_sgen_workers_start_all_workers ();
+ mono_sgen_workers_start_marking ();
if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
report_registered_roots ();
TV_GETTIME (atv);
- time_major_scan_pinned += TV_ELAPSED_MS (btv, atv);
+ time_major_scan_pinned += TV_ELAPSED (btv, atv);
/* registered roots, this includes static fields */
scrrjd_normal.func = major_collector.copy_or_mark_object;
scrrjd_normal.heap_start = heap_start;
scrrjd_normal.heap_end = heap_end;
scrrjd_normal.root_type = ROOT_TYPE_NORMAL;
- workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
+ mono_sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_normal);
scrrjd_wbarrier.func = major_collector.copy_or_mark_object;
scrrjd_wbarrier.heap_start = heap_start;
scrrjd_wbarrier.heap_end = heap_end;
scrrjd_wbarrier.root_type = ROOT_TYPE_WBARRIER;
- workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
+ mono_sgen_workers_enqueue_job (job_scan_from_registered_roots, &scrrjd_wbarrier);
TV_GETTIME (btv);
- time_major_scan_registered_roots += TV_ELAPSED_MS (atv, btv);
+ time_major_scan_registered_roots += TV_ELAPSED (atv, btv);
/* Threads */
stdjd.heap_start = heap_start;
stdjd.heap_end = heap_end;
- workers_enqueue_job (job_scan_thread_data, &stdjd);
+ mono_sgen_workers_enqueue_job (job_scan_thread_data, &stdjd);
TV_GETTIME (atv);
- time_major_scan_thread_data += TV_ELAPSED_MS (btv, atv);
+ time_major_scan_thread_data += TV_ELAPSED (btv, atv);
TV_GETTIME (btv);
- time_major_scan_alloc_pinned += TV_ELAPSED_MS (atv, btv);
+ time_major_scan_alloc_pinned += TV_ELAPSED (atv, btv);
if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
report_finalizer_roots ();
/* scan the list of objects ready for finalization */
sfejd_fin_ready.list = fin_ready_list;
- workers_enqueue_job (job_scan_finalizer_entries, &sfejd_fin_ready);
+ mono_sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_fin_ready);
sfejd_critical_fin.list = critical_fin_list;
- workers_enqueue_job (job_scan_finalizer_entries, &sfejd_critical_fin);
+ mono_sgen_workers_enqueue_job (job_scan_finalizer_entries, &sfejd_critical_fin);
TV_GETTIME (atv);
- time_major_scan_finalized += TV_ELAPSED_MS (btv, atv);
+ time_major_scan_finalized += TV_ELAPSED (btv, atv);
DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));
TV_GETTIME (btv);
- time_major_scan_big_objects += TV_ELAPSED_MS (atv, btv);
+ time_major_scan_big_objects += TV_ELAPSED (atv, btv);
if (major_collector.is_parallel) {
- while (!gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
- workers_distribute_gray_queue_sections ();
+ while (!mono_sgen_gray_object_queue_is_empty (WORKERS_DISTRIBUTE_GRAY_QUEUE)) {
+ mono_sgen_workers_distribute_gray_queue_sections ();
g_usleep (1000);
}
}
- workers_join ();
+ mono_sgen_workers_join ();
#ifdef SGEN_DEBUG_INTERNAL_ALLOC
main_gc_thread = NULL;
#endif
if (major_collector.is_parallel)
- g_assert (gray_object_queue_is_empty (&gray_queue));
+ g_assert (mono_sgen_gray_object_queue_is_empty (&gray_queue));
/* all the objects in the heap */
finish_gray_stack (heap_start, heap_end, GENERATION_OLD, &gray_queue);
TV_GETTIME (atv);
- time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
+ time_major_finish_gray_stack += TV_ELAPSED (btv, atv);
/*
* The (single-threaded) finalization code might have done
* worker data here instead of earlier when we joined the
* workers.
*/
- if (major_collector.reset_worker_data)
- major_collector.reset_worker_data (workers_gc_thread_data.major_collector_data);
+ mono_sgen_workers_reset_data ();
if (objects_pinned) {
/*This is slow, but we just OOM'd*/
mono_sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
- optimize_pin_queue (0);
+ mono_sgen_optimize_pin_queue (0);
mono_sgen_find_section_pin_queue_start_end (nursery_section);
objects_pinned = 0;
}
reset_heap_boundaries ();
- mono_sgen_update_heap_boundaries ((mword)nursery_start, (mword)nursery_end);
+ mono_sgen_update_heap_boundaries ((mword)mono_sgen_get_nursery_start (), (mword)mono_sgen_get_nursery_end ());
/* sweep the big objects list */
prevbo = NULL;
}
TV_GETTIME (btv);
- time_major_free_bigobjs += TV_ELAPSED_MS (atv, btv);
+ time_major_free_bigobjs += TV_ELAPSED (atv, btv);
mono_sgen_los_sweep ();
TV_GETTIME (atv);
- time_major_los_sweep += TV_ELAPSED_MS (btv, atv);
+ time_major_los_sweep += TV_ELAPSED (btv, atv);
major_collector.sweep ();
TV_GETTIME (btv);
- time_major_sweep += TV_ELAPSED_MS (atv, btv);
+ time_major_sweep += TV_ELAPSED (atv, btv);
/* walk the pin_queue, build up the fragment list of free memory, unmark
* pinned objects as we go, memzero() the empty fragments so they are ready for the
degraded_mode = 1;
/* Clear TLABs for all threads */
- clear_tlabs ();
+ mono_sgen_clear_tlabs ();
TV_GETTIME (atv);
- time_major_fragment_creation += TV_ELAPSED_MS (btv, atv);
+ time_major_fragment_creation += TV_ELAPSED (btv, atv);
TV_GETTIME (all_btv);
mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
if (heap_dump_file)
- dump_heap ("major", num_major_gcs - 1, reason);
+ dump_heap ("major", stat_major_gcs - 1, reason);
/* prepare the pin queue for the next collection */
- next_pin_slot = 0;
+ mono_sgen_finish_pinning ();
+
if (fin_ready_list || critical_fin_list) {
DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
mono_gc_finalize_notify ();
}
- pin_stats_reset ();
+ mono_sgen_pin_stats_reset ();
- g_assert (gray_object_queue_is_empty (&gray_queue));
+ g_assert (mono_sgen_gray_object_queue_is_empty (&gray_queue));
try_calculate_minor_collection_allowance (TRUE);
/* this also sets the proper pointers for the next allocation */
if (!mono_sgen_can_alloc_size (size)) {
- int i;
/* TypeBuilder and MonoMethod are killing mcs with fragmentation */
- DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", size, last_num_pinned));
- for (i = 0; i < last_num_pinned; ++i) {
- DEBUG (3, fprintf (gc_debug_file, "Bastard pinning obj %p (%s), size: %d\n", pin_queue [i], safe_name (pin_queue [i]), safe_object_get_size (pin_queue [i])));
- }
+ DEBUG (1, fprintf (gc_debug_file, "nursery collection didn't find enough room for %zd alloc (%d pinned)\n", size, mono_sgen_get_pinned_count ()));
+ mono_sgen_dump_pin_queue ();
degraded_mode = 1;
}
mono_profiler_gc_event (MONO_GC_EVENT_END, 0);
//report_internal_mem_usage ();
}
+void
+mono_sgen_minor_collect_or_expand_inner (size_t size)
+{
+ minor_collect_or_expand_inner (size);
+}
+
/*
* ######################################################################
* ######## Memory allocation from the OS
/*
* ######################################################################
- * ######## Object allocation
+ * ######## Finalization support
* ######################################################################
- * This section of code deals with allocating memory for objects.
- * There are several ways:
- * *) allocate large objects
- * *) allocate normal objects
- * *) fast lock-free allocation
- * *) allocation of pinned objects
*/
-static inline void
-set_nursery_scan_start (char *p)
+/*
+ * this is valid for the nursery: if the object has been forwarded it means it's
+ * still referenced from a root. If it is pinned it's still alive as well.
+ * Return TRUE if @obj is ready to be finalized.
+ */
+#define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj))
+
+
+gboolean
+mono_sgen_gc_is_object_ready_for_finalization (void *object)
{
- int idx = (p - (char*)nursery_section->data) / SCAN_START_SIZE;
- char *old = nursery_section->scan_starts [idx];
- if (!old || old > p)
- nursery_section->scan_starts [idx] = p;
+ return !major_collector.is_object_live (object) && object_is_fin_ready (object);
}
-static void*
-alloc_degraded (MonoVTable *vtable, size_t size, gboolean for_mature)
-{
- static int last_major_gc_warned = -1;
- static int num_degraded = 0;
-
- if (!for_mature) {
- if (last_major_gc_warned < num_major_gcs) {
- ++num_degraded;
- if (num_degraded == 1 || num_degraded == 3)
- fprintf (stderr, "Warning: Degraded allocation. Consider increasing nursery-size if the warning persists.\n");
- else if (num_degraded == 10)
- fprintf (stderr, "Warning: Repeated degraded allocation. Consider increasing nursery-size.\n");
- last_major_gc_warned = num_major_gcs;
- }
- }
+static gboolean
+has_critical_finalizer (MonoObject *obj)
+{
+ MonoClass *class;
- if (need_major_collection (0)) {
- gint64 gc_start_time;
+ if (!mono_defaults.critical_finalizer_object)
+ return FALSE;
- mono_profiler_gc_event (MONO_GC_EVENT_START, 1);
- gc_start_time = mono_100ns_ticks ();
+ class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
- stop_world (1);
- major_collection ("degraded overflow");
- restart_world (1);
+ return mono_class_has_parent_fast (class, mono_defaults.critical_finalizer_object);
+}
- mono_trace_message (MONO_TRACE_GC, "major gc took %d usecs", (mono_100ns_ticks () - gc_start_time) / 10);
- mono_profiler_gc_event (MONO_GC_EVENT_END, 1);
+static void
+queue_finalization_entry (MonoObject *obj) {
+ FinalizeReadyEntry *entry = mono_sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
+ entry->object = obj;
+ if (has_critical_finalizer (obj)) {
+ entry->next = critical_fin_list;
+ critical_fin_list = entry;
+ } else {
+ entry->next = fin_ready_list;
+ fin_ready_list = entry;
}
-
- return major_collector.alloc_degraded (vtable, size);
}
-/*
- * Provide a variant that takes just the vtable for small fixed-size objects.
- * The aligned size is already computed and stored in vt->gc_descr.
- * Note: every SCAN_START_SIZE or so we are given the chance to do some special
- * processing. We can keep track of where objects start, for example,
- * so when we scan the thread stacks for pinned objects, we can start
- * a search for the pinned object in SCAN_START_SIZE chunks.
- */
-static void*
-mono_gc_alloc_obj_nolock (MonoVTable *vtable, size_t size)
+static int
+object_is_reachable (char *object, char *start, char *end)
{
- /* FIXME: handle OOM */
- void **p;
- char *new_next;
- TLAB_ACCESS_INIT;
-
- HEAVY_STAT (++stat_objects_alloced);
- if (size <= MAX_SMALL_OBJ_SIZE)
- HEAVY_STAT (stat_bytes_alloced += size);
- else
- HEAVY_STAT (stat_bytes_alloced_los += size);
+ /* This happens for non-nursery objects during minor collections. We just treat all objects as alive. */
+ if (object < start || object >= end)
+ return TRUE;
+ return !object_is_fin_ready (object) || major_collector.is_object_live (object);
+}
- size = ALIGN_UP (size);
+#include "sgen-fin-weak-hash.c"
- g_assert (vtable->gc_descr);
+gboolean
+mono_sgen_object_is_live (void *obj)
+{
+ if (ptr_in_nursery (obj))
+ return object_is_pinned (obj);
+ if (current_collection_generation == GENERATION_NURSERY)
+ return FALSE;
+ return major_collector.is_object_live (obj);
+}
- if (G_UNLIKELY (collect_before_allocs)) {
- static int alloc_count;
+/* LOCKING: requires that the GC lock is held */
+static void
+null_ephemerons_for_domain (MonoDomain *domain)
+{
+ EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
- InterlockedIncrement (&alloc_count);
- if (((alloc_count % collect_before_allocs) == 0) && nursery_section) {
- gint64 gc_start_time;
+ while (current) {
+ MonoObject *object = (MonoObject*)current->array;
- mono_profiler_gc_event (MONO_GC_EVENT_START, 0);
- gc_start_time = mono_100ns_ticks ();
+ if (object && !object->vtable) {
+ EphemeronLinkNode *tmp = current;
- stop_world (0);
- collect_nursery (0);
- restart_world (0);
+ if (prev)
+ prev->next = current->next;
+ else
+ ephemeron_list = current->next;
- mono_trace_message (MONO_TRACE_GC, "minor gc took %d usecs", (mono_100ns_ticks () - gc_start_time) / 10);
- mono_profiler_gc_event (MONO_GC_EVENT_END, 0);
- if (!degraded_mode && !mono_sgen_can_alloc_size (size) && size <= MAX_SMALL_OBJ_SIZE) {
- // FIXME:
- g_assert_not_reached ();
- }
+ current = current->next;
+ mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
+ } else {
+ prev = current;
+ current = current->next;
}
}
+}
- /*
- * We must already have the lock here instead of after the
- * fast path because we might be interrupted in the fast path
- * (after confirming that new_next < TLAB_TEMP_END) by the GC,
- * and we'll end up allocating an object in a fragment which
- * no longer belongs to us.
- *
- * The managed allocator does not do this, but it's treated
- * specially by the world-stopping code.
- */
+/* LOCKING: requires that the GC lock is held */
+static void
+clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
+{
+ int was_in_nursery, was_promoted;
+ EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
+ MonoArray *array;
+ Ephemeron *cur, *array_end;
+ char *tombstone;
- if (size > MAX_SMALL_OBJ_SIZE) {
- p = mono_sgen_los_alloc_large_inner (vtable, size);
- } else {
- /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
-
- p = (void**)TLAB_NEXT;
- /* FIXME: handle overflow */
- new_next = (char*)p + size;
- TLAB_NEXT = new_next;
-
- if (G_LIKELY (new_next < TLAB_TEMP_END)) {
- /* Fast path */
-
- /*
- * FIXME: We might need a memory barrier here so the change to tlab_next is
- * visible before the vtable store.
- */
-
- DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
- binary_protocol_alloc (p , vtable, size);
- g_assert (*p == NULL);
- mono_atomic_store_seq (p, vtable);
-
- return p;
- }
-
- /* Slow path */
-
- /* there are two cases: the object is too big or we run out of space in the TLAB */
- /* we also reach here when the thread does its first allocation after a minor
- * collection, since the tlab_ variables are initialized to NULL.
- * there can be another case (from ORP), if we cooperate with the runtime a bit:
- * objects that need finalizers can have the high bit set in their size
- * so the above check fails and we can readily add the object to the queue.
- * This avoids taking again the GC lock when registering, but this is moot when
- * doing thread-local allocation, so it may not be a good idea.
- */
- if (TLAB_NEXT >= TLAB_REAL_END) {
- int available_in_tlab;
- /*
- * Run out of space in the TLAB. When this happens, some amount of space
- * remains in the TLAB, but not enough to satisfy the current allocation
- * request. Currently, we retire the TLAB in all cases, later we could
- * keep it if the remaining space is above a treshold, and satisfy the
- * allocation directly from the nursery.
- */
- TLAB_NEXT -= size;
- /* when running in degraded mode, we continue allocing that way
- * for a while, to decrease the number of useless nursery collections.
- */
- if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE) {
- p = alloc_degraded (vtable, size, FALSE);
- binary_protocol_alloc_degraded (p, vtable, size);
- return p;
- }
-
- available_in_tlab = TLAB_REAL_END - TLAB_NEXT;
- if (size > tlab_size || available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
- /* Allocate directly from the nursery */
- do {
- p = mono_sgen_nursery_alloc (size);
- if (!p) {
- minor_collect_or_expand_inner (size);
- if (degraded_mode) {
- p = alloc_degraded (vtable, size, FALSE);
- binary_protocol_alloc_degraded (p, vtable, size);
- return p;
- } else {
- p = mono_sgen_nursery_alloc (size);
- }
- }
- } while (!p);
- if (!p) {
- // no space left
- g_assert (0);
- }
-
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- memset (p, 0, size);
- }
- } else {
- int alloc_size = 0;
- if (TLAB_START)
- DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));
- mono_sgen_nursery_retire_region (p, available_in_tlab);
-
- do {
- p = mono_sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
- if (!p) {
- minor_collect_or_expand_inner (tlab_size);
- if (degraded_mode) {
- p = alloc_degraded (vtable, size, FALSE);
- binary_protocol_alloc_degraded (p, vtable, size);
- return p;
- } else {
- p = mono_sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
- }
- }
- } while (!p);
-
- if (!p) {
- // no space left
- g_assert (0);
- }
-
- /* Allocate a new TLAB from the current nursery fragment */
- TLAB_START = (char*)p;
- TLAB_NEXT = TLAB_START;
- TLAB_REAL_END = TLAB_START + alloc_size;
- TLAB_TEMP_END = TLAB_START + MIN (SCAN_START_SIZE, alloc_size);
-
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
- memset (TLAB_START, 0, alloc_size);
- }
-
- /* Allocate from the TLAB */
- p = (void*)TLAB_NEXT;
- TLAB_NEXT += size;
- set_nursery_scan_start ((char*)p);
- }
- } else {
- /* Reached tlab_temp_end */
-
- /* record the scan start so we can find pinned objects more easily */
- set_nursery_scan_start ((char*)p);
- /* we just bump tlab_temp_end as well */
- TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SCAN_START_SIZE);
- DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
- }
- }
-
- if (G_LIKELY (p)) {
- DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
- binary_protocol_alloc (p, vtable, size);
- mono_atomic_store_seq (p, vtable);
- }
-
- return p;
-}
-
-static void*
-mono_gc_try_alloc_obj_nolock (MonoVTable *vtable, size_t size)
-{
- void **p;
- char *new_next;
- TLAB_ACCESS_INIT;
-
- size = ALIGN_UP (size);
-
- g_assert (vtable->gc_descr);
- if (size > MAX_SMALL_OBJ_SIZE)
- return NULL;
-
- if (G_UNLIKELY (size > tlab_size)) {
- /* Allocate directly from the nursery */
- p = mono_sgen_nursery_alloc (size);
- if (!p)
- return NULL;
- set_nursery_scan_start ((char*)p);
-
- /*FIXME we should use weak memory ops here. Should help specially on x86. */
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
- memset (p, 0, size);
- } else {
- int available_in_tlab;
- char *real_end;
- /* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
-
- p = (void**)TLAB_NEXT;
- /* FIXME: handle overflow */
- new_next = (char*)p + size;
-
- real_end = TLAB_REAL_END;
- available_in_tlab = real_end - (char*)p;
-
- if (G_LIKELY (new_next < real_end)) {
- TLAB_NEXT = new_next;
-
- /* Second case, we overflowed temp end */
- if (G_UNLIKELY (new_next >= TLAB_TEMP_END)) {
- set_nursery_scan_start (new_next);
- /* we just bump tlab_temp_end as well */
- TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SCAN_START_SIZE);
- DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
- }
- } else if (available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
- /* Allocate directly from the nursery */
- p = mono_sgen_nursery_alloc (size);
- if (!p)
- return NULL;
-
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
- memset (p, 0, size);
- } else {
- int alloc_size = 0;
-
- mono_sgen_nursery_retire_region (p, available_in_tlab);
- new_next = mono_sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
- p = (void**)new_next;
- if (!p)
- return NULL;
-
- TLAB_START = (char*)new_next;
- TLAB_NEXT = new_next + size;
- TLAB_REAL_END = new_next + alloc_size;
- TLAB_TEMP_END = new_next + MIN (SCAN_START_SIZE, alloc_size);
- set_nursery_scan_start ((char*)p);
-
- if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
- memset (new_next, 0, alloc_size);
- }
- }
-
- HEAVY_STAT (++stat_objects_alloced);
- HEAVY_STAT (stat_bytes_alloced += size);
-
- DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
- binary_protocol_alloc (p, vtable, size);
- g_assert (*p == NULL); /* FIXME disable this in non debug builds */
-
- mono_atomic_store_seq (p, vtable);
-
- return p;
-}
-
-void*
-mono_gc_alloc_obj (MonoVTable *vtable, size_t size)
-{
- void *res;
-#ifndef DISABLE_CRITICAL_REGION
- TLAB_ACCESS_INIT;
- ENTER_CRITICAL_REGION;
- res = mono_gc_try_alloc_obj_nolock (vtable, size);
- if (res) {
- EXIT_CRITICAL_REGION;
- return res;
- }
- EXIT_CRITICAL_REGION;
-#endif
- LOCK_GC;
- res = mono_gc_alloc_obj_nolock (vtable, size);
- UNLOCK_GC;
- if (G_UNLIKELY (!res))
- return mono_gc_out_of_memory (size);
- return res;
-}
-
-void*
-mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
-{
- MonoArray *arr;
-#ifndef DISABLE_CRITICAL_REGION
- TLAB_ACCESS_INIT;
- ENTER_CRITICAL_REGION;
- arr = mono_gc_try_alloc_obj_nolock (vtable, size);
- if (arr) {
- /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
- arr->max_length = max_length;
- EXIT_CRITICAL_REGION;
- return arr;
- }
- EXIT_CRITICAL_REGION;
-#endif
-
- LOCK_GC;
-
- arr = mono_gc_alloc_obj_nolock (vtable, size);
- if (G_UNLIKELY (!arr)) {
- UNLOCK_GC;
- return mono_gc_out_of_memory (size);
- }
-
- arr->max_length = max_length;
-
- UNLOCK_GC;
-
- return arr;
-}
-
-void*
-mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
-{
- MonoArray *arr;
- MonoArrayBounds *bounds;
-
- LOCK_GC;
-
- arr = mono_gc_alloc_obj_nolock (vtable, size);
- if (G_UNLIKELY (!arr)) {
- UNLOCK_GC;
- return mono_gc_out_of_memory (size);
- }
-
- arr->max_length = max_length;
-
- bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
- arr->bounds = bounds;
-
- UNLOCK_GC;
-
- return arr;
-}
-
-void*
-mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
-{
- MonoString *str;
-#ifndef DISABLE_CRITICAL_REGION
- TLAB_ACCESS_INIT;
- ENTER_CRITICAL_REGION;
- str = mono_gc_try_alloc_obj_nolock (vtable, size);
- if (str) {
- /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
- str->length = len;
- EXIT_CRITICAL_REGION;
- return str;
- }
- EXIT_CRITICAL_REGION;
-#endif
-
- LOCK_GC;
-
- str = mono_gc_alloc_obj_nolock (vtable, size);
- if (G_UNLIKELY (!str)) {
- UNLOCK_GC;
- return mono_gc_out_of_memory (size);
- }
-
- str->length = len;
-
- UNLOCK_GC;
-
- return str;
-}
-
-/*
- * To be used for interned strings and possibly MonoThread, reflection handles.
- * We may want to explicitly free these objects.
- */
-void*
-mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
-{
- void **p;
- size = ALIGN_UP (size);
- LOCK_GC;
-
- if (size > MAX_SMALL_OBJ_SIZE) {
- /* large objects are always pinned anyway */
- p = mono_sgen_los_alloc_large_inner (vtable, size);
- } else {
- DEBUG (9, g_assert (vtable->klass->inited));
- p = major_collector.alloc_small_pinned_obj (size, SGEN_VTABLE_HAS_REFERENCES (vtable));
- }
- if (G_LIKELY (p)) {
- DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
- binary_protocol_alloc_pinned (p, vtable, size);
- mono_atomic_store_seq (p, vtable);
- }
- UNLOCK_GC;
- return p;
-}
-
-void*
-mono_gc_alloc_mature (MonoVTable *vtable)
-{
- void **res;
- size_t size = ALIGN_UP (vtable->klass->instance_size);
- LOCK_GC;
- res = alloc_degraded (vtable, size, TRUE);
- mono_atomic_store_seq (res, vtable);
- UNLOCK_GC;
- if (G_UNLIKELY (vtable->klass->has_finalize))
- mono_object_register_finalizer ((MonoObject*)res);
-
- return res;
-}
-
-/*
- * ######################################################################
- * ######## Finalization support
- * ######################################################################
- */
-
-/*
- * this is valid for the nursery: if the object has been forwarded it means it's
- * still refrenced from a root. If it is pinned it's still alive as well.
- * Return TRUE if @obj is ready to be finalized.
- */
-#define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj))
-
-
-gboolean
-mono_sgen_gc_is_object_ready_for_finalization (void *object)
-{
- return !major_collector.is_object_live (object) && object_is_fin_ready (object);
-}
-
-static gboolean
-has_critical_finalizer (MonoObject *obj)
-{
- MonoClass *class;
-
- if (!mono_defaults.critical_finalizer_object)
- return FALSE;
-
- class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
-
- return mono_class_has_parent (class, mono_defaults.critical_finalizer_object);
-}
-
-static void
-queue_finalization_entry (MonoObject *obj) {
- FinalizeReadyEntry *entry = mono_sgen_alloc_internal (INTERNAL_MEM_FINALIZE_READY_ENTRY);
- entry->object = obj;
- if (has_critical_finalizer (obj)) {
- entry->next = critical_fin_list;
- critical_fin_list = entry;
- } else {
- entry->next = fin_ready_list;
- fin_ready_list = entry;
- }
-}
-
-static int
-object_is_reachable (char *object, char *start, char *end)
-{
- /*This happens for non nursery objects during minor collections. We just treat all objects as alive.*/
- if (object < start || object >= end)
- return TRUE;
- return !object_is_fin_ready (object) || major_collector.is_object_live (object);
-}
-
-#include "sgen-fin-weak-hash.c"
-
-gboolean
-mono_sgen_object_is_live (void *obj)
-{
- if (ptr_in_nursery (obj))
- return object_is_pinned (obj);
- if (current_collection_generation == GENERATION_NURSERY)
- return FALSE;
- return major_collector.is_object_live (obj);
-}
-
-/* LOCKING: requires that the GC lock is held */
-static void
-null_ephemerons_for_domain (MonoDomain *domain)
-{
- EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
-
- while (current) {
- MonoObject *object = (MonoObject*)current->array;
-
- if (object && !object->vtable) {
- EphemeronLinkNode *tmp = current;
-
- if (prev)
- prev->next = current->next;
- else
- ephemeron_list = current->next;
-
- current = current->next;
- mono_sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
- } else {
- prev = current;
- current = current->next;
- }
- }
-}
-
-/* LOCKING: requires that the GC lock is held */
-static void
-clear_unreachable_ephemerons (CopyOrMarkObjectFunc copy_func, char *start, char *end, GrayQueue *queue)
-{
- int was_in_nursery, was_promoted;
- EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
- MonoArray *array;
- Ephemeron *cur, *array_end;
- char *tombstone;
-
- while (current) {
- char *object = current->array;
+ while (current) {
+ char *object = current->array;
if (!object_is_reachable (object, start, end)) {
EphemeronLinkNode *tmp = current;
void
mono_sgen_fill_thread_info_for_suspend (SgenThreadInfo *info)
{
-#ifdef HAVE_KW_THREAD
- /* update the remset info in the thread data structure */
- info->remset = remembered_set;
-#endif
+ if (remset.fill_thread_info_for_suspend)
+ remset.fill_thread_info_for_suspend (info);
}
-/*
- * Define this and use the "xdomain-checks" MONO_GC_DEBUG option to
- * have cross-domain checks in the write barrier.
- */
-//#define XDOMAIN_CHECKS_IN_WBARRIER
-
-#ifndef SGEN_BINARY_PROTOCOL
-#ifndef HEAVY_STATISTICS
-#define MANAGED_ALLOCATION
-#ifndef XDOMAIN_CHECKS_IN_WBARRIER
-#define MANAGED_WBARRIER
-#endif
-#endif
-#endif
-
static gboolean
is_ip_in_managed_allocator (MonoDomain *domain, gpointer ip);
allocator */
FOREACH_THREAD_SAFE (info) {
gboolean result;
- if (info->skip)
+ if (info->skip || info->gc_disabled)
continue;
if (!info->thread_is_dying && (!info->stack_start || info->in_critical_region ||
is_ip_in_managed_allocator (info->stopped_domain, info->stopped_ip))) {
UserCopyOrMarkData *data = mono_native_tls_get_value (user_copy_or_mark_key);
if (current_collection_generation == GENERATION_NURSERY) {
- if (collection_is_parallel ())
+ if (mono_sgen_collection_is_parallel ())
major_collector.copy_object (&obj, data->queue);
else
major_collector.nopar_copy_object (&obj, data->queue);
DEBUG (3, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
continue;
}
- DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %ld, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
+ if (info->gc_disabled) {
+ DEBUG (3, fprintf (gc_debug_file, "GC disabled for thread %p, range: %p-%p, size: %td\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
+ continue;
+ }
+ DEBUG (3, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %ld, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, mono_sgen_get_pinned_count ()));
if (!info->thread_is_dying) {
if (gc_callbacks.thread_mark_func && !conservative_stack_mark) {
UserCopyOrMarkData data = { NULL, queue };
return FALSE;
}
-static mword*
-handle_remset (mword *p, void *start_nursery, void *end_nursery, gboolean global, GrayQueue *queue)
+static void*
+sgen_thread_register (SgenThreadInfo* info, void *addr)
{
- void **ptr;
- mword count;
- mword desc;
+#ifndef HAVE_KW_THREAD
+ SgenThreadInfo *__thread_info__ = info;
+#endif
- if (global)
- HEAVY_STAT (++stat_global_remsets_processed);
- else
- HEAVY_STAT (++stat_local_remsets_processed);
-
- /* FIXME: exclude stack locations */
- switch ((*p) & REMSET_TYPE_MASK) {
- case REMSET_LOCATION:
- ptr = (void**)(*p);
- //__builtin_prefetch (ptr);
- if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery)) {
- gpointer old = *ptr;
- major_collector.copy_object (ptr, queue);
- DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
- if (old)
- binary_protocol_ptr_update (ptr, old, *ptr, (gpointer)LOAD_VTABLE (*ptr), safe_object_get_size (*ptr));
- if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
- /*
- * If the object is pinned, each reference to it from nonpinned objects
- * becomes part of the global remset, which can grow very large.
- */
- DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
- mono_sgen_add_to_global_remset (ptr);
- }
- } else {
- DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
- }
- return p + 1;
- case REMSET_RANGE:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
- return p + 2;
- count = p [1];
- while (count-- > 0) {
- major_collector.copy_object (ptr, queue);
- DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
- if (!global && *ptr >= start_nursery && *ptr < end_nursery)
- mono_sgen_add_to_global_remset (ptr);
- ++ptr;
- }
- return p + 2;
- case REMSET_OBJECT:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
- return p + 1;
- mono_sgen_get_minor_scan_object () ((char*)ptr, queue);
- return p + 1;
- case REMSET_VTYPE: {
- ScanVTypeFunc scan_vtype = mono_sgen_get_minor_scan_vtype ();
- size_t skip_size;
-
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery))
- return p + 4;
- desc = p [1];
- count = p [2];
- skip_size = p [3];
- while (count-- > 0) {
- scan_vtype ((char*)ptr, desc, queue);
- ptr = (void**)((char*)ptr + skip_size);
- }
- return p + 4;
- }
- default:
- g_assert_not_reached ();
- }
- return NULL;
-}
-
-#ifdef HEAVY_STATISTICS
-static mword*
-collect_store_remsets (RememberedSet *remset, mword *bumper)
-{
- mword *p = remset->data;
- mword last = 0;
- mword last1 = 0;
- mword last2 = 0;
-
- while (p < remset->store_next) {
- switch ((*p) & REMSET_TYPE_MASK) {
- case REMSET_LOCATION:
- *bumper++ = *p;
- if (*p == last)
- ++stat_saved_remsets_1;
- last = *p;
- if (*p == last1 || *p == last2) {
- ++stat_saved_remsets_2;
- } else {
- last2 = last1;
- last1 = *p;
- }
- p += 1;
- break;
- case REMSET_RANGE:
- p += 2;
- break;
- case REMSET_OBJECT:
- p += 1;
- break;
- case REMSET_VTYPE:
- p += 4;
- break;
- default:
- g_assert_not_reached ();
- }
- }
-
- return bumper;
-}
-
-static void
-remset_stats (void)
-{
- RememberedSet *remset;
- int size = 0;
- SgenThreadInfo *info;
- int i;
- mword *addresses, *bumper, *p, *r;
-
- FOREACH_THREAD (info) {
- for (remset = info->remset; remset; remset = remset->next)
- size += remset->store_next - remset->data;
- } END_FOREACH_THREAD
- for (remset = freed_thread_remsets; remset; remset = remset->next)
- size += remset->store_next - remset->data;
- for (remset = global_remset; remset; remset = remset->next)
- size += remset->store_next - remset->data;
-
- bumper = addresses = mono_sgen_alloc_internal_dynamic (sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
-
- FOREACH_THREAD (info) {
- for (remset = info->remset; remset; remset = remset->next)
- bumper = collect_store_remsets (remset, bumper);
- } END_FOREACH_THREAD
- for (remset = global_remset; remset; remset = remset->next)
- bumper = collect_store_remsets (remset, bumper);
- for (remset = freed_thread_remsets; remset; remset = remset->next)
- bumper = collect_store_remsets (remset, bumper);
-
- g_assert (bumper <= addresses + size);
-
- stat_store_remsets += bumper - addresses;
-
- sort_addresses ((void**)addresses, bumper - addresses);
- p = addresses;
- r = addresses + 1;
- while (r < bumper) {
- if (*r != *p)
- *++p = *r;
- ++r;
- }
-
- stat_store_remsets_unique += p - addresses;
-
- mono_sgen_free_internal_dynamic (addresses, sizeof (mword) * size, INTERNAL_MEM_STATISTICS);
-}
-#endif
-
-static void
-clear_thread_store_remset_buffer (SgenThreadInfo *info)
-{
- *info->store_remset_buffer_index_addr = 0;
- /* See the comment at the end of sgen_thread_unregister() */
- if (*info->store_remset_buffer_addr)
- memset (*info->store_remset_buffer_addr, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
-}
-
-static size_t
-remset_byte_size (RememberedSet *remset)
-{
- return sizeof (RememberedSet) + (remset->end_set - remset->data) * sizeof (gpointer);
-}
-
-static void
-scan_from_global_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue)
-{
- RememberedSet *remset;
- mword *p, *next_p, *store_pos;
-
- /* the global one */
- for (remset = global_remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
- store_pos = remset->data;
- for (p = remset->data; p < remset->store_next; p = next_p) {
- void **ptr = (void**)p [0];
-
- /*Ignore previously processed remset.*/
- if (!global_remset_location_was_not_added (ptr)) {
- next_p = p + 1;
- continue;
- }
-
- next_p = handle_remset (p, start_nursery, end_nursery, TRUE, queue);
-
- /*
- * Clear global remsets of locations which no longer point to the
- * nursery. Otherwise, they could grow indefinitely between major
- * collections.
- *
- * Since all global remsets are location remsets, we don't need to unmask the pointer.
- */
- if (ptr_in_nursery (*ptr)) {
- *store_pos ++ = p [0];
- HEAVY_STAT (++stat_global_remsets_readded);
- }
- }
-
- /* Truncate the remset */
- remset->store_next = store_pos;
- }
-}
-
-static void
-scan_from_remsets (void *start_nursery, void *end_nursery, GrayQueue *queue)
-{
- int i;
- SgenThreadInfo *info;
- RememberedSet *remset;
- GenericStoreRememberedSet *store_remset;
- mword *p;
-
-#ifdef HEAVY_STATISTICS
- remset_stats ();
-#endif
-
- /* the generic store ones */
- store_remset = generic_store_remsets;
- while (store_remset) {
- GenericStoreRememberedSet *next = store_remset->next;
-
- for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
- gpointer addr = store_remset->data [i];
- if (addr)
- handle_remset ((mword*)&addr, start_nursery, end_nursery, FALSE, queue);
- }
-
- mono_sgen_free_internal (store_remset, INTERNAL_MEM_STORE_REMSET);
-
- store_remset = next;
- }
- generic_store_remsets = NULL;
-
- /* the per-thread ones */
- FOREACH_THREAD (info) {
- RememberedSet *next;
- int j;
- for (remset = info->remset; remset; remset = next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
- for (p = remset->data; p < remset->store_next;)
- p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
- remset->store_next = remset->data;
- next = remset->next;
- remset->next = NULL;
- if (remset != info->remset) {
- DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
- mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
- }
- }
- for (j = 0; j < *info->store_remset_buffer_index_addr; ++j)
- handle_remset ((mword*)*info->store_remset_buffer_addr + j + 1, start_nursery, end_nursery, FALSE, queue);
- clear_thread_store_remset_buffer (info);
- } END_FOREACH_THREAD
-
- /* the freed thread ones */
- while (freed_thread_remsets) {
- RememberedSet *next;
- remset = freed_thread_remsets;
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
- for (p = remset->data; p < remset->store_next;)
- p = handle_remset (p, start_nursery, end_nursery, FALSE, queue);
- next = remset->next;
- DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
- mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
- freed_thread_remsets = next;
- }
-}
-
-/*
- * Clear the info in the remembered sets: we're doing a major collection, so
- * the per-thread ones are not needed and the global ones will be reconstructed
- * during the copy.
- */
-static void
-clear_remsets (void)
-{
- SgenThreadInfo *info;
- RememberedSet *remset, *next;
-
- /* the global list */
- for (remset = global_remset; remset; remset = next) {
- remset->store_next = remset->data;
- next = remset->next;
- remset->next = NULL;
- if (remset != global_remset) {
- DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
- mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
- }
- }
- /* the generic store ones */
- while (generic_store_remsets) {
- GenericStoreRememberedSet *gs_next = generic_store_remsets->next;
- mono_sgen_free_internal (generic_store_remsets, INTERNAL_MEM_STORE_REMSET);
- generic_store_remsets = gs_next;
- }
- /* the per-thread ones */
- FOREACH_THREAD (info) {
- for (remset = info->remset; remset; remset = next) {
- remset->store_next = remset->data;
- next = remset->next;
- remset->next = NULL;
- if (remset != info->remset) {
- DEBUG (3, fprintf (gc_debug_file, "Freed remset at %p\n", remset->data));
- mono_sgen_free_internal_dynamic (remset, remset_byte_size (remset), INTERNAL_MEM_REMSET);
- }
- }
- clear_thread_store_remset_buffer (info);
- } END_FOREACH_THREAD
-
- /* the freed thread ones */
- while (freed_thread_remsets) {
- next = freed_thread_remsets->next;
- DEBUG (4, fprintf (gc_debug_file, "Freed remset at %p\n", freed_thread_remsets->data));
- mono_sgen_free_internal_dynamic (freed_thread_remsets, remset_byte_size (freed_thread_remsets), INTERNAL_MEM_REMSET);
- freed_thread_remsets = next;
- }
-}
-
-/*
- * Clear the thread local TLAB variables for all threads.
- */
-static void
-clear_tlabs (void)
-{
- SgenThreadInfo *info;
-
- FOREACH_THREAD (info) {
- /* A new TLAB will be allocated when the thread does its first allocation */
- *info->tlab_start_addr = NULL;
- *info->tlab_next_addr = NULL;
- *info->tlab_temp_end_addr = NULL;
- *info->tlab_real_end_addr = NULL;
- } END_FOREACH_THREAD
-}
-
-static void*
-sgen_thread_register (SgenThreadInfo* info, void *addr)
-{
-#ifndef HAVE_KW_THREAD
- SgenThreadInfo *__thread_info__ = info;
-#endif
-
- LOCK_GC;
-#ifndef HAVE_KW_THREAD
- info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
+ LOCK_GC;
+#ifndef HAVE_KW_THREAD
+ info->tlab_start = info->tlab_next = info->tlab_temp_end = info->tlab_real_end = NULL;
g_assert (!mono_native_tls_get_value (thread_info_key));
mono_native_tls_set_value (thread_info_key, info);
info->doing_handshake = FALSE;
info->thread_is_dying = FALSE;
info->stack_start = NULL;
- info->tlab_start_addr = &TLAB_START;
- info->tlab_next_addr = &TLAB_NEXT;
- info->tlab_temp_end_addr = &TLAB_TEMP_END;
- info->tlab_real_end_addr = &TLAB_REAL_END;
info->store_remset_buffer_addr = &STORE_REMSET_BUFFER;
info->store_remset_buffer_index_addr = &STORE_REMSET_BUFFER_INDEX;
info->stopped_ip = NULL;
info->stopped_regs = NULL;
#endif
+ mono_sgen_init_tlab_info (info);
+
binary_protocol_thread_register ((gpointer)mono_thread_info_get_tid (info));
#ifdef HAVE_KW_THREAD
- tlab_next_addr = &tlab_next;
store_remset_buffer_index_addr = &store_remset_buffer_index;
#endif
stack_end = info->stack_end;
#endif
- info->remset = alloc_remset (DEFAULT_REMSET_SIZE, info, FALSE);
- mono_native_tls_set_value (remembered_set_key, info->remset);
-#ifdef HAVE_KW_THREAD
- remembered_set = info->remset;
-#endif
-
- STORE_REMSET_BUFFER = mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
- STORE_REMSET_BUFFER_INDEX = 0;
+ if (remset.register_thread)
+ remset.register_thread (info);
DEBUG (3, fprintf (gc_debug_file, "registered thread %p (%p) stack end %p\n", info, (gpointer)mono_thread_info_get_tid (info), info->stack_end));
}
+/*
+ * Per-thread write-barrier teardown: forwarded to the active remembered-set
+ * implementation (SSB or card table), if it provides a cleanup hook.
+ */
static void
-add_generic_store_remset_from_buffer (gpointer *buffer)
+mono_sgen_wbarrier_cleanup_thread (SgenThreadInfo *p)
{
- GenericStoreRememberedSet *remset = mono_sgen_alloc_internal (INTERNAL_MEM_STORE_REMSET);
- memcpy (remset->data, buffer + 1, sizeof (gpointer) * (STORE_REMSET_BUFFER_SIZE - 1));
- remset->next = generic_store_remsets;
- generic_store_remsets = remset;
+ /* cleanup_thread is optional; NULL means the implementation keeps no
+ * per-thread state that needs releasing here. */
+ if (remset.cleanup_thread)
+ remset.cleanup_thread (p);
}
static void
sgen_thread_unregister (SgenThreadInfo *p)
{
- RememberedSet *rset;
-
/* If a delegate is passed to native code and invoked on a thread we dont
* know about, the jit will register it with mono_jit_thread_attach, but
* we have no way of knowing when that thread goes away. SGen has a TSD
gc_callbacks.thread_detach_func (p->runtime_data);
p->runtime_data = NULL;
}
-
- if (p->remset) {
- if (freed_thread_remsets) {
- for (rset = p->remset; rset->next; rset = rset->next)
- ;
- rset->next = freed_thread_remsets;
- freed_thread_remsets = p->remset;
- } else {
- freed_thread_remsets = p->remset;
- }
- }
- if (*p->store_remset_buffer_index_addr)
- add_generic_store_remset_from_buffer (*p->store_remset_buffer_addr);
- mono_sgen_free_internal (*p->store_remset_buffer_addr, INTERNAL_MEM_STORE_REMSET);
- /*
- * This is currently not strictly required, but we do it
- * anyway in case we change thread unregistering:
-
- * If the thread is removed from the thread list after
- * unregistering (this is currently not the case), and a
- * collection occurs, clear_remsets() would want to memset
- * this buffer, which would either clobber memory or crash.
- */
- *p->store_remset_buffer_addr = NULL;
+ mono_sgen_wbarrier_cleanup_thread (p);
mono_threads_unregister_current_thread (p);
UNLOCK_GC;
* ######################################################################
*/
-/*
- * This causes the compile to extend the liveness of 'v' till the call to dummy_use
- */
-static void
-dummy_use (gpointer v) {
- __asm__ volatile ("" : "=r"(v) : "r"(v));
-}
-
-
-static RememberedSet*
-alloc_remset (int size, gpointer id, gboolean global)
-{
- RememberedSet* res = mono_sgen_alloc_internal_dynamic (sizeof (RememberedSet) + (size * sizeof (gpointer)), INTERNAL_MEM_REMSET);
- res->store_next = res->data;
- res->end_set = res->data + size;
- res->next = NULL;
- DEBUG (4, fprintf (gc_debug_file, "Allocated%s remset size %d at %p for %p\n", global ? " global" : "", size, res->data, id));
- return res;
-}
-
/*
* Note: the write barriers first do the needed GC work and then do the actual store:
* this way the value is visible to the conservative GC scan after the write barrier
DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", field_ptr));
if (value)
binary_protocol_wbarrier (field_ptr, value, value->vtable);
- if (use_cardtable) {
- *(void**)field_ptr = value;
- if (ptr_in_nursery (value))
- sgen_card_table_mark_address ((mword)field_ptr);
- dummy_use (value);
- } else {
- RememberedSet *rs;
- TLAB_ACCESS_INIT;
- LOCK_GC;
- rs = REMEMBERED_SET;
- if (rs->store_next < rs->end_set) {
- *(rs->store_next++) = (mword)field_ptr;
- *(void**)field_ptr = value;
- UNLOCK_GC;
- return;
- }
- rs = alloc_remset (rs->end_set - rs->data, (void*)1, FALSE);
- rs->next = REMEMBERED_SET;
- REMEMBERED_SET = rs;
-#ifdef HAVE_KW_THREAD
- mono_thread_info_current ()->remset = rs;
-#endif
- *(rs->store_next++) = (mword)field_ptr;
- *(void**)field_ptr = value;
- UNLOCK_GC;
- }
+ remset.wbarrier_set_field (obj, field_ptr, value);
}
void
DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", slot_ptr));
if (value)
binary_protocol_wbarrier (slot_ptr, value, value->vtable);
- if (use_cardtable) {
- *(void**)slot_ptr = value;
- if (ptr_in_nursery (value))
- sgen_card_table_mark_address ((mword)slot_ptr);
- dummy_use (value);
- } else {
- RememberedSet *rs;
- TLAB_ACCESS_INIT;
- LOCK_GC;
- rs = REMEMBERED_SET;
- if (rs->store_next < rs->end_set) {
- *(rs->store_next++) = (mword)slot_ptr;
- *(void**)slot_ptr = value;
- UNLOCK_GC;
- return;
- }
- rs = alloc_remset (rs->end_set - rs->data, (void*)1, FALSE);
- rs->next = REMEMBERED_SET;
- REMEMBERED_SET = rs;
-#ifdef HAVE_KW_THREAD
- mono_thread_info_current ()->remset = rs;
-#endif
- *(rs->store_next++) = (mword)slot_ptr;
- *(void**)slot_ptr = value;
- UNLOCK_GC;
- }
+ remset.wbarrier_set_arrayref (arr, slot_ptr, value);
}
void
}
#endif
- if (use_cardtable) {
- gpointer *dest = dest_ptr;
- gpointer *src = src_ptr;
-
- /*overlapping that required backward copying*/
- if (src < dest && (src + count) > dest) {
- gpointer *start = dest;
- dest += count - 1;
- src += count - 1;
-
- for (; dest >= start; --src, --dest) {
- gpointer value = *src;
- *dest = value;
- if (ptr_in_nursery (value))
- sgen_card_table_mark_address ((mword)dest);
- dummy_use (value);
- }
- } else {
- gpointer *end = dest + count;
- for (; dest < end; ++src, ++dest) {
- gpointer value = *src;
- *dest = value;
- if (ptr_in_nursery (value))
- sgen_card_table_mark_address ((mword)dest);
- dummy_use (value);
- }
- }
- } else {
- RememberedSet *rs;
- TLAB_ACCESS_INIT;
- LOCK_GC;
- mono_gc_memmove (dest_ptr, src_ptr, count * sizeof (gpointer));
-
- rs = REMEMBERED_SET;
- DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p, %d\n", dest_ptr, count));
- if (rs->store_next + 1 < rs->end_set) {
- *(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
- *(rs->store_next++) = count;
- UNLOCK_GC;
- return;
- }
- rs = alloc_remset (rs->end_set - rs->data, (void*)1, FALSE);
- rs->next = REMEMBERED_SET;
- REMEMBERED_SET = rs;
-#ifdef HAVE_KW_THREAD
- mono_thread_info_current ()->remset = rs;
-#endif
- *(rs->store_next++) = (mword)dest_ptr | REMSET_RANGE;
- *(rs->store_next++) = count;
-
- UNLOCK_GC;
- }
+ remset.wbarrier_arrayref_copy (dest_ptr, src_ptr, count);
}
static char *found_obj;
return found_obj;
}
-static void
-evacuate_remset_buffer (void)
-{
- gpointer *buffer;
- TLAB_ACCESS_INIT;
-
- buffer = STORE_REMSET_BUFFER;
-
- add_generic_store_remset_from_buffer (buffer);
- memset (buffer, 0, sizeof (gpointer) * STORE_REMSET_BUFFER_SIZE);
-
- STORE_REMSET_BUFFER_INDEX = 0;
-}
-
void
mono_gc_wbarrier_generic_nostore (gpointer ptr)
{
- gpointer *buffer;
- int index;
- TLAB_ACCESS_INIT;
-
 HEAVY_STAT (++stat_wbarrier_generic_store);
#ifdef XDOMAIN_CHECKS_IN_WBARRIER
 return;
 }
- if (use_cardtable) {
- if (ptr_in_nursery(*(gpointer*)ptr))
- sgen_card_table_mark_address ((mword)ptr);
- return;
- }
-
- LOCK_GC;
-
- buffer = STORE_REMSET_BUFFER;
- index = STORE_REMSET_BUFFER_INDEX;
- /* This simple optimization eliminates a sizable portion of
- entries. Comparing it to the last but one entry as well
- doesn't eliminate significantly more entries. */
- if (buffer [index] == ptr) {
- UNLOCK_GC;
- return;
- }
-
 DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p\n", ptr));
- HEAVY_STAT (++stat_wbarrier_generic_store_remset);
-
- ++index;
- if (index >= STORE_REMSET_BUFFER_SIZE) {
- evacuate_remset_buffer ();
- index = STORE_REMSET_BUFFER_INDEX;
- g_assert (index == 0);
- ++index;
- }
- buffer [index] = ptr;
- STORE_REMSET_BUFFER_INDEX = index;
- UNLOCK_GC;
+ /* Record PTR in the active remembered-set implementation (SSB or card
+ * table); the store itself was already performed by the caller. */
+ remset.wbarrier_generic_nostore (ptr);
}
void
*(void**)ptr = value;
if (ptr_in_nursery (value))
mono_gc_wbarrier_generic_nostore (ptr);
- dummy_use (value);
-}
-
-void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
-{
- mword *dest = _dest;
- mword *src = _src;
-
- while (size) {
- if (bitmap & 0x1)
- mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
- else
- *dest = *src;
- ++src;
- ++dest;
- size -= SIZEOF_VOID_P;
- bitmap >>= 1;
- }
-}
-
-#ifdef SGEN_BINARY_PROTOCOL
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- gpointer o = *(gpointer*)(ptr); \
- if ((o)) { \
- gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
- binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
- } \
- } while (0)
-
-static void
-scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
-{
-#define SCAN_OBJECT_NOVTABLE
-#include "sgen-scan-object.h"
-}
-#endif
-
-void
-mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
-{
- RememberedSet *rs;
- size_t element_size = mono_class_value_size (klass, NULL);
- size_t size = count * element_size;
- TLAB_ACCESS_INIT;
- HEAVY_STAT (++stat_wbarrier_value_copy);
- g_assert (klass->valuetype);
-#ifdef SGEN_BINARY_PROTOCOL
- {
- int i;
- for (i = 0; i < count; ++i) {
- scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
- (char*)src + i * element_size - sizeof (MonoObject),
- (mword) klass->gc_descr);
- }
- }
-#endif
- if (use_cardtable) {
-#ifdef DISABLE_CRITICAL_REGION
- LOCK_GC;
-#else
- ENTER_CRITICAL_REGION;
-#endif
- mono_gc_memmove (dest, src, size);
- sgen_card_table_mark_range ((mword)dest, size);
-#ifdef DISABLE_CRITICAL_REGION
- UNLOCK_GC;
-#else
- EXIT_CRITICAL_REGION;
-#endif
- } else {
- LOCK_GC;
- mono_gc_memmove (dest, src, size);
- rs = REMEMBERED_SET;
- if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
- UNLOCK_GC;
- return;
- }
- g_assert (klass->gc_descr_inited);
- DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));
-
- if (rs->store_next + 4 < rs->end_set) {
- *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
- *(rs->store_next++) = (mword)klass->gc_descr;
- *(rs->store_next++) = (mword)count;
- *(rs->store_next++) = (mword)element_size;
- UNLOCK_GC;
- return;
- }
- rs = alloc_remset (rs->end_set - rs->data, (void*)1, FALSE);
- rs->next = REMEMBERED_SET;
- REMEMBERED_SET = rs;
-#ifdef HAVE_KW_THREAD
- mono_thread_info_current ()->remset = rs;
-#endif
- *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
- *(rs->store_next++) = (mword)klass->gc_descr;
- *(rs->store_next++) = (mword)count;
- *(rs->store_next++) = (mword)element_size;
- UNLOCK_GC;
- }
-}
-
-/**
- * mono_gc_wbarrier_object_copy:
- *
- * Write barrier to call when obj is the result of a clone or copy of an object.
- */
-void
-mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
-{
- RememberedSet *rs;
- int size;
-
- TLAB_ACCESS_INIT;
- HEAVY_STAT (++stat_wbarrier_object_copy);
- rs = REMEMBERED_SET;
- DEBUG (6, fprintf (gc_debug_file, "Adding object remset for %p\n", obj));
- size = mono_object_class (obj)->instance_size;
- LOCK_GC;
-#ifdef SGEN_BINARY_PROTOCOL
- scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
-#endif
- /* do not copy the sync state */
- mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
- size - sizeof (MonoObject));
- if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
- UNLOCK_GC;
- return;
- }
- if (rs->store_next < rs->end_set) {
- *(rs->store_next++) = (mword)obj | REMSET_OBJECT;
- UNLOCK_GC;
- return;
- }
- rs = alloc_remset (rs->end_set - rs->data, (void*)1, FALSE);
- rs->next = REMEMBERED_SET;
- REMEMBERED_SET = rs;
-
-#ifdef HAVE_KW_THREAD
- mono_thread_info_current ()->remset = rs;
-#endif
- *(rs->store_next++) = (mword)obj | REMSET_OBJECT;
- UNLOCK_GC;
-}
-
-/*
- * ######################################################################
- * ######## Collector debugging
- * ######################################################################
- */
-
-const char*descriptor_types [] = {
- "run_length",
- "small_bitmap",
- "string",
- "complex",
- "vector",
- "array",
- "large_bitmap",
- "complex_arr"
-};
-
-void
-describe_ptr (char *ptr)
-{
- MonoVTable *vtable;
- mword desc;
- int type;
- char *start;
-
- if (ptr_in_nursery (ptr)) {
- printf ("Pointer inside nursery.\n");
- } else {
- if (mono_sgen_ptr_is_in_los (ptr, &start)) {
- if (ptr == start)
- printf ("Pointer is the start of object %p in LOS space.\n", start);
- else
- printf ("Pointer is at offset 0x%x of object %p in LOS space.\n", (int)(ptr - start), start);
- ptr = start;
- } else if (major_collector.ptr_is_in_non_pinned_space (ptr)) {
- printf ("Pointer inside oldspace.\n");
- } else if (major_collector.obj_is_from_pinned_alloc (ptr)) {
- printf ("Pointer is inside a pinned chunk.\n");
- } else {
- printf ("Pointer unknown.\n");
- return;
- }
- }
-
- if (object_is_pinned (ptr))
- printf ("Object is pinned.\n");
-
- if (object_is_forwarded (ptr))
- printf ("Object is forwared.\n");
-
- // FIXME: Handle pointers to the inside of objects
- vtable = (MonoVTable*)LOAD_VTABLE (ptr);
-
- printf ("VTable: %p\n", vtable);
- if (vtable == NULL) {
- printf ("VTable is invalid (empty).\n");
- return;
- }
- if (ptr_in_nursery (vtable)) {
- printf ("VTable is invalid (points inside nursery).\n");
- return;
- }
- printf ("Class: %s\n", vtable->klass->name);
-
- desc = ((GCVTable*)vtable)->desc;
- printf ("Descriptor: %lx\n", (long)desc);
-
- type = desc & 0x7;
- printf ("Descriptor type: %d (%s)\n", type, descriptor_types [type]);
-}
-
-static mword*
-find_in_remset_loc (mword *p, char *addr, gboolean *found)
-{
- void **ptr;
- mword count, desc;
- size_t skip_size;
-
- switch ((*p) & REMSET_TYPE_MASK) {
- case REMSET_LOCATION:
- if (*p == (mword)addr)
- *found = TRUE;
- return p + 1;
- case REMSET_RANGE:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- count = p [1];
- if ((void**)addr >= ptr && (void**)addr < ptr + count)
- *found = TRUE;
- return p + 2;
- case REMSET_OBJECT:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- count = safe_object_get_size ((MonoObject*)ptr);
- count = ALIGN_UP (count);
- count /= sizeof (mword);
- if ((void**)addr >= ptr && (void**)addr < ptr + count)
- *found = TRUE;
- return p + 1;
- case REMSET_VTYPE:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- desc = p [1];
- count = p [2];
- skip_size = p [3];
-
- /* The descriptor includes the size of MonoObject */
- skip_size -= sizeof (MonoObject);
- skip_size *= count;
- if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
- *found = TRUE;
-
- return p + 4;
- default:
- g_assert_not_reached ();
- }
- return NULL;
-}
-
-/*
- * Return whenever ADDR occurs in the remembered sets
- */
-static gboolean
-find_in_remsets (char *addr)
-{
- int i;
- SgenThreadInfo *info;
- RememberedSet *remset;
- GenericStoreRememberedSet *store_remset;
- mword *p;
- gboolean found = FALSE;
-
- /* the global one */
- for (remset = global_remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
- for (p = remset->data; p < remset->store_next;) {
- p = find_in_remset_loc (p, addr, &found);
- if (found)
- return TRUE;
- }
- }
-
- /* the generic store ones */
- for (store_remset = generic_store_remsets; store_remset; store_remset = store_remset->next) {
- for (i = 0; i < STORE_REMSET_BUFFER_SIZE - 1; ++i) {
- if (store_remset->data [i] == addr)
- return TRUE;
- }
- }
-
- /* the per-thread ones */
- FOREACH_THREAD (info) {
- int j;
- for (remset = info->remset; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for thread %p, range: %p-%p, size: %td\n", info, remset->data, remset->store_next, remset->store_next - remset->data));
- for (p = remset->data; p < remset->store_next;) {
- p = find_in_remset_loc (p, addr, &found);
- if (found)
- return TRUE;
- }
- }
- for (j = 0; j < *info->store_remset_buffer_index_addr; ++j) {
- if ((*info->store_remset_buffer_addr) [j + 1] == addr)
- return TRUE;
- }
- } END_FOREACH_THREAD
-
- /* the freed thread ones */
- for (remset = freed_thread_remsets; remset; remset = remset->next) {
- DEBUG (4, fprintf (gc_debug_file, "Scanning remset for freed thread, range: %p-%p, size: %td\n", remset->data, remset->store_next, remset->store_next - remset->data));
- for (p = remset->data; p < remset->store_next;) {
- p = find_in_remset_loc (p, addr, &found);
- if (found)
- return TRUE;
- }
- }
-
- return FALSE;
-}
-
-static gboolean missing_remsets;
-
-/*
- * We let a missing remset slide if the target object is pinned,
- * because the store might have happened but the remset not yet added,
- * but in that case the target must be pinned. We might theoretically
- * miss some missing remsets this way, but it's very unlikely.
- */
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- if (*(ptr) && (char*)*(ptr) >= nursery_start && (char*)*(ptr) < nursery_end) { \
- if (!find_in_remsets ((char*)(ptr)) && (!use_cardtable || !sgen_card_table_address_is_marked ((mword)ptr))) { \
- fprintf (gc_debug_file, "Oldspace->newspace reference %p at offset %td in object %p (%s.%s) not found in remsets.\n", *(ptr), (char*)(ptr) - (char*)(obj), (obj), ((MonoObject*)(obj))->vtable->klass->name_space, ((MonoObject*)(obj))->vtable->klass->name); \
- binary_protocol_missing_remset ((obj), (gpointer)LOAD_VTABLE ((obj)), (char*)(ptr) - (char*)(obj), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
- if (!object_is_pinned (*(ptr))) \
- missing_remsets = TRUE; \
- } \
- } \
- } while (0)
-
-/*
- * Check that each object reference which points into the nursery can
- * be found in the remembered sets.
- */
-static void
-check_consistency_callback (char *start, size_t size, void *dummy)
-{
- GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
- DEBUG (8, fprintf (gc_debug_file, "Scanning object %p, vtable: %p (%s)\n", start, vt, vt->klass->name));
-
-#define SCAN_OBJECT_ACTION
-#include "sgen-scan-object.h"
+ mono_sgen_dummy_use (value);
}
-/*
- * Perform consistency check of the heap.
- *
- * Assumes the world is stopped.
- */
-static void
-check_consistency (void)
+/*
+ * mono_gc_wbarrier_value_copy_bitmap:
+ *
+ * Copy SIZE bytes from _SRC to _DEST one pointer-sized word at a time.
+ * BITMAP holds one bit per word: words whose bit is set are stored through
+ * the generic-store write barrier (they hold object references); all other
+ * words are copied raw.
+ */
+void mono_gc_wbarrier_value_copy_bitmap (gpointer _dest, gpointer _src, int size, unsigned bitmap)
{
- // Need to add more checks
-
- missing_remsets = FALSE;
-
- DEBUG (1, fprintf (gc_debug_file, "Begin heap consistency check...\n"));
-
- // Check that oldspace->newspace pointers are registered with the collector
- major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_consistency_callback, NULL);
-
- mono_sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_consistency_callback, NULL);
-
- DEBUG (1, fprintf (gc_debug_file, "Heap consistency check done.\n"));
+ mword *dest = _dest;
+ mword *src = _src;
- if (!binary_protocol_is_enabled ())
- g_assert (!missing_remsets);
+ /* One word and one bitmap bit consumed per iteration. */
+ while (size) {
+ if (bitmap & 0x1)
+ mono_gc_wbarrier_generic_store (dest, (MonoObject*)*src);
+ else
+ *dest = *src;
+ ++src;
+ ++dest;
+ size -= SIZEOF_VOID_P;
+ bitmap >>= 1;
+ }
}
-
+#ifdef SGEN_BINARY_PROTOCOL
#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- if (*(ptr) && !LOAD_VTABLE (*(ptr))) \
- g_error ("Could not load vtable for obj %p slot %d (size %d)", obj, (char*)ptr - (char*)obj, safe_object_get_size ((MonoObject*)obj)); \
+#define HANDLE_PTR(ptr,obj) do { \
+ gpointer o = *(gpointer*)(ptr); \
+ if ((o)) { \
+ gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
+ binary_protocol_wbarrier (d, o, (gpointer) LOAD_VTABLE (o)); \
+ } \
 } while (0)
+/* Emit a binary-protocol wbarrier record for every non-NULL reference slot
+ * of the object copied from START to DEST; used by the copy write barriers
+ * when SGEN_BINARY_PROTOCOL is enabled. */
static void
-check_major_refs_callback (char *start, size_t size, void *dummy)
+scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
{
-#define SCAN_OBJECT_ACTION
+#define SCAN_OBJECT_NOVTABLE
#include "sgen-scan-object.h"
}
+#endif
-static void
-check_major_refs (void)
+/*
+ * mono_gc_wbarrier_value_copy:
+ *
+ * Write barrier for copying COUNT value-type elements of class KLASS from
+ * SRC to DEST.  If DEST is in the nursery, on the stack, or KLASS carries
+ * no references, a plain memmove suffices and no remembered-set work is
+ * needed.  Otherwise the operation is delegated to the active remembered-set
+ * implementation (NOTE(review): the implementation is expected to perform
+ * the copy itself -- confirm in the SSB/card-table backends).
+ */
+void
+mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
{
- major_collector.iterate_objects (TRUE, TRUE, (IterateObjectCallbackFunc)check_major_refs_callback, NULL);
- mono_sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_major_refs_callback, NULL);
-}
+ /* Computed up front: element_size is needed both on the fast path and by
+ * the SGEN_BINARY_PROTOCOL scan below.  Declaring it inside the fast-path
+ * block (as before) left it out of scope for the scan, breaking
+ * binary-protocol builds. */
+ size_t element_size = mono_class_value_size (klass, NULL);
+
+ HEAVY_STAT (++stat_wbarrier_value_copy);
+ g_assert (klass->valuetype);
-/* Check that the reference is valid */
-#undef HANDLE_PTR
-#define HANDLE_PTR(ptr,obj) do { \
- if (*(ptr)) { \
- g_assert (safe_name (*(ptr)) != NULL); \
- } \
- } while (0)
+ DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d, descr %p for class %s (%p)\n", dest, count, klass->gc_descr, klass->name, klass));
-/*
- * check_object:
+ /* Fast path: no remembered-set entry required, just move the data. */
+ if (ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
+ size_t size = count * element_size;
+ mono_gc_memmove (dest, src, size);
+ return;
+ }
+
+#ifdef SGEN_BINARY_PROTOCOL
+ {
+ int i;
+ for (i = 0; i < count; ++i) {
+ scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
+ (char*)src + i * element_size - sizeof (MonoObject),
+ (mword) klass->gc_descr);
+ }
+ }
+#endif
+
+ remset.wbarrier_value_copy (dest, src, count, klass);
+}
+
+/**
+ * mono_gc_wbarrier_object_copy:
 *
- * Perform consistency check on an object. Currently we only check that the
- * reference fields are valid.
+ * Write barrier to call when obj is the result of a clone or copy of an object.
 */
void
-check_object (char *start)
+mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
{
- if (!start)
- return;
+ int size;
-#include "sgen-scan-object.h"
+ HEAVY_STAT (++stat_wbarrier_object_copy);
+
+ /* Nursery/stack targets need no remembered-set entry: copy the payload
+ * directly, skipping the MonoObject header so the sync state is not
+ * cloned. */
+ if (ptr_in_nursery (obj) || ptr_on_stack (obj)) {
+ size = mono_object_class (obj)->instance_size;
+ mono_gc_memmove ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
+ size - sizeof (MonoObject));
+ return;
+ }
+
+#ifdef SGEN_BINARY_PROTOCOL
+ scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
+#endif
+
+ /* Old-space target: hand off to the active remembered-set implementation.
+ * NOTE(review): assumes wbarrier_object_copy also performs the copy --
+ * confirm in the SSB/card-table backends. */
+ remset.wbarrier_object_copy (obj, src);
+}
/*
mono_gc_collection_count (int generation)
{
if (generation == 0)
- return num_minor_gcs;
- return num_major_gcs;
+ return stat_minor_gcs;
+ return stat_major_gcs;
}
int64_t
return TRUE;
}
-
-void*
-mono_gc_alloc_fixed (size_t size, void *descr)
-{
- /* FIXME: do a single allocation */
- void *res = calloc (1, size);
- if (!res)
- return NULL;
- if (!mono_gc_register_root (res, size, descr)) {
- free (res);
- res = NULL;
- }
- return res;
-}
-
-void
-mono_gc_free_fixed (void* addr)
-{
- mono_gc_deregister_root (addr);
- free (addr);
-}
-
void*
mono_gc_invoke_with_gc_lock (MonoGCLockedCallbackFunc func, void *data)
{
{
return mono_runtime_is_critical_method (method) || mono_gc_is_critical_method (method);
}
-
+
void
mono_gc_base_init (void)
{
mono_threads_init (&cb, sizeof (SgenThreadInfo));
LOCK_INIT (interruption_mutex);
- LOCK_INIT (global_remset_mutex);
LOCK_INIT (pin_queue_mutex);
init_user_copy_or_mark_key ();
mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_STORE_REMSET, sizeof (GenericStoreRememberedSet));
mono_sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
- mono_native_tls_alloc (&remembered_set_key, NULL);
-
#ifndef HAVE_KW_THREAD
mono_native_tls_alloc (&thread_info_key, NULL);
#endif
num_workers = 16;
///* Keep this the default for now */
+#ifdef __APPLE__
conservative_stack_mark = TRUE;
+#endif
if (opts) {
for (ptr = opts; *ptr; ++ptr) {
long val;
opt = strchr (opt, '=') + 1;
if (*opt && mono_gc_parse_environment_string_extract_number (opt, &val)) {
- default_nursery_size = val;
+ mono_sgen_nursery_size = val;
#ifdef SGEN_ALIGN_NURSERY
if ((val & (val - 1))) {
fprintf (stderr, "The nursery size must be a power of two.\n");
exit (1);
}
- default_nursery_bits = 0;
- while (1 << (++ default_nursery_bits) != default_nursery_size)
+ mono_sgen_nursery_bits = 0;
+ while (1 << (++ mono_sgen_nursery_bits) != mono_sgen_nursery_size)
;
#endif
} else {
}
if (major_collector.is_parallel)
- workers_init (num_workers);
+ mono_sgen_workers_init (num_workers);
if (major_collector_opt)
g_free (major_collector_opt);
if (major_collector.post_param_init)
major_collector.post_param_init ();
- global_remset = alloc_remset (1024, NULL, FALSE);
- global_remset->next = NULL;
+ memset (&remset, 0, sizeof (remset));
+#ifdef SGEN_HAVE_CARDTABLE
if (use_cardtable)
- card_table_init ();
-
- gc_initialized = 1;
-}
-
-enum {
- ATYPE_NORMAL,
- ATYPE_VECTOR,
- ATYPE_SMALL,
- ATYPE_NUM
-};
-
-#ifdef HAVE_KW_THREAD
-#define EMIT_TLS_ACCESS(mb,dummy,offset) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), (offset)); \
- } while (0)
-#else
-
-/*
- * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin,
- * where the two are the same.
- */
-#if defined(__APPLE__) || defined (HOST_WIN32)
-#define EMIT_TLS_ACCESS(mb,member,dummy) do { \
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
- mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
- mono_mb_emit_i4 ((mb), thread_info_key); \
- mono_mb_emit_icon ((mb), G_STRUCT_OFFSET (SgenThreadInfo, member)); \
- mono_mb_emit_byte ((mb), CEE_ADD); \
- mono_mb_emit_byte ((mb), CEE_LDIND_I); \
- } while (0)
-#else
-#define EMIT_TLS_ACCESS(mb,member,dummy) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
-#endif
-
-#endif
-
-#ifdef MANAGED_ALLOCATION
-/* FIXME: Do this in the JIT, where specialized allocation sequences can be created
- * for each class. This is currently not easy to do, as it is hard to generate basic
- * blocks + branches, but it is easy with the linear IL codebase.
- *
- * For this to work we'd need to solve the TLAB race, first. Now we
- * require the allocator to be in a few known methods to make sure
- * that they are executed atomically via the restart mechanism.
- */
-static MonoMethod*
-create_allocator (int atype)
-{
- int p_var, size_var;
- guint32 slowpath_branch, max_size_branch;
- MonoMethodBuilder *mb;
- MonoMethod *res;
- MonoMethodSignature *csig;
- static gboolean registered = FALSE;
- int tlab_next_addr_var, new_next_var;
- int num_params, i;
- const char *name = NULL;
- AllocatorWrapperInfo *info;
-
-#ifdef HAVE_KW_THREAD
- int tlab_next_addr_offset = -1;
- int tlab_temp_end_offset = -1;
-
- MONO_THREAD_VAR_OFFSET (tlab_next_addr, tlab_next_addr_offset);
- MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
-
- g_assert (tlab_next_addr_offset != -1);
- g_assert (tlab_temp_end_offset != -1);
+ sgen_card_table_init (&remset);
+ else
#endif
+ mono_sgen_ssb_init (&remset);
- if (!registered) {
- mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
- mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
- registered = TRUE;
- }
-
- if (atype == ATYPE_SMALL) {
- num_params = 1;
- name = "AllocSmall";
- } else if (atype == ATYPE_NORMAL) {
- num_params = 1;
- name = "Alloc";
- } else if (atype == ATYPE_VECTOR) {
- num_params = 2;
- name = "AllocVector";
- } else {
- g_assert_not_reached ();
- }
-
- csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
- csig->ret = &mono_defaults.object_class->byval_arg;
- for (i = 0; i < num_params; ++i)
- csig->params [i] = &mono_defaults.int_class->byval_arg;
+ if (remset.register_thread)
+ remset.register_thread (mono_thread_info_current ());
- mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
- size_var = mono_mb_add_local (mb, &mono_defaults.int32_class->byval_arg);
- if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
- /* size = vtable->klass->instance_size; */
- mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
- mono_mb_emit_byte (mb, CEE_ADD);
- mono_mb_emit_byte (mb, CEE_LDIND_I);
- mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, instance_size));
- mono_mb_emit_byte (mb, CEE_ADD);
- /* FIXME: assert instance_size stays a 4 byte integer */
- mono_mb_emit_byte (mb, CEE_LDIND_U4);
- mono_mb_emit_stloc (mb, size_var);
- } else if (atype == ATYPE_VECTOR) {
- MonoExceptionClause *clause;
- int pos, pos_leave;
- MonoClass *oom_exc_class;
- MonoMethod *ctor;
-
- /* n > MONO_ARRAY_MAX_INDEX -> OverflowException */
- mono_mb_emit_ldarg (mb, 1);
- mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
- pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
- mono_mb_emit_exception (mb, "OverflowException", NULL);
- mono_mb_patch_short_branch (mb, pos);
-
- clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
- clause->try_offset = mono_mb_get_label (mb);
-
- /* vtable->klass->sizes.element_size */
- mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoVTable, klass));
- mono_mb_emit_byte (mb, CEE_ADD);
- mono_mb_emit_byte (mb, CEE_LDIND_I);
- mono_mb_emit_icon (mb, G_STRUCT_OFFSET (MonoClass, sizes.element_size));
- mono_mb_emit_byte (mb, CEE_ADD);
- mono_mb_emit_byte (mb, CEE_LDIND_U4);
-
- /* * n */
- mono_mb_emit_ldarg (mb, 1);
- mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
- /* + sizeof (MonoArray) */
- mono_mb_emit_icon (mb, sizeof (MonoArray));
- mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
- mono_mb_emit_stloc (mb, size_var);
-
- pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);
-
- /* catch */
- clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
- clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
- clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
- "System", "OverflowException");
- g_assert (clause->data.catch_class);
- clause->handler_offset = mono_mb_get_label (mb);
-
- oom_exc_class = mono_class_from_name (mono_defaults.corlib,
- "System", "OutOfMemoryException");
- g_assert (oom_exc_class);
- ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
- g_assert (ctor);
-
- mono_mb_emit_byte (mb, CEE_POP);
- mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
- mono_mb_emit_byte (mb, CEE_THROW);
-
- clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
- mono_mb_set_clauses (mb, 1, clause);
- mono_mb_patch_branch (mb, pos_leave);
- /* end catch */
- } else {
- g_assert_not_reached ();
- }
-
- /* size += ALLOC_ALIGN - 1; */
- mono_mb_emit_ldloc (mb, size_var);
- mono_mb_emit_icon (mb, ALLOC_ALIGN - 1);
- mono_mb_emit_byte (mb, CEE_ADD);
- /* size &= ~(ALLOC_ALIGN - 1); */
- mono_mb_emit_icon (mb, ~(ALLOC_ALIGN - 1));
- mono_mb_emit_byte (mb, CEE_AND);
- mono_mb_emit_stloc (mb, size_var);
-
- /* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
- if (atype != ATYPE_SMALL) {
- mono_mb_emit_ldloc (mb, size_var);
- mono_mb_emit_icon (mb, MAX_SMALL_OBJ_SIZE);
- max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_UN_S);
- }
-
- /*
- * We need to modify tlab_next, but the JIT only supports reading, so we read
- * another tls var holding its address instead.
- */
-
- /* tlab_next_addr (local) = tlab_next_addr (TLS var) */
- tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
- EMIT_TLS_ACCESS (mb, tlab_next_addr, tlab_next_addr_offset);
- mono_mb_emit_stloc (mb, tlab_next_addr_var);
-
- /* p = (void**)tlab_next; */
- p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
- mono_mb_emit_ldloc (mb, tlab_next_addr_var);
- mono_mb_emit_byte (mb, CEE_LDIND_I);
- mono_mb_emit_stloc (mb, p_var);
-
- /* new_next = (char*)p + size; */
- new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
- mono_mb_emit_ldloc (mb, p_var);
- mono_mb_emit_ldloc (mb, size_var);
- mono_mb_emit_byte (mb, CEE_CONV_I);
- mono_mb_emit_byte (mb, CEE_ADD);
- mono_mb_emit_stloc (mb, new_next_var);
-
- /* if (G_LIKELY (new_next < tlab_temp_end)) */
- mono_mb_emit_ldloc (mb, new_next_var);
- EMIT_TLS_ACCESS (mb, tlab_temp_end, tlab_temp_end_offset);
- slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
-
- /* Slowpath */
- if (atype != ATYPE_SMALL)
- mono_mb_patch_short_branch (mb, max_size_branch);
-
- mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
- mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
-
- /* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
- mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_ldloc (mb, size_var);
- if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
- mono_mb_emit_icall (mb, mono_gc_alloc_obj);
- } else if (atype == ATYPE_VECTOR) {
- mono_mb_emit_ldarg (mb, 1);
- mono_mb_emit_icall (mb, mono_gc_alloc_vector);
- } else {
- g_assert_not_reached ();
- }
- mono_mb_emit_byte (mb, CEE_RET);
-
- /* Fastpath */
- mono_mb_patch_short_branch (mb, slowpath_branch);
-
- /* FIXME: Memory barrier */
-
- /* tlab_next = new_next */
- mono_mb_emit_ldloc (mb, tlab_next_addr_var);
- mono_mb_emit_ldloc (mb, new_next_var);
- mono_mb_emit_byte (mb, CEE_STIND_I);
-
- /*The tlab store must be visible before the the vtable store. This could be replaced with a DDS but doing it with IL would be tricky. */
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);
- mono_mb_emit_op (mb, CEE_MONO_MEMORY_BARRIER, StoreStoreBarrier);
-
- /* *p = vtable; */
- mono_mb_emit_ldloc (mb, p_var);
- mono_mb_emit_ldarg (mb, 0);
- mono_mb_emit_byte (mb, CEE_STIND_I);
-
- if (atype == ATYPE_VECTOR) {
- /* arr->max_length = max_length; */
- mono_mb_emit_ldloc (mb, p_var);
- mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (MonoArray, max_length));
- mono_mb_emit_ldarg (mb, 1);
- mono_mb_emit_byte (mb, CEE_STIND_I);
- }
-
- /*
- We must make sure both vtable and max_length are globaly visible before returning to managed land.
- */
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);
- mono_mb_emit_op (mb, CEE_MONO_MEMORY_BARRIER, StoreStoreBarrier);
-
- /* return p */
- mono_mb_emit_ldloc (mb, p_var);
- mono_mb_emit_byte (mb, CEE_RET);
-
- res = mono_mb_create_method (mb, csig, 8);
- mono_mb_free (mb);
- mono_method_get_header (res)->init_locals = FALSE;
-
- info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
- info->gc_name = "sgen";
- info->alloc_type = atype;
- mono_marshal_set_wrapper_info (res, info);
-
- return res;
+ gc_initialized = 1;
}
-#endif
const char *
mono_gc_get_gc_name (void)
return "sgen";
}
-static MonoMethod* alloc_method_cache [ATYPE_NUM];
static MonoMethod *write_barrier_method;
static gboolean
mono_gc_is_critical_method (MonoMethod *method)
{
+	/*
+	 * A method is GC-critical if it is one of the GC's own managed wrappers:
+	 * the write barrier, or a managed allocator.  The allocator cache that
+	 * used to live in this file is gone; allocator tracking is now answered
+	 * by mono_sgen_is_managed_allocator ().
+	 */
-	int i;
-	if (method == write_barrier_method)
-		return TRUE;
-
-	for (i = 0; i < ATYPE_NUM; ++i)
-		if (method == alloc_method_cache [i])
-			return TRUE;
-
-	return FALSE;
+	return (method == write_barrier_method || mono_sgen_is_managed_allocator (method));
}
static gboolean
return mono_gc_is_critical_method (ji->method);
}
-/*
- * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
- * The signature of the called method is:
- * object allocate (MonoVTable *vtable)
- */
-MonoMethod*
-mono_gc_get_managed_allocator (MonoVTable *vtable, gboolean for_box)
-{
-#ifdef MANAGED_ALLOCATION
- MonoClass *klass = vtable->klass;
-
-#ifdef HAVE_KW_THREAD
- int tlab_next_offset = -1;
- int tlab_temp_end_offset = -1;
- MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
- MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
-
- if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
- return NULL;
-#endif
-
- if (!mono_runtime_has_tls_get ())
- return NULL;
- if (klass->instance_size > tlab_size)
- return NULL;
- if (klass->has_finalize || klass->marshalbyref || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
- return NULL;
- if (klass->rank)
- return NULL;
- if (klass->byval_arg.type == MONO_TYPE_STRING)
- return NULL;
- if (collect_before_allocs)
- return NULL;
-
- if (ALIGN_TO (klass->instance_size, ALLOC_ALIGN) < MAX_SMALL_OBJ_SIZE)
- return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
- else
- return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
-#else
- return NULL;
-#endif
-}
-
-MonoMethod*
-mono_gc_get_managed_array_allocator (MonoVTable *vtable, int rank)
-{
-#ifdef MANAGED_ALLOCATION
- MonoClass *klass = vtable->klass;
-
-#ifdef HAVE_KW_THREAD
- int tlab_next_offset = -1;
- int tlab_temp_end_offset = -1;
- MONO_THREAD_VAR_OFFSET (tlab_next, tlab_next_offset);
- MONO_THREAD_VAR_OFFSET (tlab_temp_end, tlab_temp_end_offset);
-
- if (tlab_next_offset == -1 || tlab_temp_end_offset == -1)
- return NULL;
-#endif
-
- if (rank != 1)
- return NULL;
- if (!mono_runtime_has_tls_get ())
- return NULL;
- if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
- return NULL;
- if (collect_before_allocs)
- return NULL;
- g_assert (!mono_class_has_finalizer (klass) && !klass->marshalbyref);
-
- return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
-#else
- return NULL;
-#endif
-}
-
-MonoMethod*
-mono_gc_get_managed_allocator_by_type (int atype)
-{
-#ifdef MANAGED_ALLOCATION
- MonoMethod *res;
-
- if (!mono_runtime_has_tls_get ())
- return NULL;
-
- mono_loader_lock ();
- res = alloc_method_cache [atype];
- if (!res)
- res = alloc_method_cache [atype] = create_allocator (atype);
- mono_loader_unlock ();
- return res;
-#else
- return NULL;
-#endif
-}
-
-guint32
-mono_gc_get_managed_allocator_types (void)
-{
- return ATYPE_NUM;
-}
-
static void
emit_nursery_check (MonoMethodBuilder *mb, int *nursery_check_return_labels)
{
	mono_mb_emit_ldarg (mb, 0);
	mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
	mono_mb_emit_byte (mb, CEE_SHR_UN);
+	/* nursery_start/nursery_end are no longer referenced directly; the bounds
+	   are fetched through the mono_sgen_get_nursery_start/end accessors. */
-	mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
+	mono_mb_emit_icon (mb, (mword)mono_sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
	nursery_check_return_labels [0] = mono_mb_emit_branch (mb, CEE_BEQ);
	// if (!ptr_in_nursery (*ptr)) return;
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
	mono_mb_emit_byte (mb, CEE_SHR_UN);
-	mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
+	mono_mb_emit_icon (mb, (mword)mono_sgen_get_nursery_start () >> DEFAULT_NURSERY_BITS);
	nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BNE_UN);
#else
	int label_continue1, label_continue2;
	int dereferenced_var;
+	/* NOTE(review): the declarations 'label_continue1/2' above do not match the
+	   'label_continue_1/2' assigned below — confirm against the full source
+	   (context lines elided in this hunk may carry the real declarations). */
-	// if (ptr < (nursery_start)) goto continue;
+	// if (ptr < (mono_sgen_get_nursery_start ())) goto continue;
	mono_mb_emit_ldarg (mb, 0);
-	mono_mb_emit_ptr (mb, (gpointer) nursery_start);
+	mono_mb_emit_ptr (mb, (gpointer) mono_sgen_get_nursery_start ());
	label_continue_1 = mono_mb_emit_branch (mb, CEE_BLT);
-	// if (ptr >= nursery_end)) goto continue;
+	// if (ptr >= mono_sgen_get_nursery_end ())) goto continue;
	mono_mb_emit_ldarg (mb, 0);
-	mono_mb_emit_ptr (mb, (gpointer) nursery_end);
+	mono_mb_emit_ptr (mb, (gpointer) mono_sgen_get_nursery_end ());
	label_continue_2 = mono_mb_emit_branch (mb, CEE_BGE);
	// Otherwise return
	mono_mb_emit_byte (mb, CEE_LDIND_I);
	mono_mb_emit_stloc (mb, dereferenced_var);
-	// if (*ptr < nursery_start) return;
+	// if (*ptr < mono_sgen_get_nursery_start ()) return;
	mono_mb_emit_ldloc (mb, dereferenced_var);
-	mono_mb_emit_ptr (mb, (gpointer) nursery_start);
+	mono_mb_emit_ptr (mb, (gpointer) mono_sgen_get_nursery_start ());
	nursery_check_return_labels [1] = mono_mb_emit_branch (mb, CEE_BLT);
-	// if (*ptr >= nursery_end) return;
+	// if (*ptr >= mono_sgen_get_nursery_end ()) return;
	mono_mb_emit_ldloc (mb, dereferenced_var);
-	mono_mb_emit_ptr (mb, (gpointer) nursery_end);
+	mono_mb_emit_ptr (mb, (gpointer) mono_sgen_get_nursery_end ());
	nursery_check_return_labels [2] = mono_mb_emit_branch (mb, CEE_BGE);
#endif
}
UNLOCK_GC;
}
+/*
+ * Forward live-block-range enumeration to the active major collector,
+ * invoking 'callback' as the collector walks its live blocks.
+ */
+void
+sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
+{
+	major_collector.iterate_live_block_ranges (callback);
+}
+
+/*
+ * Have the active major collector scan its card table, using 'queue'
+ * as the gray queue for objects found through dirty cards.
+ */
+void
+sgen_major_collector_scan_card_table (SgenGrayQueue *queue)
+{
+	major_collector.scan_card_table (queue);
+}
+
+/*
+ * Accessor for the file-local major collector descriptor, so other
+ * compilation units no longer need the global itself.
+ */
+SgenMajorCollector*
+mono_sgen_get_major_collector (void)
+{
+	return &major_collector;
+}
+
+/*
+ * Set or clear the current thread's gc_disabled flag under the GC lock.
+ * NOTE(review): presumably the flag is consulted by the thread-suspend
+ * machinery so flagged threads are skipped — confirm at the readers.
+ */
+void
+mono_gc_set_skip_thread (gboolean skip)
+{
+	SgenThreadInfo *info = mono_thread_info_current ();
+
+	/* Take the GC lock so the flag does not flip mid-collection. */
+	LOCK_GC;
+	info->gc_disabled = skip;
+	UNLOCK_GC;
+}
+
+/*
+ * Accessor for the file-local remembered-set interface.
+ * NOTE(review): the 'SgenRemeberedSet' spelling (missing 'm') must match the
+ * type's declaration in the headers; fixing the typo needs a coordinated
+ * rename across the tree.
+ */
+SgenRemeberedSet*
+mono_sgen_get_remset (void)
+{
+	return &remset;
+}
+
+/*
+ * Return extra GC flag bits to store in the vtable for 'class':
+ * SGEN_GC_BIT_BRIDGE_OBJECT when bridge processing is enabled and the
+ * class is a bridge class, otherwise 0.
+ */
+guint
+mono_gc_get_vtable_bits (MonoClass *class)
+{
+	if (mono_sgen_need_bridge_processing () && mono_sgen_is_bridge_class (class))
+		return SGEN_GC_BIT_BRIDGE_OBJECT;
+	return 0;
+}
+
#endif /* HAVE_SGEN_GC */