/* Serializes thread-interruption requests. */
static LOCK_DECLARE (interruption_mutex);
/* Protects the global remembered set against concurrent mutation. */
static LOCK_DECLARE (global_remset_mutex);
+/* Protects the pin staging area when the parallel major collector's
+ * worker threads pin objects concurrently (see mono_sgen_pin_object). */
+static LOCK_DECLARE (pin_queue_mutex);
#define LOCK_GLOBAL_REMSET pthread_mutex_lock (&global_remset_mutex)
#define UNLOCK_GLOBAL_REMSET pthread_mutex_unlock (&global_remset_mutex)
+#define LOCK_PIN_QUEUE pthread_mutex_lock (&pin_queue_mutex)
+#define UNLOCK_PIN_QUEUE pthread_mutex_unlock (&pin_queue_mutex)
+
typedef struct _FinalizeEntry FinalizeEntry;
struct _FinalizeEntry {
FinalizeEntry *next;
/* Heap size limit; all-bits-set means "unlimited". */
static mword max_heap_size = ((mword)0)- ((mword)1);
static mword allocated_heap;
+/* Count of objects pinned during the current collection; reset to 0 at the
+ * start of each nursery/major collection and checked afterwards to decide
+ * whether extra pin-queue fixup (and a follow-up major) is needed. */
+static mword objects_pinned;
+
/*
 * Release SIZE bytes of reserved space accounted to SPACE.
 * Currently a no-op: space accounting appears not to be tracked in this
 * configuration -- NOTE(review): confirm against mono_sgen_try_alloc_space
 * (not visible in this chunk).
 */
void
mono_sgen_release_space (mword size, int space)
{
}
}
+
+/*
+ * Pin OBJECT, record it in the pin staging area, and enqueue it on QUEUE
+ * for later scanning.
+ *
+ * Parallel major collector: the staging area is shared between worker
+ * threads, so it is updated under pin_queue_mutex.  In that path the object
+ * has already been pinned by the caller (via the CAS that set
+ * SGEN_PINNED_BIT in the copy path), hence no SGEN_PIN_OBJECT here.
+ * Serial path: no locking needed, and we pin the object ourselves.
+ */
+void
+mono_sgen_pin_object (void *object, GrayQueue *queue)
+{
+	if (major_collector.is_parallel) {
+		LOCK_PIN_QUEUE;
+		/*object arrives pinned*/
+		pin_stage_ptr (object);
+		++objects_pinned ;
+		UNLOCK_PIN_QUEUE;
+	} else {
+		SGEN_PIN_OBJECT (object);
+		pin_stage_ptr (object);
+		++objects_pinned;
+	}
+	GRAY_OBJECT_ENQUEUE (queue, object);
+}
+
+
/* Sort the addresses in array in increasing order.
* Done using a by-the book heap sort. Which has decent and stable performance, is pretty cache efficient.
*/
/*
 * Collect the nursery (minor collection).
 * Returns TRUE when a major collection should follow: either the usual
 * need_major_collection () heuristic fires, or objects were late-pinned
 * during this collection (a sign of memory pressure).
 */
static gboolean
collect_nursery (size_t requested_size)
{
	gboolean needs_major;
	size_t max_garbage_amount;
	char *orig_nursery_next;
	TV_DECLARE (all_atv);
	check_scan_starts ();
	degraded_mode = 0;
	/* reset late-pin counter for this collection */
	objects_pinned = 0;
	orig_nursery_next = nursery_next;
	nursery_next = MAX (nursery_next, nursery_last_pinned_end);
	/* FIXME: optimize later to use the higher address where an object can be present */
	time_minor_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
	mono_profiler_gc_event (MONO_GC_EVENT_MARK_END, 0);
+	/* Objects pinned after the initial pinning pass (e.g. on copy failure)
+	 * sit in the staging area; fold them into the sorted pin queue and
+	 * refresh the nursery section's view of it. */
+	if (objects_pinned) {
+		evacuate_pin_staging_area ();
+		optimize_pin_queue (0);
+		nursery_section->pin_queue_start = pin_queue;
+		nursery_section->pin_queue_num_entries = next_pin_slot;
+	}
+
	/* walk the pin_queue, build up the fragment list of free memory, unmark
	 * pinned objects as we go, memzero() the empty fragments so they are ready for the
	 * next allocations.
	binary_protocol_flush_buffers (FALSE);
+	/*objects are late pinned because of lack of memory, so a major is a good call*/
+	needs_major = need_major_collection (0) || objects_pinned;
	current_collection_generation = -1;
+	objects_pinned = 0;
-	return need_major_collection ();
+	return needs_major;
}
static void
char *heap_end = (char*)-1;
int old_num_major_sections = major_collector.get_num_major_sections ();
int num_major_sections, num_major_sections_saved, save_target, allowance_target;
+ int old_next_pin_slot;
mword los_memory_saved, los_memory_alloced, old_los_memory_usage;
mono_perfcounters->gc_collections1++;
*/
los_memory_alloced = los_memory_usage - MIN (last_los_memory_usage, los_memory_usage);
old_los_memory_usage = los_memory_usage;
+ objects_pinned = 0;
//count_ref_nonref_objs ();
//consistency_check ();
/* second pass for the sections */
mono_sgen_pin_objects_in_section (nursery_section, WORKERS_DISTRIBUTE_GRAY_QUEUE);
major_collector.pin_objects (WORKERS_DISTRIBUTE_GRAY_QUEUE);
+ old_next_pin_slot = next_pin_slot;
TV_GETTIME (btv);
time_major_pinning += TV_ELAPSED_MS (atv, btv);
TV_GETTIME (atv);
time_major_finish_gray_stack += TV_ELAPSED_MS (btv, atv);
+ if (objects_pinned) {
+ /*This is slow, but we just OOM'd*/
+ mono_sgen_pin_queue_clear_discarded_entries (nursery_section, old_next_pin_slot);
+ evacuate_pin_staging_area ();
+ optimize_pin_queue (0);
+ mono_sgen_find_section_pin_queue_start_end (nursery_section);
+ objects_pinned = 0;
+ }
+
/* sweep the big objects list */
prevbo = NULL;
for (bigobj = los_object_list; bigobj;) {
current_collection_generation = -1;
}
+/*
+ * Force a full major collection for REASON, stopping the world around it
+ * and emitting profiler start/end events for generation 1.
+ * "No lock" in the name suggests the caller already holds the GC lock
+ * (it is invoked from the pinned-alloc OOM path) -- NOTE(review): confirm
+ * against the other collection entry points, which are outside this chunk.
+ */
+void
+sgen_collect_major_no_lock (const char *reason)
+{
+	mono_profiler_gc_event (MONO_GC_EVENT_START, 1);
+	stop_world (1);
+	major_collection (reason);
+	restart_world (1);
+	mono_profiler_gc_event (MONO_GC_EVENT_END, 1);
+}
+
+
/*
* When deciding if it's better to collect or to expand, keep track
* of how much garbage was reclaimed with the last collection: if it's too
void*
mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size)
{
- /* FIXME: handle OOM */
void **p;
size = ALIGN_UP (size);
LOCK_GC;
+
if (size > MAX_SMALL_OBJ_SIZE) {
/* large objects are always pinned anyway */
p = alloc_large_inner (vtable, size);
LOCK_INIT (interruption_mutex);
LOCK_INIT (global_remset_mutex);
+ LOCK_INIT (pin_queue_mutex);
if ((env = getenv ("MONO_GC_PARAMS"))) {
opts = g_strsplit (env, ",", -1);
LOCK_MS_BLOCK_LIST;
- if (!free_blocks [size_index])
- ms_alloc_block (size_index, pinned, has_references);
+ if (!free_blocks [size_index]) {
+ if (G_UNLIKELY (!ms_alloc_block (size_index, pinned, has_references))) {
+ UNLOCK_MS_BLOCK_LIST;
+ return NULL;
+ }
+ }
block = free_blocks [size_index];
DEBUG (9, g_assert (block));
/*
 * Allocate a small pinned object of SIZE bytes (HAS_REFERENCES selects the
 * block class).  On allocation failure, force a major collection to free
 * memory and retry once; returns NULL if the retry also fails.
 */
static void*
major_alloc_small_pinned_obj (size_t size, gboolean has_references)
{
-	return alloc_obj (size, TRUE, has_references);
+	void *res = alloc_obj (size, TRUE, has_references);
+	/*If we failed to alloc memory, we better try releasing memory
+	 *as pinned alloc is requested by the runtime.
+	 */
+	if (!res) {
+		sgen_collect_major_no_lock ("pinned alloc failure");
+		res = alloc_obj (size, TRUE, has_references);
+	}
+	return res;
}
static void
void *obj;
int old_num_sections = num_major_sections;
obj = alloc_obj (size, FALSE, vtable->klass->has_references);
- *(MonoVTable**)obj = vtable;
- HEAVY_STAT (++stat_objects_alloced_degraded);
- HEAVY_STAT (stat_bytes_alloced_degraded += size);
- g_assert (num_major_sections >= old_num_sections);
- mono_sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
+ if (G_LIKELY (obj)) {
+ *(MonoVTable**)obj = vtable;
+ HEAVY_STAT (++stat_objects_alloced_degraded);
+ HEAVY_STAT (stat_bytes_alloced_degraded += size);
+ g_assert (num_major_sections >= old_num_sections);
+ mono_sgen_register_major_sections_alloced (num_major_sections - old_num_sections);
+ }
return obj;
}
has_references = SGEN_VTABLE_HAS_REFERENCES (vt);
destination = major_alloc_object (objsize, has_references);
+ if (G_UNLIKELY (!destination)) {
+ /*
+ * This can fail under 2 scenarios:
+ * - object was copied, we must update *ptr.
+ * - object was pinned, we can leave *ptr as is.
+ */
+ if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
+ mono_sgen_pin_object (obj, queue);
+ } else {
+ vtable_word = *(mword*)obj;
+ if (vtable_word & SGEN_FORWARDED_BIT)
+ *ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
+ }
+ return;
+ }
if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
gboolean was_marked;
if (ptr_in_nursery (obj)) {
int word, bit;
- char *forwarded;
+ char *forwarded, *old_obj;
if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
*ptr = forwarded;
HEAVY_STAT (++stat_objects_copied_major);
do_copy_object:
+ old_obj = obj;
obj = copy_object_no_checks (obj, queue);
+ if (G_UNLIKELY (old_obj == obj)) {
+ return;
+ }
*ptr = obj;
/*