* Copyright 2005-2011 Novell, Inc (http://www.novell.com)
* Copyright 2011 Xamarin Inc (http://www.xamarin.com)
* Copyright 2011 Xamarin, Inc.
+ * Copyright (C) 2012 Xamarin Inc
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License 2.0 as published by the Free Software Foundation;
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License 2.0 along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
#include "metadata/sgen-gc.h"
#include "metadata/sgen-protocol.h"
+#include "metadata/sgen-memory-governor.h"
#include "metadata/profiler-private.h"
+#include "metadata/marshal.h"
+#include "metadata/method-builder.h"
#include "utils/mono-memory-model.h"
+#include "utils/mono-counters.h"
#define ALIGN_UP SGEN_ALIGN_UP
+#define ALLOC_ALIGN SGEN_ALLOC_ALIGN
+#define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
+#define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
+#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
+
+/*
+ * Build the CEE_* opcode constants (used by the managed-allocator method
+ * builder) from mono's central opcode definition file.
+ */
+#define OPDEF(a,b,c,d,e,f,g,h,i,j) \
+ a = i,
+
+enum {
+#include "mono/cil/opcode.def"
+ CEE_LAST
+};
+
+#undef OPDEF
+
+/* When FALSE, mono_gc_get_managed_allocator_by_type () returns NULL so
+ * allocation always falls back to the unmanaged C paths in this file. */
+static gboolean use_managed_allocator = TRUE;
+
+#ifdef HEAVY_STATISTICS
+/* Allocation counters, registered with mono_counters in
+ * sgen_alloc_init_heavy_stats () below. */
+static long long stat_objects_alloced = 0;
+static long long stat_bytes_alloced = 0;
+static long long stat_bytes_alloced_los = 0;
+
+#endif
/*
* Allocation is done from a Thread Local Allocation Buffer (TLAB). TLABs are allocated
#define TLAB_REAL_END (__thread_info__->tlab_real_end)
#endif
-static inline void
-set_nursery_scan_start (char *p)
-{
- int idx = (p - (char*)nursery_section->data) / SGEN_SCAN_START_SIZE;
- char *old = nursery_section->scan_starts [idx];
- if (!old || old > p)
- nursery_section->scan_starts [idx] = p;
-}
-
static void*
alloc_degraded (MonoVTable *vtable, size_t size, gboolean for_mature)
{
static int last_major_gc_warned = -1;
static int num_degraded = 0;
+ void *p;
+
if (!for_mature) {
if (last_major_gc_warned < stat_major_gcs) {
++num_degraded;
fprintf (stderr, "Warning: Repeated degraded allocation. Consider increasing nursery-size.\n");
last_major_gc_warned = stat_major_gcs;
}
+ InterlockedExchangeAdd (°raded_mode, size);
+ sgen_ensure_free_space (size);
+ } else {
+ if (sgen_need_major_collection (size))
+ sgen_perform_collection (size, GENERATION_OLD, "mature allocation failure");
}
- if (mono_sgen_need_major_collection (0)) {
- sgen_collect_major_no_lock ("degraded overflow");
+
+ p = major_collector.alloc_degraded (vtable, size);
+
+ if (for_mature) {
+ MONO_GC_MAJOR_OBJ_ALLOC_MATURE ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
+ } else {
+ binary_protocol_alloc_degraded (p, vtable, size);
+ MONO_GC_MAJOR_OBJ_ALLOC_DEGRADED ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
}
- return major_collector.alloc_degraded (vtable, size);
+ return p;
}
/*
g_assert (vtable->gc_descr);
- if (G_UNLIKELY (collect_before_allocs)) {
+ if (G_UNLIKELY (has_per_allocation_action)) {
static int alloc_count;
-
- InterlockedIncrement (&alloc_count);
- if (((alloc_count % collect_before_allocs) == 0) && nursery_section) {
- mono_sgen_collect_nursery_no_lock (0);
- if (!degraded_mode && !mono_sgen_can_alloc_size (size) && size <= SGEN_MAX_SMALL_OBJ_SIZE) {
- // FIXME:
- g_assert_not_reached ();
+ int current_alloc = InterlockedIncrement (&alloc_count);
+
+ if (collect_before_allocs) {
+ if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
+ sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered");
+ if (!degraded_mode && sgen_can_alloc_size (size) && size <= SGEN_MAX_SMALL_OBJ_SIZE) {
+ // FIXME:
+ g_assert_not_reached ();
+ }
}
+ } else if (verify_before_allocs) {
+ if ((current_alloc % verify_before_allocs) == 0)
+ sgen_check_whole_heap_stw ();
}
}
*/
if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
- p = mono_sgen_los_alloc_large_inner (vtable, size);
+ p = sgen_los_alloc_large_inner (vtable, size);
} else {
/* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
* visible before the vtable store.
*/
- DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
+ SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
binary_protocol_alloc (p , vtable, size);
+ if (G_UNLIKELY (MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ()))
+ MONO_GC_NURSERY_OBJ_ALLOC ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
g_assert (*p == NULL);
mono_atomic_store_seq (p, vtable);
/* when running in degraded mode, we continue allocing that way
* for a while, to decrease the number of useless nursery collections.
*/
- if (degraded_mode && degraded_mode < default_nursery_size) {
- p = alloc_degraded (vtable, size, FALSE);
- binary_protocol_alloc_degraded (p, vtable, size);
- return p;
- }
+ if (degraded_mode && degraded_mode < DEFAULT_NURSERY_SIZE)
+ return alloc_degraded (vtable, size, FALSE);
available_in_tlab = TLAB_REAL_END - TLAB_NEXT;
if (size > tlab_size || available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
/* Allocate directly from the nursery */
do {
- p = mono_sgen_nursery_alloc (size);
+ p = sgen_nursery_alloc (size);
if (!p) {
- mono_sgen_minor_collect_or_expand_inner (size);
- if (degraded_mode) {
- p = alloc_degraded (vtable, size, FALSE);
- binary_protocol_alloc_degraded (p, vtable, size);
- return p;
- } else {
- p = mono_sgen_nursery_alloc (size);
- }
+ sgen_ensure_free_space (size);
+ if (degraded_mode)
+ return alloc_degraded (vtable, size, FALSE);
+ else
+ p = sgen_nursery_alloc (size);
}
} while (!p);
if (!p) {
memset (p, 0, size);
}
} else {
- int alloc_size = 0;
+ size_t alloc_size = 0;
if (TLAB_START)
- DEBUG (3, fprintf (gc_debug_file, "Retire TLAB: %p-%p [%ld]\n", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size)));
- mono_sgen_nursery_retire_region (p, available_in_tlab);
+ SGEN_LOG (3, "Retire TLAB: %p-%p [%ld]", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size));
+ sgen_nursery_retire_region (p, available_in_tlab);
do {
- p = mono_sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
+ p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
if (!p) {
- mono_sgen_minor_collect_or_expand_inner (tlab_size);
- if (degraded_mode) {
- p = alloc_degraded (vtable, size, FALSE);
- binary_protocol_alloc_degraded (p, vtable, size);
- return p;
- } else {
- p = mono_sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
- }
+ sgen_ensure_free_space (tlab_size);
+ if (degraded_mode)
+ return alloc_degraded (vtable, size, FALSE);
+ else
+ p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
}
} while (!p);
/* Allocate from the TLAB */
p = (void*)TLAB_NEXT;
TLAB_NEXT += size;
- set_nursery_scan_start ((char*)p);
+ sgen_set_nursery_scan_start ((char*)p);
}
} else {
/* Reached tlab_temp_end */
/* record the scan start so we can find pinned objects more easily */
- set_nursery_scan_start ((char*)p);
+ sgen_set_nursery_scan_start ((char*)p);
/* we just bump tlab_temp_end as well */
TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
- DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
+ SGEN_LOG (5, "Expanding local alloc: %p-%p", TLAB_NEXT, TLAB_TEMP_END);
}
}
if (G_LIKELY (p)) {
- DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
+ SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
binary_protocol_alloc (p, vtable, size);
+ if (G_UNLIKELY (MONO_GC_MAJOR_OBJ_ALLOC_LARGE_ENABLED ()|| MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ())) {
+ if (size > SGEN_MAX_SMALL_OBJ_SIZE)
+ MONO_GC_MAJOR_OBJ_ALLOC_LARGE ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
+ else
+ MONO_GC_NURSERY_OBJ_ALLOC ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
+ }
mono_atomic_store_seq (p, vtable);
}
if (G_UNLIKELY (size > tlab_size)) {
/* Allocate directly from the nursery */
- p = mono_sgen_nursery_alloc (size);
+ p = sgen_nursery_alloc (size);
if (!p)
return NULL;
- set_nursery_scan_start ((char*)p);
+ sgen_set_nursery_scan_start ((char*)p);
/*FIXME we should use weak memory ops here. Should help specially on x86. */
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
/* Second case, we overflowed temp end */
if (G_UNLIKELY (new_next >= TLAB_TEMP_END)) {
- set_nursery_scan_start (new_next);
+ sgen_set_nursery_scan_start (new_next);
/* we just bump tlab_temp_end as well */
TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
- DEBUG (5, fprintf (gc_debug_file, "Expanding local alloc: %p-%p\n", TLAB_NEXT, TLAB_TEMP_END));
+ SGEN_LOG (5, "Expanding local alloc: %p-%p", TLAB_NEXT, TLAB_TEMP_END);
}
} else if (available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
/* Allocate directly from the nursery */
- p = mono_sgen_nursery_alloc (size);
+ p = sgen_nursery_alloc (size);
if (!p)
return NULL;
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
memset (p, 0, size);
} else {
- int alloc_size = 0;
+ size_t alloc_size = 0;
- mono_sgen_nursery_retire_region (p, available_in_tlab);
- new_next = mono_sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
+ sgen_nursery_retire_region (p, available_in_tlab);
+ new_next = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
p = (void**)new_next;
if (!p)
return NULL;
TLAB_NEXT = new_next + size;
TLAB_REAL_END = new_next + alloc_size;
TLAB_TEMP_END = new_next + MIN (SGEN_SCAN_START_SIZE, alloc_size);
- set_nursery_scan_start ((char*)p);
+ sgen_set_nursery_scan_start ((char*)p);
if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION)
memset (new_next, 0, alloc_size);
+
+ MONO_GC_NURSERY_TLAB_ALLOC ((mword)new_next, alloc_size);
}
}
HEAVY_STAT (++stat_objects_alloced);
HEAVY_STAT (stat_bytes_alloced += size);
- DEBUG (6, fprintf (gc_debug_file, "Allocated object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
+ SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
binary_protocol_alloc (p, vtable, size);
+ if (G_UNLIKELY (MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ()))
+ MONO_GC_NURSERY_OBJ_ALLOC ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
g_assert (*p == NULL); /* FIXME disable this in non debug builds */
mono_atomic_store_seq (p, vtable);
MonoArray *arr;
MonoArrayBounds *bounds;
+#ifndef DISABLE_CRITICAL_REGION
+ TLAB_ACCESS_INIT;
+ ENTER_CRITICAL_REGION;
+ arr = mono_gc_try_alloc_obj_nolock (vtable, size);
+ if (arr) {
+ /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
+ arr->max_length = max_length;
+
+ bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
+ arr->bounds = bounds;
+ EXIT_CRITICAL_REGION;
+ return arr;
+ }
+ EXIT_CRITICAL_REGION;
+#endif
+
LOCK_GC;
arr = mono_gc_alloc_obj_nolock (vtable, size);
if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
/* large objects are always pinned anyway */
- p = mono_sgen_los_alloc_large_inner (vtable, size);
+ p = sgen_los_alloc_large_inner (vtable, size);
} else {
- DEBUG (9, g_assert (vtable->klass->inited));
+ SGEN_ASSERT (9, vtable->klass->inited, "class %s:%s is not initialized", vtable->klass->name_space, vtable->klass->name);
p = major_collector.alloc_small_pinned_obj (size, SGEN_VTABLE_HAS_REFERENCES (vtable));
}
if (G_LIKELY (p)) {
- DEBUG (6, fprintf (gc_debug_file, "Allocated pinned object %p, vtable: %p (%s), size: %zd\n", p, vtable, vtable->klass->name, size));
+ SGEN_LOG (6, "Allocated pinned object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
+ if (size > SGEN_MAX_SMALL_OBJ_SIZE)
+ MONO_GC_MAJOR_OBJ_ALLOC_LARGE ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
+ else
+ MONO_GC_MAJOR_OBJ_ALLOC_PINNED ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
binary_protocol_alloc_pinned (p, vtable, size);
mono_atomic_store_seq (p, vtable);
}
}
void
-mono_sgen_init_tlab_info (SgenThreadInfo* info)
+sgen_init_tlab_info (SgenThreadInfo* info)
{
#ifndef HAVE_KW_THREAD
SgenThreadInfo *__thread_info__ = info;
* Clear the thread local TLAB variables for all threads.
*/
void
-mono_sgen_clear_tlabs (void)
+sgen_clear_tlabs (void)
{
SgenThreadInfo *info;
mono_mb_emit_ldloc (mb, p_var);
mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (MonoArray, max_length));
mono_mb_emit_ldarg (mb, 1);
+#ifdef MONO_BIG_ARRAYS
mono_mb_emit_byte (mb, CEE_STIND_I);
+#else
+ mono_mb_emit_byte (mb, CEE_STIND_I4);
+#endif
}
/*
return NULL;
if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
return NULL;
- if (collect_before_allocs)
+ if (has_per_allocation_action)
return NULL;
g_assert (!mono_class_has_finalizer (klass) && !klass->marshalbyref);
#endif
}
+/*
+ * Enable or disable the inline managed allocator.  When disabled,
+ * mono_gc_get_managed_allocator_by_type () returns NULL, so every
+ * allocation goes through the unmanaged C paths in this file.
+ */
+void
+sgen_set_use_managed_allocator (gboolean flag)
+{
+ use_managed_allocator = flag;
+}
+
MonoMethod*
mono_gc_get_managed_allocator_by_type (int atype)
{
#ifdef MANAGED_ALLOCATION
MonoMethod *res;
+ if (!use_managed_allocator)
+ return NULL;
+
if (!mono_runtime_has_tls_get ())
return NULL;
}
gboolean
-mono_sgen_is_managed_allocator (MonoMethod *method)
+sgen_is_managed_allocator (MonoMethod *method)
{
int i;
return FALSE;
}
+/*
+ * Returns TRUE if at least one managed allocator method has been created
+ * and cached in alloc_method_cache, FALSE otherwise.
+ */
+gboolean
+sgen_has_managed_allocator (void)
+{
+ int i;
+
+ for (i = 0; i < ATYPE_NUM; ++i)
+ if (alloc_method_cache [i])
+ return TRUE;
+ return FALSE;
+}
+
+#ifdef HEAVY_STATISTICS
+/*
+ * Register the heavy-statistics allocation counters declared at the top of
+ * this file with the mono_counters reporting machinery.  Compiled only
+ * when HEAVY_STATISTICS is defined.
+ */
+void
+sgen_alloc_init_heavy_stats (void)
+{
+ mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
+ mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
+ mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
+}
+#endif
+
#endif /*HAVE_SGEN_GC*/