#define ALIGN_UP SGEN_ALIGN_UP
#define ALLOC_ALIGN SGEN_ALLOC_ALIGN
-#define ALLOC_ALIGN_BITS SGEN_ALLOC_ALIGN_BITS
#define MAX_SMALL_OBJ_SIZE SGEN_MAX_SMALL_OBJ_SIZE
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
static gboolean use_managed_allocator = TRUE;
#ifdef HEAVY_STATISTICS
-static long long stat_objects_alloced = 0;
-static long long stat_bytes_alloced = 0;
-static long long stat_bytes_alloced_los = 0;
+static guint64 stat_objects_alloced = 0;
+static guint64 stat_bytes_alloced = 0;
+static guint64 stat_bytes_alloced_los = 0;
#endif
static __thread char *tlab_temp_end;
static __thread char *tlab_real_end;
/* Used by the managed allocator/wbarrier */
-static __thread char **tlab_next_addr;
+static __thread char **tlab_next_addr MONO_ATTR_USED;
#endif
#ifdef HAVE_KW_THREAD
/* FIXME: handle OOM */
void **p;
char *new_next;
+ size_t real_size = size;
TLAB_ACCESS_INIT;
+
+ CANARIFY_SIZE(size);
HEAVY_STAT (++stat_objects_alloced);
- if (size <= SGEN_MAX_SMALL_OBJ_SIZE)
+ if (real_size <= SGEN_MAX_SMALL_OBJ_SIZE)
HEAVY_STAT (stat_bytes_alloced += size);
else
HEAVY_STAT (stat_bytes_alloced_los += size);
if (collect_before_allocs) {
if (((current_alloc % collect_before_allocs) == 0) && nursery_section) {
sgen_perform_collection (0, GENERATION_NURSERY, "collect-before-alloc-triggered", TRUE);
- if (!degraded_mode && sgen_can_alloc_size (size) && size <= SGEN_MAX_SMALL_OBJ_SIZE) {
+ if (!degraded_mode && sgen_can_alloc_size (size) && real_size <= SGEN_MAX_SMALL_OBJ_SIZE) {
// FIXME:
g_assert_not_reached ();
}
* specially by the world-stopping code.
*/
- if (size > SGEN_MAX_SMALL_OBJ_SIZE) {
- p = sgen_los_alloc_large_inner (vtable, size);
+ if (real_size > SGEN_MAX_SMALL_OBJ_SIZE) {
+ p = sgen_los_alloc_large_inner (vtable, ALIGN_UP (real_size));
} else {
/* tlab_next and tlab_temp_end are TLS vars so accessing them might be expensive */
* visible before the vtable store.
*/
+ CANARIFY_ALLOC(p,real_size);
SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
binary_protocol_alloc (p , vtable, size);
if (G_UNLIKELY (MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ()))
available_in_tlab = (int)(TLAB_REAL_END - TLAB_NEXT);//We'll never have tlabs > 2Gb
if (size > tlab_size || available_in_tlab > SGEN_MAX_NURSERY_WASTE) {
/* Allocate directly from the nursery */
- do {
- p = sgen_nursery_alloc (size);
- if (!p) {
- sgen_ensure_free_space (size);
- if (degraded_mode)
- return alloc_degraded (vtable, size, FALSE);
- else
- p = sgen_nursery_alloc (size);
- }
- } while (!p);
+ p = sgen_nursery_alloc (size);
if (!p) {
- // no space left
- g_assert (0);
+ /*
+ * We couldn't allocate from the nursery, so we try
+ * collecting. Even after the collection, we might
+ * still not have enough memory to allocate the
+ * object. The reason will most likely be that we've
+ * run out of memory, but there is the theoretical
+ * possibility that other threads might have consumed
+ * the freed up memory ahead of us.
+ *
+ * What we do in this case is allocate degraded, i.e.,
+ * from the major heap.
+ *
+ * Ideally we'd like to detect the case of other
+ * threads allocating ahead of us and loop (if we
+ * always loop we will loop endlessly in the case of
+ * OOM).
+ */
+ sgen_ensure_free_space (real_size);
+ if (!degraded_mode)
+ p = sgen_nursery_alloc (size);
}
+ if (!p)
+ return alloc_degraded (vtable, size, FALSE);
zero_tlab_if_necessary (p, size);
} else {
SGEN_LOG (3, "Retire TLAB: %p-%p [%ld]", TLAB_START, TLAB_REAL_END, (long)(TLAB_REAL_END - TLAB_NEXT - size));
sgen_nursery_retire_region (p, available_in_tlab);
- do {
- p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
- if (!p) {
- sgen_ensure_free_space (tlab_size);
- if (degraded_mode)
- return alloc_degraded (vtable, size, FALSE);
- else
- p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
- }
- } while (!p);
-
+ p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
if (!p) {
- // no space left
- g_assert (0);
+ /* See comment above in similar case. */
+ sgen_ensure_free_space (tlab_size);
+ if (!degraded_mode)
+ p = sgen_nursery_alloc_range (tlab_size, size, &alloc_size);
}
+ if (!p)
+ return alloc_degraded (vtable, size, FALSE);
/* Allocate a new TLAB from the current nursery fragment */
TLAB_START = (char*)p;
TLAB_TEMP_END = MIN (TLAB_REAL_END, TLAB_NEXT + SGEN_SCAN_START_SIZE);
SGEN_LOG (5, "Expanding local alloc: %p-%p", TLAB_NEXT, TLAB_TEMP_END);
}
+ CANARIFY_ALLOC(p,real_size);
}
if (G_LIKELY (p)) {
SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
binary_protocol_alloc (p, vtable, size);
if (G_UNLIKELY (MONO_GC_MAJOR_OBJ_ALLOC_LARGE_ENABLED ()|| MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ())) {
- if (size > SGEN_MAX_SMALL_OBJ_SIZE)
+ if (real_size > SGEN_MAX_SMALL_OBJ_SIZE)
MONO_GC_MAJOR_OBJ_ALLOC_LARGE ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
else
MONO_GC_NURSERY_OBJ_ALLOC ((mword)p, size, vtable->klass->name_space, vtable->klass->name);
{
void **p;
char *new_next;
+ size_t real_size = size;
TLAB_ACCESS_INIT;
+ CANARIFY_SIZE(size);
+
size = ALIGN_UP (size);
- SGEN_ASSERT (9, size >= sizeof (MonoObject), "Object too small");
+ SGEN_ASSERT (9, real_size >= sizeof (MonoObject), "Object too small");
g_assert (vtable->gc_descr);
- if (size > SGEN_MAX_SMALL_OBJ_SIZE)
+ if (real_size > SGEN_MAX_SMALL_OBJ_SIZE)
return NULL;
if (G_UNLIKELY (size > tlab_size)) {
HEAVY_STAT (++stat_objects_alloced);
HEAVY_STAT (stat_bytes_alloced += size);
+ CANARIFY_ALLOC(p,real_size);
SGEN_LOG (6, "Allocated object %p, vtable: %p (%s), size: %zd", p, vtable, vtable->klass->name, size);
binary_protocol_alloc (p, vtable, size);
if (G_UNLIKELY (MONO_GC_NURSERY_OBJ_ALLOC_ENABLED ()))
mono_mb_emit_byte (mb, CEE_STIND_I);
/*The tlab store must be visible before the the vtable store. This could be replaced with a DDS but doing it with IL would be tricky. */
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);
- mono_mb_emit_op (mb, CEE_MONO_MEMORY_BARRIER, (gpointer)StoreStoreBarrier);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
/* *p = vtable; */
mono_mb_emit_ldloc (mb, p_var);
/*
We must make sure both vtable and max_length are globaly visible before returning to managed land.
*/
- mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX);
- mono_mb_emit_op (mb, CEE_MONO_MEMORY_BARRIER, (gpointer)StoreStoreBarrier);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
+ mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
/* return p */
mono_mb_emit_ldloc (mb, p_var);
/*
 * sgen_alloc_init_heavy_stats:
 * Registers the HEAVY_STATISTICS allocation counters (object count, bytes
 * allocated in the nursery path, bytes allocated in the LOS path) with the
 * Mono counters subsystem so they can be dumped via mono_counters.
 *
 * NOTE(review): this span is unified-diff content, not plain C — the '-'
 * lines are the old code and the '+' lines are the intended result. The
 * change switches the counters from MONO_COUNTER_LONG to
 * MONO_COUNTER_ULONG, matching the counter variables' change from
 * 'long long' to 'guint64' in the HEAVY_STATISTICS hunk earlier in this
 * patch — TODO confirm the flag matches mono_counters_register's expected
 * type encoding for guint64.
 */
void
sgen_alloc_init_heavy_stats (void)
{
-	mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_objects_alloced);
-	mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced);
-	mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_bytes_alloced_los);
+	mono_counters_register ("# objects allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_objects_alloced);
+	mono_counters_register ("bytes allocated", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced);
+	mono_counters_register ("bytes allocated in LOS", MONO_COUNTER_GC | MONO_COUNTER_ULONG, &stat_bytes_alloced_los);
}
#endif