* Author:
* Paolo Molaro (lupus@ximian.com)
*
- * Copyright (C) 2005-2006 Novell, Inc
+ * Copyright 2005-2009 Novell, Inc (http://www.novell.com)
*
* Thread start/stop adapted from Boehm's GC:
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
Multi-dim arrays have the same issue for rank == 1 for the bounds data.
*) implement a card table as the write barrier instead of remembered sets?
*) some sort of blacklist support?
- *) fin_ready_list is part of the root set, too
+ *) fin_ready_list and critical_fin_list are part of the root set, too
*) consider lowering the large object min size to 16/32KB or so and benchmark
*) once mark-compact is implemented we could still keep the
copying collector for the old generation and use it if we think
#include "metadata/mono-gc.h"
#include "metadata/method-builder.h"
#include "metadata/profiler-private.h"
+#include "metadata/monitor.h"
#include "utils/mono-mmap.h"
#ifdef HAVE_VALGRIND_MEMCHECK_H
/* If set, do a heap consistency check before each minor collection */
static gboolean consistency_check_at_minor_collection = FALSE;
+/*
void
mono_gc_flush_info (void)
{
fflush (gc_debug_file);
}
+*/
-#define MAX_DEBUG_LEVEL 9
+#define MAX_DEBUG_LEVEL 8
#define DEBUG(level,a) do {if (G_UNLIKELY ((level) <= MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) a;} while (0)
#define TV_DECLARE(name) struct timeval name
static NurseryClearPolicy nursery_clear_policy = CLEAR_AT_TLAB_CREATION;
+/*
+ * If this is set, the nursery is aligned to an address aligned to its size, ie.
+ * a 1MB nursery will be aligned to an address divisible by 1MB. This allows us to
+ * speed up ptr_in_nursery () checks which are very frequent. This requires the
+ * nursery size to be a compile time constant.
+ */
+#define ALIGN_NURSERY 1
+
/*
* The young generation is divided into fragments. This is because
 * we can hand one fragment to a thread for lock-less fast alloc and
REMSET_LOCATION, /* just a pointer to the exact location */
REMSET_RANGE, /* range of pointer fields */
REMSET_OBJECT, /* mark all the object for scanning */
- REMSET_VTYPE, /* a valuetype described by a gc descriptor */
+ REMSET_OTHER, /* all others */
REMSET_TYPE_MASK = 0x3
};
/*
 * Subtypes of REMSET_OTHER. The subtype is stored in the remset word
 * following an entry tagged REMSET_OTHER (see add_to_global_remset ()).
 */
enum {
	REMSET_VTYPE, /* a valuetype described by a gc descriptor */
	REMSET_ROOT_LOCATION, /* a location inside a root */
};
+
static __thread RememberedSet *remembered_set MONO_TLS_FAST;
static pthread_key_t remembered_set_key;
static RememberedSet *global_remset;
-static int store_to_global_remset = 0;
+//static int store_to_global_remset = 0;
/* FIXME: later choose a size that takes into account the RememberedSet struct
 * and doesn't waste any alloc padding space.
((mword*)(obj))[0] &= ~PINNED_BIT; \
} while (0)
/*
 * Fast test whether PTR lies inside the nursery. With ALIGN_NURSERY the
 * nursery starts at an address aligned to its power-of-two size, so masking
 * off the low DEFAULT_NURSERY_BITS bits and comparing against nursery_start
 * suffices; otherwise fall back to an ordinary range check.
 */
#ifdef ALIGN_NURSERY
#define ptr_in_nursery(ptr) (((mword)(ptr) & ~((1 << DEFAULT_NURSERY_BITS) - 1)) == (mword)nursery_start)
#else
#define ptr_in_nursery(ptr) ((char*)(ptr) >= nursery_start && (char*)(ptr) < nursery_real_end)
#endif
/*
* Since we set bits in the vtable, use the macro to load it from the pointer to
return vt->klass->name;
}
-static guint
+static inline guint
safe_object_get_size (MonoObject* o)
{
MonoClass *klass = ((MonoVTable*)LOAD_VTABLE (o))->klass;
} else if (klass->rank) {
MonoArray *array = (MonoArray*)o;
size_t size = sizeof (MonoArray) + mono_array_element_size (klass) * mono_array_length (array);
- if (array->bounds) {
+ if (G_UNLIKELY (array->bounds)) {
size += 3;
size &= ~3;
size += sizeof (MonoArrayBounds) * klass->rank;
/* good sizes are 512KB-1MB: larger ones increase a lot memzeroing time */
//#define DEFAULT_NURSERY_SIZE (1024*512*125+4096*118)
#define DEFAULT_NURSERY_SIZE (1024*512*2)
+/* The number of trailing 0 bits in DEFAULT_NURSERY_SIZE */
+#define DEFAULT_NURSERY_BITS 20
#define DEFAULT_MAX_SECTION (DEFAULT_NURSERY_SIZE * 16)
#define DEFAULT_LOS_COLLECTION_TARGET (DEFAULT_NURSERY_SIZE * 2)
/* to quickly find the head of an object pinned by a conservative address
/* An entry in a finalizable-objects hash bucket. */
struct _FinalizeEntry {
	FinalizeEntry *next;
	void *object;
};

/* Chained hash of objects registered for finalization, keyed by object. */
typedef struct _FinalizeEntryHashTable FinalizeEntryHashTable;
struct _FinalizeEntryHashTable {
	FinalizeEntry **table;
	mword size;
	int num_registered;
};

/* A disappearing link: LINK is the location holding the (hidden) object pointer. */
typedef struct _DisappearingLink DisappearingLink;
struct _DisappearingLink {
	DisappearingLink *next;
	void **link;
};

/* Chained hash of disappearing links, keyed by link address. */
typedef struct _DisappearingLinkHashTable DisappearingLinkHashTable;
struct _DisappearingLinkHashTable {
	DisappearingLink **table;
	mword size;
	int num_links;
};

/*
 * The GC generations; the finalizable and disappearing-link hashes
 * exist once per generation (minor_* and major_*).
 */
enum {
	GENERATION_NURSERY,
	GENERATION_OLD,
	GENERATION_MAX
};
+
/*
 * The link pointer is hidden by negating each bit. We use the lowest
 * bit of the link (before negation) to store whether it needs
 * resurrection tracking.
 */
#define HIDE_POINTER(p,t) ((gpointer)(~((gulong)(p)|((t)?1:0))))
#define REVEAL_POINTER(p) ((gpointer)((~(gulong)(p))&~3L)) /* clears the two low tag bits */

/* Object referenced by a disappearing link, and its tracking flag. */
#define DISLINK_OBJECT(d) (REVEAL_POINTER (*(d)->link))
#define DISLINK_TRACK(d) ((~(gulong)(*(d)->link)) & 1)
+
/*
* The finalizable hash has the object as the key, the
* disappearing_link hash, has the link address as key.
*/
-static FinalizeEntry **finalizable_hash = NULL;
+static FinalizeEntryHashTable minor_finalizable_hash;
+static FinalizeEntryHashTable major_finalizable_hash;
/* objects that are ready to be finalized */
static FinalizeEntry *fin_ready_list = NULL;
-/* disappearing links use the same structure but a different list */
-static FinalizeEntry **disappearing_link_hash = NULL;
-static mword disappearing_link_hash_size = 0;
-static mword finalizable_hash_size = 0;
+static FinalizeEntry *critical_fin_list = NULL;
+
+static DisappearingLinkHashTable minor_disappearing_link_hash;
+static DisappearingLinkHashTable major_disappearing_link_hash;
-static int num_registered_finalizers = 0;
static int num_ready_finalizers = 0;
-static int num_disappearing_links = 0;
static int no_finalize = 0;
/* keep each size a multiple of ALLOC_ALIGN */
return FALSE;
}
+static int slot_for_size (size_t size);
+
+static void
+free_pinned_object (PinnedChunk *chunk, char *obj, size_t size)
+{
+ void **p = (void**)obj;
+ int slot = slot_for_size (size);
+
+ g_assert (obj >= (char*)chunk->start_data && obj < ((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE));
+ *p = chunk->free_list [slot];
+ chunk->free_list [slot] = p;
+}
+
/*
 * Kinds of registered roots. Each kind is kept in its own roots_hash so
 * that scans which only need one kind (e.g. pin_from_roots ()) can skip
 * the others.
 */
enum {
	ROOT_TYPE_NORMAL = 0, /* "normal" roots */
	ROOT_TYPE_PINNED = 1, /* roots without a GC descriptor */
	ROOT_TYPE_WBARRIER = 2, /* roots with a write barrier */
	ROOT_TYPE_NUM
};
+
/* registered roots: the key to the hash is the root start address */
-static RootRecord **roots_hash = NULL;
-static int roots_hash_size = 0;
+/*
+ * Different kinds of roots are kept separate to speed up pin_from_roots () for example.
+ */
+static RootRecord **roots_hash [ROOT_TYPE_NUM] = { NULL, NULL };
+static int roots_hash_size [ROOT_TYPE_NUM] = { 0, 0, 0 };
static mword roots_size = 0; /* amount of memory in the root set */
-static int num_roots_entries = 0;
+static int num_roots_entries [ROOT_TYPE_NUM] = { 0, 0, 0 };
/*
* The current allocation cursors
static char *nursery_next = NULL;
static char *nursery_frag_real_end = NULL;
static char *nursery_real_end = NULL;
-static char *nursery_first_pinned_start = NULL;
+//static char *nursery_first_pinned_start = NULL;
static char *nursery_last_pinned_end = NULL;
/* The size of a TLAB */
/*
* used when moving the objects
* When the nursery is collected, objects are copied to to_space.
- * The area between to_space and gray_objects is used as a stack
+ * The area between gray_first and gray_objects is used as a stack
* of objects that need their fields checked for more references
* to be copied.
* We should optimize somehow this mechanism to avoid rescanning
* test cache misses and other graph traversal orders.
*/
static char *to_space = NULL;
+static char *gray_first = NULL;
static char *gray_objects = NULL;
static char *to_space_end = NULL;
static GCMemSection *to_space_section = NULL;
/* objects bigger then this go into the large object space */
#define MAX_SMALL_OBJ_SIZE 0xffff
+/* Functions supplied by the runtime to be called by the GC */
+static MonoGCCallbacks gc_callbacks;
+
/*
* ######################################################################
* ######## Macros and function declarations.
static void free_internal_mem (void *addr);
static void* get_os_memory (size_t size, int activate);
static void free_os_memory (void *addr, size_t size);
-static void report_internal_mem_usage (void);
+static G_GNUC_UNUSED void report_internal_mem_usage (void);
static int stop_world (void);
static int restart_world (void);
-static void pin_thread_data (void *start_nursery, void *end_nursery);
+static void scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise);
static void scan_from_remsets (void *start_nursery, void *end_nursery);
static void find_pinning_ref_from_thread (char *obj, size_t size);
static void update_current_thread_stack (void *start);
static GCMemSection* alloc_section (size_t size);
-static void finalize_in_range (char *start, char *end);
-static void null_link_in_range (char *start, char *end);
+static void finalize_in_range (char *start, char *end, int generation);
+static void add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track,
+ DisappearingLinkHashTable *hash);
+static void null_link_in_range (char *start, char *end, int generation);
+static void null_links_for_domain (MonoDomain *domain, int generation);
static gboolean search_fragment_for_size (size_t size);
static void mark_pinned_from_addresses (PinnedChunk *chunk, void **start, void **end);
static void clear_remsets (void);
static void clear_tlabs (void);
static char *find_tlab_next_from_address (char *addr);
+static void scan_pinned_objects (void (*callback) (PinnedChunk*, char*, size_t, void*), void *callback_data);
static void sweep_pinned_objects (void);
static void scan_from_pinned_objects (char *addr_start, char *addr_end);
static void free_large_object (LOSObject *obj);
static void free_mem_section (GCMemSection *section);
+static void mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track);
+
void describe_ptr (char *ptr);
void check_consistency (void);
char* check_object (char *start);
#define ALLOC_ALIGN 8
-/* Root bitmap descriptors are simpler: the lower two bits describe the type
+/* Root bitmap descriptors are simpler: the lower three bits describe the type
* and we either have 30/62 bitmap bits or nibble-based run-length,
- * or a complex descriptor
+ * or a complex descriptor, or a user defined marker function.
*/
enum {
ROOT_DESC_CONSERVATIVE, /* 0, so matches NULL value */
ROOT_DESC_BITMAP,
- ROOT_DESC_RUN_LEN,
- ROOT_DESC_LARGE_BITMAP,
- ROOT_DESC_TYPE_MASK = 0x3,
- ROOT_DESC_TYPE_SHIFT = 2,
+ ROOT_DESC_RUN_LEN,
+ ROOT_DESC_COMPLEX,
+ ROOT_DESC_USER,
+ ROOT_DESC_TYPE_MASK = 0x7,
+ ROOT_DESC_TYPE_SHIFT = 3,
};
+#define MAKE_ROOT_DESC(type,val) ((type) | ((val) << ROOT_DESC_TYPE_SHIFT))
+
+#define MAX_USER_DESCRIPTORS 16
+
static gsize* complex_descriptors = NULL;
static int complex_descriptors_size = 0;
static int complex_descriptors_next = 0;
+static MonoGCMarkFunc user_descriptors [MAX_USER_DESCRIPTORS];
+static int user_descriptors_next = 0;
static int
alloc_complex_descriptor (gsize *bitmap, int numbits)
stored_size += ALLOC_ALIGN - 1;
stored_size &= ~(ALLOC_ALIGN - 1);
for (i = 0; i < numbits; ++i) {
- if (bitmap [i / GC_BITS_PER_WORD] & (1 << (i % GC_BITS_PER_WORD))) {
+ if (bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
if (first_set < 0)
first_set = i;
last_set = i;
int first_set = -1, num_set = 0, last_set = -1, i;
mword desc = vector? DESC_TYPE_VECTOR: DESC_TYPE_ARRAY;
for (i = 0; i < numbits; ++i) {
- if (elem_bitmap [i / GC_BITS_PER_WORD] & (1 << (i % GC_BITS_PER_WORD))) {
+ if (elem_bitmap [i / GC_BITS_PER_WORD] & ((gsize)1 << (i % GC_BITS_PER_WORD))) {
if (first_set < 0)
first_set = i;
last_set = i;
return (void*) desc;
}
+/* Return the bitmap encoded by a descriptor */
+gsize*
+mono_gc_get_bitmap_for_descr (void *descr, int *numbits)
+{
+ mword d = (mword)descr;
+ gsize *bitmap;
+
+ switch (d & 0x7) {
+ case DESC_TYPE_RUN_LENGTH: {
+ int first_set = (d >> 16) & 0xff;
+ int num_set = (d >> 16) & 0xff;
+ int i;
+
+ bitmap = g_new0 (gsize, (first_set + num_set + 7) / 8);
+
+ for (i = first_set; i < first_set + num_set; ++i)
+ bitmap [i / GC_BITS_PER_WORD] |= ((gsize)1 << (i % GC_BITS_PER_WORD));
+
+ *numbits = first_set + num_set;
+
+ return bitmap;
+ }
+ case DESC_TYPE_SMALL_BITMAP:
+ bitmap = g_new0 (gsize, 1);
+
+ bitmap [0] = (d >> SMALL_BITMAP_SHIFT) << OBJECT_HEADER_WORDS;
+
+ *numbits = GC_BITS_PER_WORD;
+
+ return bitmap;
+ default:
+ g_assert_not_reached ();
+ }
+}
+
/* helper macros to scan and traverse objects, macros because we reuse them in many functions */
#define STRING_SIZE(size,str) do { \
(size) = sizeof (MonoString) + 2 * (mono_string_length ((MonoString*)(str)) + 1); \
type_rlen++;
continue;
} else if (type == DESC_TYPE_VECTOR) { // includes ARRAY, too
- skip_size = (vt->desc >> LOW_TYPE_BITS) & MAX_ELEMENT_SIZE;
- skip_size *= mono_array_length ((MonoArray*)start);
- skip_size += sizeof (MonoArray);
+ skip_size = safe_object_get_size ((MonoObject*)start);
skip_size += (ALLOC_ALIGN - 1);
skip_size &= ~(ALLOC_ALIGN - 1);
OBJ_VECTOR_FOREACH_PTR (vt, start);
type_str, type_rlen, type_vector, type_bitmap, type_lbit, type_complex);*/
}
+static gboolean
+need_remove_object_for_domain (char *start, MonoDomain *domain)
+{
+ GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
+ if (mono_object_domain (start) == domain) {
+ DEBUG (1, fprintf (gc_debug_file, "Need to cleanup object %p, (%s)\n", start, safe_name (start)));
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static void
+process_object_for_domain_clearing (char *start, MonoDomain *domain)
+{
+ GCVTable *vt = (GCVTable*)LOAD_VTABLE (start);
+ /* The object could be a proxy for an object in the domain
+ we're deleting. */
+ if (mono_class_has_parent (vt->klass, mono_defaults.real_proxy_class)) {
+ MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
+
+ /* The server could already have been zeroed out, so
+ we need to check for that, too. */
+ if (server && (!LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
+ DEBUG (1, fprintf (gc_debug_file, "Cleaning up remote pointer in %p to object %p (%s)\n",
+ start, server, LOAD_VTABLE (server) ? safe_name (server) : "null"));
+ ((MonoRealProxy*)start)->unwrapped_server = NULL;
+ }
+ }
+}
+
static void __attribute__((noinline))
scan_area_for_domain (MonoDomain *domain, char *start, char *end)
{
GCVTable *vt;
size_t skip_size;
- int type, remove;
+ int type;
+ gboolean remove;
mword desc;
while (start < end) {
continue;
}
vt = (GCVTable*)LOAD_VTABLE (start);
- /* handle threads someway (maybe insert the root domain vtable?) */
- if (mono_object_domain (start) == domain && vt->klass != mono_defaults.thread_class) {
- DEBUG (1, fprintf (gc_debug_file, "Need to cleanup object %p, (%s)\n", start, safe_name (start)));
- remove = 1;
- } else {
- remove = 0;
+ process_object_for_domain_clearing (start, domain);
+ remove = need_remove_object_for_domain (start, domain);
+ if (remove && ((MonoObject*)start)->synchronisation) {
+ void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)start);
+ if (dislink)
+ mono_gc_register_disappearing_link (NULL, dislink, FALSE);
}
desc = vt->desc;
type = desc & 0x7;
start += skip_size;
continue;
} else if (type == DESC_TYPE_VECTOR) { // includes ARRAY, too
- skip_size = (vt->desc >> LOW_TYPE_BITS) & MAX_ELEMENT_SIZE;
- skip_size *= mono_array_length ((MonoArray*)start);
- skip_size += sizeof (MonoArray);
+ skip_size = safe_object_get_size ((MonoObject*)start);
skip_size += (ALLOC_ALIGN - 1);
skip_size &= ~(ALLOC_ALIGN - 1);
if (type == DESC_TYPE_ARRAY) {
}
}
+static void
+clear_domain_process_pinned_object_callback (PinnedChunk *chunk, char *obj, size_t size, MonoDomain *domain)
+{
+ process_object_for_domain_clearing (obj, domain);
+}
+
+static void
+clear_domain_free_pinned_object_callback (PinnedChunk *chunk, char *obj, size_t size, MonoDomain *domain)
+{
+ if (need_remove_object_for_domain (obj, domain))
+ free_pinned_object (chunk, obj, size);
+}
+
/*
* When appdomains are unloaded we can easily remove objects that have finalizers,
* but all the others could still be present in random places on the heap.
mono_gc_clear_domain (MonoDomain * domain)
{
	GCMemSection *section;
	LOSObject *bigobj, *prev;
	Fragment *frag;
	int i;

	LOCK_GC;
	/* Clear all remaining nursery fragments so stale objects of the dying
	 * domain cannot be found by conservative scanning. */
	if (nursery_clear_policy == CLEAR_AT_TLAB_CREATION) {
		g_assert (nursery_next <= nursery_frag_real_end);
		memset (nursery_next, 0, nursery_frag_real_end - nursery_next);
		for (frag = nursery_fragments; frag; frag = frag->next) {
			memset (frag->fragment_start, 0, frag->fragment_end - frag->fragment_start);
		}
	}

	for (section = section_list; section; section = section->next) {
		scan_area_for_domain (domain, section->data, section->end_data);
	}

	/* We need two passes over pinned and large objects because
	   freeing such an object gives its memory back to the OS (in
	   the case of large objects) or obliterates its vtable
	   (pinned objects), but we might need to dereference a
	   pointer from an object to another object if the first
	   object is a proxy. */
	scan_pinned_objects (clear_domain_process_pinned_object_callback, domain);
	for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
		process_object_for_domain_clearing (bigobj->data, domain);

	/* Second pass: unlink and free large objects of the dying domain. */
	prev = NULL;
	for (bigobj = los_object_list; bigobj;) {
		if (need_remove_object_for_domain (bigobj->data, domain)) {
			LOSObject *to_free = bigobj;
			if (prev)
				prev->next = bigobj->next;
			else
				los_object_list = bigobj->next;
			bigobj = bigobj->next;
			/* Log TO_FREE, not BIGOBJ: the original code printed
			 * bigobj->data after advancing the cursor, which named
			 * the wrong object and dereferenced NULL when the last
			 * list element was being freed. */
			DEBUG (1, fprintf (gc_debug_file, "Freeing large object %p (%s)\n",
					to_free->data, safe_name (to_free->data)));
			free_large_object (to_free);
			continue;
		}
		prev = bigobj;
		bigobj = bigobj->next;
	}
	scan_pinned_objects (clear_domain_free_pinned_object_callback, domain);

	/* Null out disappearing links into the dying domain for every generation. */
	for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
		null_links_for_domain (domain, i);

	UNLOCK_GC;
}
+/*
+ * add_to_global_remset:
+ *
+ * The global remset contains locations which point into newspace after
+ * a minor collection. This can happen if the objects they point to are pinned.
+ */
static void
-add_to_global_remset (gpointer ptr)
+add_to_global_remset (gpointer ptr, gboolean root)
{
RememberedSet *rs;
+
DEBUG (8, fprintf (gc_debug_file, "Adding global remset for %p\n", ptr));
- if (global_remset->store_next < global_remset->end_set) {
- *(global_remset->store_next++) = (mword)ptr;
+
+ /*
+ * FIXME: If an object remains pinned, we need to add it at every minor collection.
+ * To avoid uncontrolled growth of the global remset, only add each pointer once.
+ */
+ if (global_remset->store_next + 3 < global_remset->end_set) {
+ if (root) {
+ *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER;
+ *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION;
+ } else {
+ *(global_remset->store_next++) = (mword)ptr;
+ }
return;
}
rs = alloc_remset (global_remset->end_set - global_remset->data, NULL);
rs->next = global_remset;
global_remset = rs;
- *(global_remset->store_next++) = (mword)ptr;
+ if (root) {
+ *(global_remset->store_next++) = (mword)ptr | REMSET_OTHER;
+ *(global_remset->store_next++) = (mword)REMSET_ROOT_LOCATION;
+ } else {
+ *(global_remset->store_next++) = (mword)ptr;
+ }
+
+ {
+ int global_rs_size = 0;
+
+ for (rs = global_remset; rs; rs = rs->next) {
+ global_rs_size += rs->store_next - rs->data;
+ }
+ DEBUG (4, fprintf (gc_debug_file, "Global remset now has size %d\n", global_rs_size));
+ }
}
/*
static char* __attribute__((noinline))
copy_object (char *obj, char *from_space_start, char *from_space_end)
{
+ static void *copy_labels [] = { &&LAB_0, &&LAB_1, &&LAB_2, &&LAB_3, &&LAB_4, &&LAB_5, &&LAB_6, &&LAB_7, &&LAB_8 };
+
+ /*
+ * FIXME: The second set of checks is only needed if we are called for tospace
+ * objects too.
+ */
if (obj >= from_space_start && obj < from_space_end && (obj < to_space || obj >= to_space_end)) {
MonoVTable *vt;
char *forwarded;
* At the end of major collections, we walk the los list and if
* the object is pinned, it is marked, otherwise it can be freed.
*/
- if (objsize >= MAX_SMALL_OBJ_SIZE || (obj >= min_pinned_chunk_addr && obj < max_pinned_chunk_addr && obj_is_from_pinned_alloc (obj))) {
+ if (G_UNLIKELY (objsize >= MAX_SMALL_OBJ_SIZE || (obj >= min_pinned_chunk_addr && obj < max_pinned_chunk_addr && obj_is_from_pinned_alloc (obj)))) {
DEBUG (9, fprintf (gc_debug_file, "Marked LOS/Pinned %p (%s), size: %zd\n", obj, safe_name (obj), objsize));
pin_object (obj);
return obj;
}
/* ok, the object is not pinned, we can move it */
/* use a optimized memcpy here */
+ if (objsize <= sizeof (gpointer) * 8) {
+ mword *dest = (mword*)gray_objects;
+ goto *copy_labels [objsize / sizeof (gpointer)];
+ LAB_8:
+ (dest) [7] = ((mword*)obj) [7];
+ LAB_7:
+ (dest) [6] = ((mword*)obj) [6];
+ LAB_6:
+ (dest) [5] = ((mword*)obj) [5];
+ LAB_5:
+ (dest) [4] = ((mword*)obj) [4];
+ LAB_4:
+ (dest) [3] = ((mword*)obj) [3];
+ LAB_3:
+ (dest) [2] = ((mword*)obj) [2];
+ LAB_2:
+ (dest) [1] = ((mword*)obj) [1];
+ LAB_1:
+ (dest) [0] = ((mword*)obj) [0];
+ LAB_0:
+ ;
+ } else {
#if 0
{
int ecx;
#else
memcpy (gray_objects, obj, objsize);
#endif
+ }
/* adjust array->bounds */
vt = ((MonoObject*)obj)->vtable;
g_assert (vt->gc_descr);
- if (vt->rank && ((MonoArray*)obj)->bounds) {
+ if (G_UNLIKELY (vt->rank && ((MonoArray*)obj)->bounds)) {
MonoArray *array = (MonoArray*)gray_objects;
array->bounds = (MonoArrayBounds*)((char*)gray_objects + ((char*)((MonoArray*)obj)->bounds - (char*)obj));
DEBUG (9, fprintf (gc_debug_file, "Array instance %p: size: %zd, rank: %d, length: %d\n", array, objsize, vt->rank, mono_array_length (array)));
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
- if (*(ptr)) { \
- void *__old = *(ptr); \
- *(ptr) = copy_object (*(ptr), from_start, from_end); \
+ void *__old = *(ptr); \
+ if (__old) { \
+ *(ptr) = copy_object (__old, from_start, from_end); \
DEBUG (9, if (__old != *(ptr)) fprintf (gc_debug_file, "Overwrote field at %p with %p (was: %p)\n", (ptr), *(ptr), __old)); \
- if (G_UNLIKELY (*(ptr) >= (void*)from_start && *(ptr) < (void*)from_end)) \
- add_to_global_remset ((ptr)); \
+ if (G_UNLIKELY (*(ptr) >= (void*)from_start && *(ptr) < (void*)from_end) && !ptr_in_nursery (ptr)) \
+ add_to_global_remset ((ptr), FALSE); \
} \
} while (0)
return NULL;
}
+/*
+ * drain_gray_stack:
+ *
+ * Scan objects in the gray stack until the stack is empty. This should be called
+ * frequently after each object is copied, to achieve better locality and cache
+ * usage.
+ */
+static void inline
+drain_gray_stack (char *start_addr, char *end_addr)
+{
+ char *gray_start = gray_first;
+
+ while (gray_start < gray_objects) {
+ DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", gray_start, safe_name (gray_start)));
+ gray_start = scan_object (gray_start, start_addr, end_addr);
+ }
+
+ gray_first = gray_start;
+}
+
/*
* scan_vtype:
*
}
}
-static void
+static G_GNUC_UNUSED void
print_nursery_gaps (void* start_nursery, void *end_nursery)
{
int i;
* is no write in the old generation area where the pinned object is referenced
* and we may not consider it as reachable.
*/
-static void
+static G_GNUC_UNUSED void
mark_pinned_objects (int generation)
{
}
/*
* Debugging function: find in the conservative roots where @obj is being pinned.
*/
-static void
+static G_GNUC_UNUSED void
find_pinning_reference (char *obj, size_t size)
{
RootRecord *root;
int i;
char *endobj = obj + size;
- for (i = 0; i < roots_hash_size; ++i) {
- for (root = roots_hash [i]; root; root = root->next) {
+ for (i = 0; i < roots_hash_size [0]; ++i) {
+ for (root = roots_hash [0][i]; root; root = root->next) {
/* if desc is non-null it has precise info */
if (!root->root_desc) {
char ** start = (char**)root->start_root;
{
RootRecord *root;
int i;
- DEBUG (3, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d entries)\n", (int)roots_size, num_roots_entries));
+ DEBUG (2, fprintf (gc_debug_file, "Scanning pinned roots (%d bytes, %d/%d entries)\n", (int)roots_size, num_roots_entries [ROOT_TYPE_NORMAL], num_roots_entries [ROOT_TYPE_PINNED]));
/* objects pinned from the API are inside these roots */
- for (i = 0; i < roots_hash_size; ++i) {
- for (root = roots_hash [i]; root; root = root->next) {
- /* if desc is non-null it has precise info */
- if (root->root_desc)
- continue;
+ for (i = 0; i < roots_hash_size [ROOT_TYPE_PINNED]; ++i) {
+ for (root = roots_hash [ROOT_TYPE_PINNED][i]; root; root = root->next) {
DEBUG (6, fprintf (gc_debug_file, "Pinned roots %p-%p\n", root->start_root, root->end_root));
conservatively_pin_objects_from ((void**)root->start_root, (void**)root->end_root, start_nursery, end_nursery);
}
* *) the _last_ managed stack frame
* *) pointers slots in managed frames
*/
- pin_thread_data (start_nursery, end_nursery);
+ scan_thread_data (start_nursery, end_nursery, FALSE);
+}
+
+/* Copy function called from user defined mark functions */
+static char *user_copy_n_start;
+static char *user_copy_n_end;
+
+static void*
+user_copy (void *addr)
+{
+ if (addr)
+ return copy_object (addr, user_copy_n_start, user_copy_n_end);
+ else
+ return NULL;
}
/*
while (desc) {
if ((desc & 1) && *start_root) {
*start_root = copy_object (*start_root, n_start, n_end);
- DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root)); \
+ DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", start_root, *start_root));
+ drain_gray_stack (n_start, n_end);
}
desc >>= 1;
start_root++;
}
return;
+ case ROOT_DESC_COMPLEX: {
+ gsize *bitmap_data = complex_descriptors + (desc >> ROOT_DESC_TYPE_SHIFT);
+ int bwords = (*bitmap_data) - 1;
+ void **start_run = start_root;
+ bitmap_data++;
+ while (bwords-- > 0) {
+ gsize bmap = *bitmap_data++;
+ void **objptr = start_run;
+ while (bmap) {
+ if ((bmap & 1) && *objptr) {
+ *objptr = copy_object (*objptr, n_start, n_end);
+ DEBUG (9, fprintf (gc_debug_file, "Overwrote root at %p with %p\n", objptr, *objptr));
+ drain_gray_stack (n_start, n_end);
+ }
+ bmap >>= 1;
+ ++objptr;
+ }
+ start_run += GC_BITS_PER_WORD;
+ }
+ break;
+ }
+ case ROOT_DESC_USER: {
+ MonoGCMarkFunc marker = user_descriptors [desc >> ROOT_DESC_TYPE_SHIFT];
+
+ user_copy_n_start = n_start;
+ user_copy_n_end = n_end;
+ marker (start_root, user_copy);
+ break;
+ }
case ROOT_DESC_RUN_LEN:
- case ROOT_DESC_LARGE_BITMAP:
+ g_assert_not_reached ();
+ default:
g_assert_not_reached ();
}
}
char *data;
int scan_starts;
Fragment *frag;
+ int alloc_size;
if (nursery_section)
return;
*/
/* FIXME: handle OOM */
section = get_internal_mem (sizeof (GCMemSection));
- data = get_os_memory (nursery_size, TRUE);
- nursery_start = nursery_next = data;
- nursery_real_end = data + nursery_size;
+
+#ifdef ALIGN_NURSERY
+ /* Allocate twice the memory to be able to put the nursery at an aligned address */
+ g_assert (nursery_size == DEFAULT_NURSERY_SIZE);
+
+ alloc_size = nursery_size * 2;
+ data = get_os_memory (alloc_size, TRUE);
+ nursery_start = (void*)(((mword)data + (1 << DEFAULT_NURSERY_BITS) - 1) & ~((1 << DEFAULT_NURSERY_BITS) - 1));
+ g_assert ((char*)nursery_start + nursery_size <= ((char*)data + alloc_size));
+ /* FIXME: Use the remaining size for something else, if it is big enough */
+#else
+ alloc_size = nursery_size;
+ data = get_os_memory (alloc_size, TRUE);
+ nursery_start = data;
+#endif
+ nursery_real_end = nursery_start + nursery_size;
UPDATE_HEAP_BOUNDARIES (nursery_start, nursery_real_end);
- total_alloc += nursery_size;
+ nursery_next = nursery_start;
+ total_alloc += alloc_size;
DEBUG (4, fprintf (gc_debug_file, "Expanding heap size: %zd, total: %zd\n", nursery_size, total_alloc));
section->data = section->next_data = data;
- section->size = nursery_size;
+ section->size = alloc_size;
section->end_data = nursery_real_end;
- scan_starts = nursery_size / SCAN_START_SIZE;
+ scan_starts = alloc_size / SCAN_START_SIZE;
section->scan_starts = get_internal_mem (sizeof (char*) * scan_starts);
section->num_scan_start = scan_starts;
section->role = MEMORY_ROLE_GEN0;
/* FIXME: frag here is lost */
}
+static void
+scan_finalizer_entries (FinalizeEntry *list, char *start, char *end) {
+ FinalizeEntry *fin;
+
+ for (fin = list; fin; fin = fin->next) {
+ if (!fin->object)
+ continue;
+ DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
+ fin->object = copy_object (fin->object, start, end);
+ }
+}
+
/*
* Update roots in the old generation. Since we currently don't have the
* info from the write barriers, we just scan all the objects.
*/
-static void
+static G_GNUC_UNUSED void
scan_old_generation (char *start, char* end)
{
GCMemSection *section;
- FinalizeEntry *fin;
LOSObject *big_object;
char *p;
scan_object (big_object->data, start, end);
}
/* scan the list of objects ready for finalization */
- for (fin = fin_ready_list; fin; fin = fin->next) {
- DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
- fin->object = copy_object (fin->object, start, end);
- }
+ scan_finalizer_entries (fin_ready_list, start, end);
+ scan_finalizer_entries (critical_fin_list, start, end);
}
static mword fragment_total = 0;
return count;
}
+static DisappearingLinkHashTable*
+get_dislink_hash_table (int generation)
+{
+ switch (generation) {
+ case GENERATION_NURSERY: return &minor_disappearing_link_hash;
+ case GENERATION_OLD: return &major_disappearing_link_hash;
+ default: g_assert_not_reached ();
+ }
+}
+
+static FinalizeEntryHashTable*
+get_finalize_entry_hash_table (int generation)
+{
+ switch (generation) {
+ case GENERATION_NURSERY: return &minor_finalizable_hash;
+ case GENERATION_OLD: return &major_finalizable_hash;
+ default: g_assert_not_reached ();
+ }
+}
+
static void
-drain_gray_stack (char *start_addr, char *end_addr)
+finish_gray_stack (char *start_addr, char *end_addr, int generation)
{
TV_DECLARE (atv);
TV_DECLARE (btv);
* We need to walk the LO list as well in search of marked big objects
* (use a flag since this is needed only on major collections). We need to loop
* here as well, so keep a counter of marked LO (increasing it in copy_object).
+ * To achieve better cache locality and cache usage, we drain the gray stack
+ * frequently, after each object is copied, and just finish the work here.
*/
- TV_GETTIME (btv);
- gray_start = to_space;
- DEBUG (6, fprintf (gc_debug_file, "Precise scan of gray area: %p-%p, size: %d\n", gray_start, gray_objects, (int)(gray_objects - gray_start)));
+ gray_start = gray_first;
while (gray_start < gray_objects) {
DEBUG (9, fprintf (gc_debug_file, "Precise gray object scan %p (%s)\n", gray_start, safe_name (gray_start)));
gray_start = scan_object (gray_start, start_addr, end_addr);
}
TV_GETTIME (atv);
- DEBUG (2, fprintf (gc_debug_file, "Gray stack scan: %d usecs\n", TV_ELAPSED (btv, atv)));
//scan_old_generation (start_addr, end_addr);
DEBUG (2, fprintf (gc_debug_file, "Old generation done\n"));
/* walk the finalization queue and move also the objects that need to be
*/
do {
fin_ready = num_ready_finalizers;
- finalize_in_range (start_addr, end_addr);
+ finalize_in_range (start_addr, end_addr, generation);
+ if (generation == GENERATION_OLD)
+ finalize_in_range (nursery_start, nursery_real_end, GENERATION_NURSERY);
bigo_scanned_num = scan_needed_big_objects (start_addr, end_addr);
/* drain the new stack that might have been created */
* GC a finalized object my lose the monitor because it is cleared before the finalizer is
* called.
*/
- null_link_in_range (start_addr, end_addr);
+ null_link_in_range (start_addr, end_addr, generation);
TV_GETTIME (btv);
DEBUG (2, fprintf (gc_debug_file, "Finalize queue handling scan: %d usecs\n", TV_ELAPSED (atv, btv)));
}
}
static void
-scan_from_registered_roots (char *addr_start, char *addr_end)
+scan_from_registered_roots (char *addr_start, char *addr_end, int root_type)
{
int i;
RootRecord *root;
- for (i = 0; i < roots_hash_size; ++i) {
- for (root = roots_hash [i]; root; root = root->next) {
- /* if desc is non-null it has precise info */
- if (!root->root_desc)
- continue;
+ for (i = 0; i < roots_hash_size [root_type]; ++i) {
+ for (root = roots_hash [root_type][i]; root; root = root->next) {
DEBUG (6, fprintf (gc_debug_file, "Precise root scan %p-%p (desc: %p)\n", root->start_root, root->end_root, (void*)root->root_desc));
precisely_scan_objects_from ((void**)root->start_root, (void**)root->end_root, addr_start, addr_end, root->root_desc);
}
* We reset to_space if we allocated objects in degraded mode.
*/
if (to_space_section)
- to_space = gray_objects = to_space_section->next_data;
+ to_space = gray_objects = gray_first = to_space_section->next_data;
if ((to_space_end - to_space) < max_garbage_amount) {
section = alloc_section (nursery_section->size * 4);
g_assert (nursery_section->size >= max_garbage_amount);
- to_space = gray_objects = section->next_data;
+ to_space = gray_objects = gray_first = section->next_data;
to_space_end = section->end_data;
to_space_section = section;
}
/* we don't have complete write barrier yet, so we scan all the old generation sections */
TV_GETTIME (atv);
DEBUG (2, fprintf (gc_debug_file, "Old generation scan: %d usecs\n", TV_ELAPSED (btv, atv)));
- /* FIXME: later scan also alloc_pinned objects */
/* the pinned objects are roots */
for (i = 0; i < next_pin_slot; ++i) {
scan_object (pin_queue [i], nursery_start, nursery_next);
}
/* registered roots, this includes static fields */
- scan_from_registered_roots (nursery_start, nursery_next);
+ scan_from_registered_roots (nursery_start, nursery_next, ROOT_TYPE_NORMAL);
+ scan_thread_data (nursery_start, nursery_next, TRUE);
/* alloc_pinned objects */
scan_from_pinned_objects (nursery_start, nursery_next);
TV_GETTIME (btv);
DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (atv, btv)));
- drain_gray_stack (nursery_start, nursery_next);
+ finish_gray_stack (nursery_start, nursery_next, GENERATION_NURSERY);
/* walk the pin_queue, build up the fragment list of free memory, unmark
* pinned objects as we go, memzero() the empty fragments so they are ready for the
/* prepare the pin queue for the next collection */
last_num_pinned = next_pin_slot;
next_pin_slot = 0;
- if (fin_ready_list) {
+ if (fin_ready_list || critical_fin_list) {
DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
mono_gc_finalize_notify ();
}
LOSObject *bigobj, *prevbo;
int i;
PinnedChunk *chunk;
- FinalizeEntry *fin;
Fragment *frag;
int count;
TV_DECLARE (all_atv);
/* allocate the big to space */
DEBUG (4, fprintf (gc_debug_file, "Allocate tospace for size: %zd\n", copy_space_required));
section = alloc_section (copy_space_required);
- to_space = gray_objects = section->next_data;
+ to_space = gray_objects = gray_first = section->next_data;
to_space_end = section->end_data;
to_space_section = section;
scan_object (pin_queue [i], heap_start, heap_end);
}
/* registered roots, this includes static fields */
- scan_from_registered_roots (heap_start, heap_end);
+ scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_NORMAL);
+ scan_from_registered_roots (heap_start, heap_end, ROOT_TYPE_WBARRIER);
+ /* Threads */
+ scan_thread_data (heap_start, heap_end, TRUE);
/* alloc_pinned objects */
scan_from_pinned_objects (heap_start, heap_end);
/* scan the list of objects ready for finalization */
- for (fin = fin_ready_list; fin; fin = fin->next) {
- DEBUG (5, fprintf (gc_debug_file, "Scan of fin ready object: %p (%s)\n", fin->object, safe_name (fin->object)));
- fin->object = copy_object (fin->object, heap_start, heap_end);
- }
+ scan_finalizer_entries (fin_ready_list, heap_start, heap_end);
+ scan_finalizer_entries (critical_fin_list, heap_start, heap_end);
TV_GETTIME (atv);
DEBUG (2, fprintf (gc_debug_file, "Root scan: %d usecs\n", TV_ELAPSED (btv, atv)));
/* we need to go over the big object list to see if any was marked and scan it
* And we need to make this in a loop, considering that objects referenced by finalizable
- * objects could reference big objects (this happens in drain_gray_stack ())
+ * objects could reference big objects (this happens in finish_gray_stack ())
*/
scan_needed_big_objects (heap_start, heap_end);
/* all the objects in the heap */
- drain_gray_stack (heap_start, heap_end);
+ finish_gray_stack (heap_start, heap_end, GENERATION_OLD);
+ null_link_in_range (nursery_start, nursery_real_end, GENERATION_NURSERY);
/* sweep the big objects list */
prevbo = NULL;
mono_stats.major_gc_time_usecs += TV_ELAPSED (all_atv, all_btv);
/* prepare the pin queue for the next collection */
next_pin_slot = 0;
- if (fin_ready_list) {
+ if (fin_ready_list || critical_fin_list) {
DEBUG (4, fprintf (gc_debug_file, "Finalizer-thread wakeup: ready %d\n", num_ready_finalizers));
mono_gc_finalize_notify ();
}
/*
* Debug reporting.
*/
-static void
+static G_GNUC_UNUSED void
report_internal_mem_usage (void) {
PinnedChunk *chunk;
int i;
}
static void
-sweep_pinned_objects (void)
+scan_pinned_objects (void (*callback) (PinnedChunk*, char*, size_t, void*), void *callback_data)
{
PinnedChunk *chunk;
int i, obj_size;
void *end_chunk;
for (chunk = pinned_chunk_list; chunk; chunk = chunk->next) {
end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE;
- DEBUG (6, fprintf (gc_debug_file, "Sweeping pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk));
+ DEBUG (6, fprintf (gc_debug_file, "Scanning pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk));
for (i = 0; i < chunk->num_pages; ++i) {
obj_size = chunk->page_sizes [i];
if (!obj_size)
ptr = (void**)p;
DEBUG (9, fprintf (gc_debug_file, "Considering %p (vtable: %p)\n", ptr, *ptr));
/* if the first word (the vtable) is outside the chunk we have an object */
- if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk)) {
- if (object_is_pinned (ptr)) {
- unpin_object (ptr);
- DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
- } else {
- /* FIXME: add to freelist */
- DEBUG (6, fprintf (gc_debug_file, "Going to free unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
- }
- }
+ if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk))
+ callback (chunk, (char*)ptr, obj_size, callback_data);
p += obj_size;
}
}
}
static void
-scan_from_pinned_objects (char *addr_start, char *addr_end)
+sweep_pinned_objects_callback (PinnedChunk *chunk, char *ptr, size_t size, void *data)
{
- PinnedChunk *chunk;
- int i, obj_size;
- char *p, *endp;
- void **ptr;
- void *end_chunk;
- for (chunk = pinned_chunk_list; chunk; chunk = chunk->next) {
- end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE;
- DEBUG (6, fprintf (gc_debug_file, "Scanning pinned chunk %p (range: %p-%p)\n", chunk, chunk->start_data, end_chunk));
- for (i = 0; i < chunk->num_pages; ++i) {
- obj_size = chunk->page_sizes [i];
- if (!obj_size)
- continue;
- p = i? (char*)chunk + i * FREELIST_PAGESIZE: chunk->start_data;
- endp = i? p + FREELIST_PAGESIZE: (char*)chunk + FREELIST_PAGESIZE;
- DEBUG (6, fprintf (gc_debug_file, "Page %d (size: %d, range: %p-%p)\n", i, obj_size, p, endp));
- while (p + obj_size <= endp) {
- ptr = (void**)p;
- DEBUG (9, fprintf (gc_debug_file, "Considering %p (vtable: %p)\n", ptr, *ptr));
- /* if the first word (the vtable) is outside the chunk we have an object */
- if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk)) {
- DEBUG (6, fprintf (gc_debug_file, "Precise object scan %d of alloc_pinned %p (%s)\n", i, ptr, safe_name (ptr)));
- // FIXME: Put objects without references into separate chunks
- // which do not need to be scanned
- scan_object ((char*)ptr, addr_start, addr_end);
- }
- p += obj_size;
- }
- }
+ if (object_is_pinned (ptr)) {
+ unpin_object (ptr);
+ DEBUG (6, fprintf (gc_debug_file, "Unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
+ } else {
+ DEBUG (6, fprintf (gc_debug_file, "Freeing unmarked pinned object %p (%s)\n", ptr, safe_name (ptr)));
+ free_pinned_object (chunk, ptr, size);
}
}
+static void
+sweep_pinned_objects (void)
+{
+ scan_pinned_objects (sweep_pinned_objects_callback, NULL);
+}
+
+static void
+scan_object_callback (PinnedChunk *chunk, char *ptr, size_t size, char **data)
+{
+ DEBUG (6, fprintf (gc_debug_file, "Precise object scan of alloc_pinned %p (%s)\n", ptr, safe_name (ptr)));
+ /* FIXME: Put objects without references into separate chunks
+ which do not need to be scanned */
+ scan_object (ptr, data [0], data [1]);
+}
+
+static void
+scan_from_pinned_objects (char *addr_start, char *addr_end)
+{
+ char *data [2] = { addr_start, addr_end };
+ scan_pinned_objects (scan_object_callback, data);
+}
+
/*
* Find the slot number in the freelist for memory chunks that
* can contain @size objects.
*/
#define object_is_fin_ready(obj) (!object_is_pinned (obj) && !object_is_forwarded (obj))
-static void
-finalize_in_range (char *start, char *end)
+static gboolean
+is_critical_finalizer (FinalizeEntry *entry)
{
- FinalizeEntry *entry, *prev;
- int i;
- if (no_finalize)
+ MonoObject *obj;
+ MonoClass *class;
+
+ if (!mono_defaults.critical_finalizer_object)
+ return FALSE;
+
+ obj = entry->object;
+ class = ((MonoVTable*)LOAD_VTABLE (obj))->klass;
+
+ return mono_class_has_parent (class, mono_defaults.critical_finalizer_object);
+}
+
+static void
+queue_finalization_entry (FinalizeEntry *entry) {
+ if (is_critical_finalizer (entry)) {
+ entry->next = critical_fin_list;
+ critical_fin_list = entry;
+ } else {
+ entry->next = fin_ready_list;
+ fin_ready_list = entry;
+ }
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+rehash_fin_table (FinalizeEntryHashTable *hash_table)
+{
+ FinalizeEntry **finalizable_hash = hash_table->table;
+ mword finalizable_hash_size = hash_table->size;
+ int i;
+ unsigned int hash;
+ FinalizeEntry **new_hash;
+ FinalizeEntry *entry, *next;
+ int new_size = g_spaced_primes_closest (hash_table->num_registered);
+
+ new_hash = get_internal_mem (new_size * sizeof (FinalizeEntry*));
+ for (i = 0; i < finalizable_hash_size; ++i) {
+ for (entry = finalizable_hash [i]; entry; entry = next) {
+ hash = mono_object_hash (entry->object) % new_size;
+ next = entry->next;
+ entry->next = new_hash [hash];
+ new_hash [hash] = entry;
+ }
+ }
+ free_internal_mem (finalizable_hash);
+ hash_table->table = new_hash;
+ hash_table->size = new_size;
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+rehash_fin_table_if_necessary (FinalizeEntryHashTable *hash_table)
+{
+ if (hash_table->num_registered >= hash_table->size * 2)
+ rehash_fin_table (hash_table);
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+finalize_in_range (char *start, char *end, int generation)
+{
+ FinalizeEntryHashTable *hash_table = get_finalize_entry_hash_table (generation);
+ FinalizeEntry *entry, *prev;
+ int i;
+ FinalizeEntry **finalizable_hash = hash_table->table;
+ mword finalizable_hash_size = hash_table->size;
+
+ if (no_finalize)
return;
for (i = 0; i < finalizable_hash_size; ++i) {
prev = NULL;
for (entry = finalizable_hash [i]; entry;) {
if ((char*)entry->object >= start && (char*)entry->object < end && ((char*)entry->object < to_space || (char*)entry->object >= to_space_end)) {
- if (object_is_fin_ready (entry->object)) {
+ gboolean is_fin_ready = object_is_fin_ready (entry->object);
+ char *copy = copy_object (entry->object, start, end);
+ if (is_fin_ready) {
char *from;
FinalizeEntry *next;
/* remove and put in fin_ready_list */
finalizable_hash [i] = entry->next;
next = entry->next;
num_ready_finalizers++;
- num_registered_finalizers--;
- entry->next = fin_ready_list;
- fin_ready_list = entry;
+ hash_table->num_registered--;
+ queue_finalization_entry (entry);
/* Make it survive */
from = entry->object;
- entry->object = copy_object (entry->object, start, end);
- DEBUG (5, fprintf (gc_debug_file, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)\n", entry->object, safe_name (entry->object), from, num_ready_finalizers, num_registered_finalizers));
+ entry->object = copy;
+ DEBUG (5, fprintf (gc_debug_file, "Queueing object for finalization: %p (%s) (was at %p) (%d/%d)\n", entry->object, safe_name (entry->object), from, num_ready_finalizers, hash_table->num_registered));
entry = next;
continue;
} else {
- /* update pointer */
- DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s)\n", entry->object, safe_name (entry->object)));
- entry->object = copy_object (entry->object, start, end);
+ char *from = entry->object;
+ if (hash_table == &minor_finalizable_hash && !ptr_in_nursery (copy)) {
+ FinalizeEntry *next = entry->next;
+ unsigned int major_hash;
+ /* remove from the list */
+ if (prev)
+ prev->next = entry->next;
+ else
+ finalizable_hash [i] = entry->next;
+ hash_table->num_registered--;
+
+ entry->object = copy;
+
+ /* insert it into the major hash */
+ rehash_fin_table_if_necessary (&major_finalizable_hash);
+ major_hash = mono_object_hash ((MonoObject*) copy) %
+ major_finalizable_hash.size;
+ entry->next = major_finalizable_hash.table [major_hash];
+ major_finalizable_hash.table [major_hash] = entry;
+ major_finalizable_hash.num_registered++;
+
+ DEBUG (5, fprintf (gc_debug_file, "Promoting finalization of object %p (%s) (was at %p) to major table\n", copy, safe_name (copy), from));
+
+ entry = next;
+ continue;
+ } else {
+ /* update pointer */
+ DEBUG (5, fprintf (gc_debug_file, "Updating object for finalization: %p (%s) (was at %p)\n", entry->object, safe_name (entry->object), from));
+ entry->object = copy;
+ }
}
}
prev = entry;
}
}
+/* LOCKING: requires that the GC lock is held */
static void
-null_link_in_range (char *start, char *end)
+null_link_in_range (char *start, char *end, int generation)
{
- FinalizeEntry *entry, *prev;
+ DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
+ DisappearingLink **disappearing_link_hash = hash->table;
+ int disappearing_link_hash_size = hash->size;
+ DisappearingLink *entry, *prev;
int i;
+ if (!hash->num_links)
+ return;
for (i = 0; i < disappearing_link_hash_size; ++i) {
prev = NULL;
for (entry = disappearing_link_hash [i]; entry;) {
- if ((char*)entry->object >= start && (char*)entry->object < end && ((char*)entry->object < to_space || (char*)entry->object >= to_space_end)) {
- if (object_is_fin_ready (entry->object)) {
- void **p = entry->data;
- FinalizeEntry *old;
+ char *object = DISLINK_OBJECT (entry);
+ if (object >= start && object < end && (object < to_space || object >= to_space_end)) {
+ gboolean track = DISLINK_TRACK (entry);
+ if (!track && object_is_fin_ready (object)) {
+ void **p = entry->link;
+ DisappearingLink *old;
*p = NULL;
/* remove from list */
if (prev)
prev->next = entry->next;
else
disappearing_link_hash [i] = entry->next;
- DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", p, entry->object));
+ DEBUG (5, fprintf (gc_debug_file, "Dislink nullified at %p to GCed object %p\n", p, object));
old = entry->next;
free_internal_mem (entry);
entry = old;
- num_disappearing_links--;
+ hash->num_links--;
continue;
} else {
- void **link;
- /* update pointer if it's moved
+ char *copy = copy_object (object, start, end);
+
+ /* Update pointer if it's moved. If the object
+ * has been moved out of the nursery, we need to
+ * remove the link from the minor hash table to
+ * the major one.
+ *
* FIXME: what if an object is moved earlier?
*/
- entry->object = copy_object (entry->object, start, end);
- DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", entry->data, entry->object));
- link = entry->data;
- *link = entry->object;
+
+ if (hash == &minor_disappearing_link_hash && !ptr_in_nursery (copy)) {
+ void **link = entry->link;
+ DisappearingLink *old;
+ /* remove from list */
+ if (prev)
+ prev->next = entry->next;
+ else
+ disappearing_link_hash [i] = entry->next;
+ old = entry->next;
+ free_internal_mem (entry);
+ entry = old;
+ hash->num_links--;
+
+ add_or_remove_disappearing_link ((MonoObject*)copy, link, track,
+ &major_disappearing_link_hash);
+
+ DEBUG (5, fprintf (gc_debug_file, "Upgraded dislink at %p to major because object %p moved to %p\n", link, object, copy));
+
+ continue;
+ } else {
+ /* We set the track resurrection bit to
+ * FALSE if the object is to be finalized
+ * so that the object can be collected in
+ * the next cycle (i.e. after it was
+ * finalized).
+ */
+ *entry->link = HIDE_POINTER (copy,
+ object_is_fin_ready (object) ? FALSE : track);
+ DEBUG (5, fprintf (gc_debug_file, "Updated dislink at %p to %p\n", entry->link, DISLINK_OBJECT (entry)));
+ }
}
}
prev = entry;
}
}
-/**
- * mono_gc_finalizers_for_domain:
- * @domain: the unloading appdomain
- * @out_array: output array
- * @out_size: size of output array
- *
- * Store inside @out_array up to @out_size objects that belong to the unloading
- * appdomain @domain. Returns the number of stored items. Can be called repeteadly
- * until it returns 0.
- * The items are removed from the finalizer data structure, so the caller is supposed
- * to finalize them.
- * @out_array should be on the stack to allow the GC to know the objects are still alive.
- */
-int
-mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
+static const char*
+dislink_table_name (DisappearingLinkHashTable *table)
+{
+ if (table == &minor_disappearing_link_hash)
+ return "minor table";
+ return "major table";
+}
+
+/* LOCKING: requires that the GC lock is held */
+static void
+null_links_for_domain (MonoDomain *domain, int generation)
+{
+ DisappearingLinkHashTable *hash = get_dislink_hash_table (generation);
+ DisappearingLink **disappearing_link_hash = hash->table;
+ int disappearing_link_hash_size = hash->size;
+ DisappearingLink *entry, *prev;
+ int i;
+ for (i = 0; i < disappearing_link_hash_size; ++i) {
+ prev = NULL;
+ for (entry = disappearing_link_hash [i]; entry; ) {
+ char *object = DISLINK_OBJECT (entry);
+ /* FIXME: actually there should be no object
+ left in the domain with a non-null vtable
+ (provided we remove the Thread special
+ case) */
+ if (object && (!((MonoObject*)object)->vtable || mono_object_domain (object) == domain)) {
+ DisappearingLink *next = entry->next;
+
+ if (prev)
+ prev->next = next;
+ else
+ disappearing_link_hash [i] = next;
+
+ if (*(entry->link)) {
+ *(entry->link) = NULL;
+ g_warning ("Disappearing link %p not freed", entry->link);
+ } else {
+ free_internal_mem (entry);
+ }
+
+ entry = next;
+ continue;
+ }
+ prev = entry;
+ entry = entry->next;
+ }
+ }
+}
+
+/* LOCKING: requires that the GC lock is held */
+static int
+finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size,
+ FinalizeEntryHashTable *hash_table)
{
+ FinalizeEntry **finalizable_hash = hash_table->table;
+ mword finalizable_hash_size = hash_table->size;
FinalizeEntry *entry, *prev;
int i, count;
+
if (no_finalize || !out_size || !out_array)
return 0;
count = 0;
- LOCK_GC;
for (i = 0; i < finalizable_hash_size; ++i) {
prev = NULL;
for (entry = finalizable_hash [i]; entry;) {
else
finalizable_hash [i] = entry->next;
next = entry->next;
- num_registered_finalizers--;
+ hash_table->num_registered--;
out_array [count ++] = entry->object;
- DEBUG (5, fprintf (gc_debug_file, "Collecting object for finalization: %p (%s) (%d/%d)\n", entry->object, safe_name (entry->object), num_ready_finalizers, num_registered_finalizers));
+ DEBUG (5, fprintf (gc_debug_file, "Collecting object for finalization: %p (%s) (%d/%d)\n", entry->object, safe_name (entry->object), num_ready_finalizers, hash_table->num_registered));
entry = next;
- if (count == out_size) {
- UNLOCK_GC;
+ if (count == out_size)
return count;
- }
continue;
}
prev = entry;
entry = entry->next;
}
}
- UNLOCK_GC;
return count;
}
-static void
-rehash_fin_table (void)
+/**
+ * mono_gc_finalizers_for_domain:
+ * @domain: the unloading appdomain
+ * @out_array: output array
+ * @out_size: size of output array
+ *
+ * Store inside @out_array up to @out_size objects that belong to the unloading
+ * appdomain @domain. Returns the number of stored items. Can be called repeatedly
+ * until it returns 0.
+ * The items are removed from the finalizer data structure, so the caller is supposed
+ * to finalize them.
+ * @out_array should be on the stack to allow the GC to know the objects are still alive.
+ */
+int
+mono_gc_finalizers_for_domain (MonoDomain *domain, MonoObject **out_array, int out_size)
{
- int i;
- unsigned int hash;
- FinalizeEntry **new_hash;
- FinalizeEntry *entry, *next;
- int new_size = g_spaced_primes_closest (num_registered_finalizers);
+ int result;
- new_hash = get_internal_mem (new_size * sizeof (FinalizeEntry*));
- for (i = 0; i < finalizable_hash_size; ++i) {
- for (entry = finalizable_hash [i]; entry; entry = next) {
- hash = mono_object_hash (entry->object) % new_size;
- next = entry->next;
- entry->next = new_hash [hash];
- new_hash [hash] = entry;
- }
+ LOCK_GC;
+ result = finalizers_for_domain (domain, out_array, out_size, &minor_finalizable_hash);
+ if (result < out_size) {
+ result += finalizers_for_domain (domain, out_array + result, out_size - result,
+ &major_finalizable_hash);
}
- free_internal_mem (finalizable_hash);
- finalizable_hash = new_hash;
- finalizable_hash_size = new_size;
+ UNLOCK_GC;
+
+ return result;
}
-void
-mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
+static void
+register_for_finalization (MonoObject *obj, void *user_data, FinalizeEntryHashTable *hash_table)
{
+ FinalizeEntry **finalizable_hash;
+ mword finalizable_hash_size;
FinalizeEntry *entry, *prev;
unsigned int hash;
if (no_finalize)
return;
+ g_assert (user_data == NULL || user_data == mono_gc_run_finalize);
hash = mono_object_hash (obj);
LOCK_GC;
- if (num_registered_finalizers >= finalizable_hash_size * 2)
- rehash_fin_table ();
+ rehash_fin_table_if_necessary (hash_table);
+ finalizable_hash = hash_table->table;
+ finalizable_hash_size = hash_table->size;
hash %= finalizable_hash_size;
prev = NULL;
for (entry = finalizable_hash [hash]; entry; entry = entry->next) {
if (entry->object == obj) {
- if (user_data) {
- entry->data = user_data;
- } else {
+ if (!user_data) {
/* remove from the list */
if (prev)
prev->next = entry->next;
else
finalizable_hash [hash] = entry->next;
- num_registered_finalizers--;
- DEBUG (5, fprintf (gc_debug_file, "Removed finalizer %p for object: %p (%s) (%d)\n", entry, obj, obj->vtable->klass->name, num_registered_finalizers));
+ hash_table->num_registered--;
+ DEBUG (5, fprintf (gc_debug_file, "Removed finalizer %p for object: %p (%s) (%d)\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered));
free_internal_mem (entry);
}
UNLOCK_GC;
}
entry = get_internal_mem (sizeof (FinalizeEntry));
entry->object = obj;
- entry->data = user_data;
entry->next = finalizable_hash [hash];
finalizable_hash [hash] = entry;
- num_registered_finalizers++;
- DEBUG (5, fprintf (gc_debug_file, "Added finalizer %p for object: %p (%s) (%d)\n", entry, obj, obj->vtable->klass->name, num_registered_finalizers));
+ hash_table->num_registered++;
+ DEBUG (5, fprintf (gc_debug_file, "Added finalizer %p for object: %p (%s) (%d)\n", entry, obj, obj->vtable->klass->name, hash_table->num_registered));
UNLOCK_GC;
}
+void
+mono_gc_register_for_finalization (MonoObject *obj, void *user_data)
+{
+	/* Finalizable objects are tracked per generation: nursery objects go
+	 * into the minor table (promoted to the major table by
+	 * finalize_in_range () when they move out of the nursery), everything
+	 * else goes directly into the major table. */
+	if (ptr_in_nursery (obj))
+		register_for_finalization (obj, user_data, &minor_finalizable_hash);
+	else
+		register_for_finalization (obj, user_data, &major_finalizable_hash);
+}
+
static void
-rehash_dislink (void)
+rehash_dislink (DisappearingLinkHashTable *hash_table)
{
+ DisappearingLink **disappearing_link_hash = hash_table->table;
+ int disappearing_link_hash_size = hash_table->size;
int i;
unsigned int hash;
- FinalizeEntry **new_hash;
- FinalizeEntry *entry, *next;
- int new_size = g_spaced_primes_closest (num_disappearing_links);
+ DisappearingLink **new_hash;
+ DisappearingLink *entry, *next;
+ int new_size = g_spaced_primes_closest (hash_table->num_links);
- new_hash = get_internal_mem (new_size * sizeof (FinalizeEntry*));
+ new_hash = get_internal_mem (new_size * sizeof (DisappearingLink*));
for (i = 0; i < disappearing_link_hash_size; ++i) {
for (entry = disappearing_link_hash [i]; entry; entry = next) {
- hash = mono_aligned_addr_hash (entry->data) % new_size;
+ hash = mono_aligned_addr_hash (entry->link) % new_size;
next = entry->next;
entry->next = new_hash [hash];
new_hash [hash] = entry;
}
}
free_internal_mem (disappearing_link_hash);
- disappearing_link_hash = new_hash;
- disappearing_link_hash_size = new_size;
+ hash_table->table = new_hash;
+ hash_table->size = new_size;
}
+/* LOCKING: assumes the GC lock is held */
static void
-mono_gc_register_disappearing_link (MonoObject *obj, void *link)
+add_or_remove_disappearing_link (MonoObject *obj, void **link, gboolean track,
+ DisappearingLinkHashTable *hash_table)
{
- FinalizeEntry *entry, *prev;
+ DisappearingLink *entry, *prev;
unsigned int hash;
- LOCK_GC;
+ DisappearingLink **disappearing_link_hash = hash_table->table;
+ int disappearing_link_hash_size = hash_table->size;
- if (num_disappearing_links >= disappearing_link_hash_size * 2)
- rehash_dislink ();
+ if (hash_table->num_links >= disappearing_link_hash_size * 2) {
+ rehash_dislink (hash_table);
+ disappearing_link_hash = hash_table->table;
+ disappearing_link_hash_size = hash_table->size;
+ }
/* FIXME: add check that link is not in the heap */
hash = mono_aligned_addr_hash (link) % disappearing_link_hash_size;
entry = disappearing_link_hash [hash];
prev = NULL;
for (; entry; entry = entry->next) {
/* link already added */
- if (link == entry->data) {
+ if (link == entry->link) {
/* NULL obj means remove */
if (obj == NULL) {
if (prev)
prev->next = entry->next;
else
disappearing_link_hash [hash] = entry->next;
- num_disappearing_links--;
- DEBUG (5, fprintf (gc_debug_file, "Removed dislink %p (%d)\n", entry, num_disappearing_links));
+ hash_table->num_links--;
+ DEBUG (5, fprintf (gc_debug_file, "Removed dislink %p (%d) from %s\n", entry, hash_table->num_links, dislink_table_name (hash_table)));
free_internal_mem (entry);
+ *link = NULL;
} else {
- entry->object = obj; /* we allow the change of object */
+ *link = HIDE_POINTER (obj, track); /* we allow the change of object */
}
- UNLOCK_GC;
return;
}
prev = entry;
}
- entry = get_internal_mem (sizeof (FinalizeEntry));
- entry->object = obj;
- entry->data = link;
+ if (obj == NULL)
+ return;
+ entry = get_internal_mem (sizeof (DisappearingLink));
+ *link = HIDE_POINTER (obj, track);
+ entry->link = link;
entry->next = disappearing_link_hash [hash];
disappearing_link_hash [hash] = entry;
- num_disappearing_links++;
- DEBUG (5, fprintf (gc_debug_file, "Added dislink %p for object: %p (%s) at %p\n", entry, obj, obj->vtable->klass->name, link));
- UNLOCK_GC;
+ hash_table->num_links++;
+ DEBUG (5, fprintf (gc_debug_file, "Added dislink %p for object: %p (%s) at %p to %s\n", entry, obj, obj->vtable->klass->name, link, dislink_table_name (hash_table)));
+}
+
+/* LOCKING: assumes the GC lock is held */
+static void
+mono_gc_register_disappearing_link (MonoObject *obj, void **link, gboolean track)
+{
+ add_or_remove_disappearing_link (NULL, link, FALSE, &minor_disappearing_link_hash);
+ add_or_remove_disappearing_link (NULL, link, FALSE, &major_disappearing_link_hash);
+ if (obj) {
+ if (ptr_in_nursery (obj))
+ add_or_remove_disappearing_link (obj, link, track, &minor_disappearing_link_hash);
+ else
+ add_or_remove_disappearing_link (obj, link, track, &major_disappearing_link_hash);
+ }
}
int
mono_gc_invoke_finalizers (void)
{
- FinalizeEntry *entry;
+ FinalizeEntry *entry = NULL;
+ gboolean entry_is_critical;
int count = 0;
void *obj;
/* FIXME: batch to reduce lock contention */
- while (fin_ready_list) {
+ while (fin_ready_list || critical_fin_list) {
LOCK_GC;
- entry = fin_ready_list;
+
if (entry) {
- fin_ready_list = entry->next;
+ FinalizeEntry **list = entry_is_critical ? &critical_fin_list : &fin_ready_list;
+
+			/* We have finalized entry in the last
+			   iteration, now we need to remove it from
+			   the list. */
+ if (*list == entry)
+ *list = entry->next;
+ else {
+ FinalizeEntry *e = *list;
+ while (e->next != entry)
+ e = e->next;
+ e->next = entry->next;
+ }
+ free_internal_mem (entry);
+ entry = NULL;
+ }
+
+ /* Now look for the first non-null entry. */
+ for (entry = fin_ready_list; entry && !entry->object; entry = entry->next)
+ ;
+ if (entry) {
+ entry_is_critical = FALSE;
+ } else {
+ entry_is_critical = TRUE;
+ for (entry = critical_fin_list; entry && !entry->object; entry = entry->next)
+ ;
+ }
+
+ if (entry) {
+ g_assert (entry->object);
num_ready_finalizers--;
obj = entry->object;
+ entry->object = NULL;
DEBUG (7, fprintf (gc_debug_file, "Finalizing object %p (%s)\n", obj, safe_name (obj)));
}
+
UNLOCK_GC;
- if (entry) {
- void (*callback)(void *, void*) = entry->data;
- entry->next = NULL;
- obj = entry->object;
- count++;
- /* the object is on the stack so it is pinned */
- /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
- callback (obj, NULL);
- free_internal_mem (entry);
- }
+
+ if (!entry)
+ break;
+
+ g_assert (entry->object == NULL);
+ count++;
+ /* the object is on the stack so it is pinned */
+ /*g_print ("Calling finalizer for object: %p (%s)\n", entry->object, safe_name (entry->object));*/
+ mono_gc_run_finalize (obj, NULL);
}
+ g_assert (!entry);
return count;
}
gboolean
mono_gc_pending_finalizers (void)
{
- return fin_ready_list != NULL;
+ return fin_ready_list || critical_fin_list;
}
/* Negative value to remove */
*/
static void
-rehash_roots (void)
+rehash_roots (gboolean pinned)
{
int i;
unsigned int hash;
RootRecord **new_hash;
RootRecord *entry, *next;
- int new_size = g_spaced_primes_closest (num_roots_entries);
+ int new_size;
+ new_size = g_spaced_primes_closest (num_roots_entries [pinned]);
new_hash = get_internal_mem (new_size * sizeof (RootRecord*));
- for (i = 0; i < roots_hash_size; ++i) {
- for (entry = roots_hash [i]; entry; entry = next) {
+ for (i = 0; i < roots_hash_size [pinned]; ++i) {
+ for (entry = roots_hash [pinned][i]; entry; entry = next) {
hash = mono_aligned_addr_hash (entry->start_root) % new_size;
next = entry->next;
entry->next = new_hash [hash];
new_hash [hash] = entry;
}
}
- free_internal_mem (roots_hash);
- roots_hash = new_hash;
- roots_hash_size = new_size;
+ free_internal_mem (roots_hash [pinned]);
+ roots_hash [pinned] = new_hash;
+ roots_hash_size [pinned] = new_size;
+}
+
+static RootRecord*
+find_root (int root_type, char *start, guint32 addr_hash)
+{
+ RootRecord *new_root;
+
+ guint32 hash = addr_hash % roots_hash_size [root_type];
+ for (new_root = roots_hash [root_type][hash]; new_root; new_root = new_root->next) {
+ /* return the existing record, if any; callers may update its size and descriptor (for thread statics etc) */
+ if (new_root->start_root == start) {
+ return new_root;
+ }
+ }
+
+ return NULL;
}
/*
* We do not coalesce roots.
*/
-int
-mono_gc_register_root (char *start, size_t size, void *descr)
+static int
+mono_gc_register_root_inner (char *start, size_t size, void *descr, int root_type)
{
RootRecord *new_root;
- unsigned int hash = mono_aligned_addr_hash (start);
+ unsigned int hash, addr_hash = mono_aligned_addr_hash (start);
+ int i;
LOCK_GC;
- if (num_roots_entries >= roots_hash_size * 2)
- rehash_roots ();
- hash %= roots_hash_size;
- for (new_root = roots_hash [hash]; new_root; new_root = new_root->next) {
+ for (i = 0; i < ROOT_TYPE_NUM; ++i) {
+ if (num_roots_entries [i] >= roots_hash_size [i] * 2)
+ rehash_roots (i);
+ }
+ for (i = 0; i < ROOT_TYPE_NUM; ++i) {
+ new_root = find_root (i, start, addr_hash);
/* we allow changing the size and the descriptor (for thread statics etc) */
- if (new_root->start_root == start) {
+ if (new_root) {
size_t old_size = new_root->end_root - new_root->start_root;
new_root->end_root = new_root->start_root + size;
+ g_assert (((new_root->root_desc != 0) && (descr != NULL)) ||
+ ((new_root->root_desc == 0) && (descr == NULL)));
new_root->root_desc = (mword)descr;
roots_size += size;
roots_size -= old_size;
new_root->end_root = new_root->start_root + size;
new_root->root_desc = (mword)descr;
roots_size += size;
- num_roots_entries++;
- new_root->next = roots_hash [hash];
- roots_hash [hash] = new_root;
+ hash = addr_hash % roots_hash_size [root_type];
+ num_roots_entries [root_type]++;
+ new_root->next = roots_hash [root_type] [hash];
+ roots_hash [root_type][hash] = new_root;
DEBUG (3, fprintf (gc_debug_file, "Added root %p for range: %p-%p, descr: %p (%d/%d bytes)\n", new_root, new_root->start_root, new_root->end_root, descr, (int)size, (int)roots_size));
} else {
UNLOCK_GC;
return TRUE;
}
+int
+mono_gc_register_root (char *start, size_t size, void *descr)
+{
+ return mono_gc_register_root_inner (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED);
+}
+
+int
+mono_gc_register_root_wbarrier (char *start, size_t size, void *descr)
+{
+ return mono_gc_register_root_inner (start, size, descr, ROOT_TYPE_WBARRIER);
+}
+
void
mono_gc_deregister_root (char* addr)
{
- RootRecord *tmp, *prev = NULL;
- unsigned int hash = mono_aligned_addr_hash (addr);
+ RootRecord *tmp, *prev;
+ unsigned int hash, addr_hash = mono_aligned_addr_hash (addr);
+ int root_type;
+
LOCK_GC;
- hash %= roots_hash_size;
- tmp = roots_hash [hash];
- while (tmp) {
- if (tmp->start_root == (char*)addr) {
- if (prev)
- prev->next = tmp->next;
- else
- roots_hash [hash] = tmp->next;
- roots_size -= (tmp->end_root - tmp->start_root);
- num_roots_entries--;
- DEBUG (3, fprintf (gc_debug_file, "Removed root %p for range: %p-%p\n", tmp, tmp->start_root, tmp->end_root));
- free_internal_mem (tmp);
- break;
+ for (root_type = 0; root_type < ROOT_TYPE_NUM; ++root_type) {
+ hash = addr_hash % roots_hash_size [root_type];
+ tmp = roots_hash [root_type][hash];
+ prev = NULL;
+ while (tmp) {
+ if (tmp->start_root == (char*)addr) {
+ if (prev)
+ prev->next = tmp->next;
+ else
+ roots_hash [root_type][hash] = tmp->next;
+ roots_size -= (tmp->end_root - tmp->start_root);
+ num_roots_entries [root_type]--;
+ DEBUG (3, fprintf (gc_debug_file, "Removed root %p for range: %p-%p\n", tmp, tmp->start_root, tmp->end_root));
+ free_internal_mem (tmp);
+ break;
+ }
+ prev = tmp;
+ tmp = tmp->next;
}
- prev = tmp;
- tmp = tmp->next;
}
UNLOCK_GC;
}
char **tlab_temp_end_addr;
char **tlab_real_end_addr;
RememberedSet *remset;
+ gpointer runtime_data;
};
/* FIXME: handle large/small config */
SgenThreadInfo *info = thread_info_lookup (ARCH_GET_THREAD ());
info->stack_start = align_pointer (&ptr);
ARCH_STORE_REGS (ptr);
+ if (gc_callbacks.thread_suspend_func)
+ gc_callbacks.thread_suspend_func (info->runtime_data, NULL);
}
static const char*
/* LOCKING: assumes the GC lock is held (by the stopping thread) */
static void
-suspend_handler (int sig)
+suspend_handler (int sig, siginfo_t *siginfo, void *context)
{
SgenThreadInfo *info;
pthread_t id;
*/
info->stack_start = align_pointer (&id);
+ /* Notify the JIT */
+ if (gc_callbacks.thread_suspend_func)
+ gc_callbacks.thread_suspend_func (info->runtime_data, context);
+
/* notify the waiting thread */
sem_post (&suspend_ack_semaphore);
info->stop_count = stop_count;
#endif /* USE_SIGNAL_BASED_START_STOP_WORLD */
+void
+mono_gc_set_gc_callbacks (MonoGCCallbacks *callbacks)
+{
+ gc_callbacks = *callbacks;
+}
+
+/* Variables holding start/end nursery so it won't have to be passed at every call */
+static void *scan_area_arg_start, *scan_area_arg_end;
+
+void
+mono_gc_conservatively_scan_area (void *start, void *end)
+{
+ conservatively_pin_objects_from (start, end, scan_area_arg_start, scan_area_arg_end);
+}
+
+void*
+mono_gc_scan_object (void *obj)
+{
+ return copy_object (obj, scan_area_arg_start, scan_area_arg_end);
+}
+
/*
- * Identify objects pinned in a thread stack and its registers.
+ * Mark from thread stacks and registers.
*/
static void
-pin_thread_data (void *start_nursery, void *end_nursery)
+scan_thread_data (void *start_nursery, void *end_nursery, gboolean precise)
{
int i;
SgenThreadInfo *info;
+ scan_area_arg_start = start_nursery;
+ scan_area_arg_end = end_nursery;
+
for (i = 0; i < THREAD_HASH_SIZE; ++i) {
for (info = thread_table [i]; info; info = info->next) {
if (info->skip) {
DEBUG (2, fprintf (gc_debug_file, "Skipping dead thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
continue;
}
- DEBUG (2, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start));
- conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery);
+ DEBUG (2, fprintf (gc_debug_file, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%d\n", info, info->stack_start, info->stack_end, (char*)info->stack_end - (char*)info->stack_start, next_pin_slot));
+ if (gc_callbacks.thread_mark_func)
+ gc_callbacks.thread_mark_func (info->runtime_data, info->stack_start, info->stack_end, precise);
+ else if (!precise)
+ conservatively_pin_objects_from (info->stack_start, info->stack_end, start_nursery, end_nursery);
}
}
- DEBUG (2, fprintf (gc_debug_file, "Scanning current thread registers\n"));
- conservatively_pin_objects_from ((void*)cur_thread_regs, (void*)(cur_thread_regs + ARCH_NUM_REGS), start_nursery, end_nursery);
+ DEBUG (2, fprintf (gc_debug_file, "Scanning current thread registers, pinned=%d\n", next_pin_slot));
+ if (!precise)
+ conservatively_pin_objects_from ((void*)cur_thread_regs, (void*)(cur_thread_regs + ARCH_NUM_REGS), start_nursery, end_nursery);
}
static void
switch ((*p) & REMSET_TYPE_MASK) {
case REMSET_LOCATION:
ptr = (void**)(*p);
+ //__builtin_prefetch (ptr);
if (((void*)ptr < start_nursery || (void*)ptr >= end_nursery) && ptr_in_heap (ptr)) {
*ptr = copy_object (*ptr, start_nursery, end_nursery);
DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p\n", ptr, *ptr));
- if (!global && *ptr >= start_nursery && *ptr < end_nursery)
- add_to_global_remset (ptr);
+ if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
+ /*
+ * If the object is pinned, each reference to it from nonpinned objects
+ * becomes part of the global remset, which can grow very large.
+ */
+ DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
+ add_to_global_remset (ptr, FALSE);
+ }
} else {
DEBUG (9, fprintf (gc_debug_file, "Skipping remset at %p holding %p\n", ptr, *ptr));
}
*ptr = copy_object (*ptr, start_nursery, end_nursery);
DEBUG (9, fprintf (gc_debug_file, "Overwrote remset at %p with %p (count: %d)\n", ptr, *ptr, (int)count));
if (!global && *ptr >= start_nursery && *ptr < end_nursery)
- add_to_global_remset (ptr);
+ add_to_global_remset (ptr, FALSE);
++ptr;
}
return p + 2;
return p + 1;
scan_object (*ptr, start_nursery, end_nursery);
return p + 1;
- case REMSET_VTYPE:
+ case REMSET_OTHER: {
ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery) || !ptr_in_heap (ptr))
+
+ switch (p [1]) {
+ case REMSET_VTYPE:
+ if (((void*)ptr >= start_nursery && (void*)ptr < end_nursery) || !ptr_in_heap (ptr))
+ return p + 3;
+ desc = p [2];
+ scan_vtype ((char*)ptr, desc, start_nursery, end_nursery);
+ return p + 3;
+ case REMSET_ROOT_LOCATION:
+ /* Same as REMSET_LOCATION, but the address is not required to be in the heap */
+ *ptr = copy_object (*ptr, start_nursery, end_nursery);
+ DEBUG (9, fprintf (gc_debug_file, "Overwrote root location remset at %p with %p\n", ptr, *ptr));
+ if (!global && *ptr >= start_nursery && *ptr < end_nursery) {
+ /*
+ * If the object is pinned, each reference to it from nonpinned objects
+ * becomes part of the global remset, which can grow very large.
+ */
+ DEBUG (9, fprintf (gc_debug_file, "Add to global remset because of pinning %p (%p %s)\n", ptr, *ptr, safe_name (*ptr)));
+ add_to_global_remset (ptr, TRUE);
+ }
return p + 2;
- desc = p [1];
- scan_vtype ((char*)ptr, desc, start_nursery, end_nursery);
- return p + 2;
+ default:
+ g_assert_not_reached ();
+ }
+ break;
+ }
default:
g_assert_not_reached ();
}
int i;
SgenThreadInfo *info;
RememberedSet *remset, *next;
- mword *p;
+ mword *p, *next_p, *store_pos;
/* the global one */
for (remset = global_remset; remset; remset = remset->next) {
DEBUG (4, fprintf (gc_debug_file, "Scanning global remset range: %p-%p, size: %zd\n", remset->data, remset->store_next, remset->store_next - remset->data));
- for (p = remset->data; p < remset->store_next;) {
- p = handle_remset (p, start_nursery, end_nursery, TRUE);
+ store_pos = remset->data;
+ for (p = remset->data; p < remset->store_next; p = next_p) {
+ mword ptr;
+
+ next_p = handle_remset (p, start_nursery, end_nursery, TRUE);
+
+ /*
+ * Clear global remsets of locations which no longer point to the
+ * nursery. Otherwise, they could grow indefinitely between major
+ * collections.
+ */
+ ptr = (p [0] & ~REMSET_TYPE_MASK);
+ if ((p [0] & REMSET_TYPE_MASK) == REMSET_LOCATION) {
+ if (ptr_in_nursery (*(void**)ptr))
+ *store_pos ++ = p [0];
+ } else {
+ g_assert ((p [0] & REMSET_TYPE_MASK) == REMSET_OTHER);
+ g_assert (p [1] == REMSET_ROOT_LOCATION);
+ if (ptr_in_nursery (*(void**)ptr)) {
+ *store_pos ++ = p [0];
+ *store_pos ++ = p [1];
+ }
+ }
}
+
+ /* Truncate the remset */
+ remset->store_next = store_pos;
}
+
/* the per-thread ones */
for (i = 0; i < THREAD_HASH_SIZE; ++i) {
for (info = thread_table [i]; info; info = info->next) {
remembered_set = info->remset = alloc_remset (DEFAULT_REMSET_SIZE, info);
pthread_setspecific (remembered_set_key, remembered_set);
DEBUG (3, fprintf (gc_debug_file, "registered thread %p (%p) (hash: %d)\n", info, (gpointer)info->id, hash));
+
+ if (gc_callbacks.thread_attach_func)
+ info->runtime_data = gc_callbacks.thread_attach_func ();
+
return info;
}
mono_gc_wbarrier_set_field (MonoObject *obj, gpointer field_ptr, MonoObject* value)
{
RememberedSet *rs;
- if ((char*)field_ptr >= nursery_start && (char*)field_ptr < nursery_real_end) {
+ if (ptr_in_nursery (field_ptr)) {
*(void**)field_ptr = value;
return;
}
mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
{
RememberedSet *rs = remembered_set;
- if ((char*)slot_ptr >= nursery_start && (char*)slot_ptr < nursery_real_end) {
+ if (ptr_in_nursery (slot_ptr)) {
*(void**)slot_ptr = value;
return;
}
mono_gc_wbarrier_arrayref_copy (MonoArray *arr, gpointer slot_ptr, int count)
{
RememberedSet *rs = remembered_set;
- if ((char*)slot_ptr >= nursery_start && (char*)slot_ptr < nursery_real_end)
+ if (ptr_in_nursery (slot_ptr))
return;
DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p, %d\n", slot_ptr, count));
if (rs->store_next + 1 < rs->end_set) {
void
mono_gc_wbarrier_generic_store (gpointer ptr, MonoObject* value)
{
- RememberedSet *rs = remembered_set;
- if ((char*)ptr >= nursery_start && (char*)ptr < nursery_real_end) {
+ RememberedSet *rs;
+ if (ptr_in_nursery (ptr)) {
DEBUG (8, fprintf (gc_debug_file, "Skipping remset at %p\n", ptr));
*(void**)ptr = value;
return;
}
+ rs = remembered_set;
DEBUG (8, fprintf (gc_debug_file, "Adding remset at %p (%s)\n", ptr, value ? safe_name (value) : "null"));
/* FIXME: ensure it is on the heap */
if (rs->store_next < rs->end_set) {
}
void
-mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
+mono_gc_wbarrier_set_root (gpointer ptr, MonoObject *value)
{
RememberedSet *rs = remembered_set;
- if ((char*)dest >= nursery_start && (char*)dest < nursery_real_end) {
+ if (ptr_in_nursery (ptr))
+ return;
+ DEBUG (8, fprintf (gc_debug_file, "Adding root remset at %p (%s)\n", ptr, value ? safe_name (value) : "null"));
+
+ if (rs->store_next + 2 < rs->end_set) {
+ *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;
+ *(void**)ptr = value;
return;
}
- DEBUG (1, fprintf (gc_debug_file, "Adding value remset at %p, count %d for class %s\n", dest, count, klass->name));
+ rs = alloc_remset (rs->end_set - rs->data, (void*)1);
+ rs->next = remembered_set;
+ remembered_set = rs;
+ thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
+ *(rs->store_next++) = (mword)ptr | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_ROOT_LOCATION;
- if (rs->store_next + 1 < rs->end_set) {
- *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
+ *(void**)ptr = value;
+}
+
+void
+mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
+{
+ RememberedSet *rs = remembered_set;
+ if (ptr_in_nursery (dest))
+ return;
+ DEBUG (8, fprintf (gc_debug_file, "Adding value remset at %p, count %d for class %s\n", dest, count, klass->name));
+
+ if (rs->store_next + 2 < rs->end_set) {
+ *(rs->store_next++) = (mword)dest | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_VTYPE;
*(rs->store_next++) = (mword)klass->gc_descr;
return;
}
rs->next = remembered_set;
remembered_set = rs;
thread_info_lookup (ARCH_GET_THREAD ())->remset = rs;
- *(rs->store_next++) = (mword)dest | REMSET_VTYPE;
+ *(rs->store_next++) = (mword)dest | REMSET_OTHER;
+ *(rs->store_next++) = (mword)REMSET_VTYPE;
*(rs->store_next++) = (mword)klass->gc_descr;
}
mword desc;
int type;
- if ((ptr >= nursery_start) && (ptr < nursery_real_end)) {
+ if (ptr_in_nursery (ptr)) {
printf ("Pointer inside nursery.\n");
} else {
for (section = section_list; section;) {
}
}
+ if (object_is_pinned (ptr))
+ printf ("Object is pinned.\n");
+
+ if (object_is_forwarded (ptr))
+ printf ("Object is forwarded.\n");
+
// FIXME: Handle pointers to the inside of objects
vtable = (MonoVTable*)LOAD_VTABLE (ptr);
printf ("VTable is invalid (empty).\n");
return;
}
- if (((char*)vtable >= nursery_start) && ((char*)vtable < nursery_real_end)) {
+ if (ptr_in_nursery (vtable)) {
printf ("VTable is invalid (points inside nursery).\n");
return;
}
if ((void**)addr >= ptr && (void**)addr < ptr + count)
*found = TRUE;
return p + 1;
- case REMSET_VTYPE:
- ptr = (void**)(*p & ~REMSET_TYPE_MASK);
- desc = p [1];
-
- switch (desc & 0x7) {
- case DESC_TYPE_RUN_LENGTH:
- OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
- /* The descriptor includes the size of MonoObject */
- skip_size -= sizeof (MonoObject);
- if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
- *found = TRUE;
- break;
+ case REMSET_OTHER: {
+ switch (p [1]) {
+ case REMSET_VTYPE:
+ ptr = (void**)(*p & ~REMSET_TYPE_MASK);
+ desc = p [2];
+
+ switch (desc & 0x7) {
+ case DESC_TYPE_RUN_LENGTH:
+ OBJ_RUN_LEN_SIZE (skip_size, desc, ptr);
+ /* The descriptor includes the size of MonoObject */
+ skip_size -= sizeof (MonoObject);
+ if ((void**)addr >= ptr && (void**)addr < ptr + (skip_size / sizeof (gpointer)))
+ *found = TRUE;
+ break;
+ default:
+ // FIXME:
+ g_assert_not_reached ();
+ }
+
+ return p + 3;
+ case REMSET_ROOT_LOCATION:
+ return p + 2;
default:
- // FIXME:
g_assert_not_reached ();
}
-
- return p + 2;
+ break;
+ }
default:
g_assert_not_reached ();
}
type_rlen++;
continue;
} else if (type == DESC_TYPE_VECTOR) { // includes ARRAY, too
- skip_size = (vt->desc >> LOW_TYPE_BITS) & MAX_ELEMENT_SIZE;
- skip_size *= mono_array_length ((MonoArray*)start);
- skip_size += sizeof (MonoArray);
+ skip_size = safe_object_get_size ((MonoObject*)start);
skip_size += (ALLOC_ALIGN - 1);
skip_size &= ~(ALLOC_ALIGN - 1);
OBJ_VECTOR_FOREACH_PTR (vt, start);
int
mono_gc_get_generation (MonoObject *obj)
{
- if ((char*)obj >= nursery_start && (char*)obj < nursery_real_end)
+ if (ptr_in_nursery (obj))
return 0;
return 1;
}
}
void
-mono_gc_weak_link_add (void **link_addr, MonoObject *obj)
+mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track)
{
- mono_gc_register_disappearing_link (obj, link_addr);
- *link_addr = obj;
+ LOCK_GC;
+ mono_gc_register_disappearing_link (obj, link_addr, track);
+ UNLOCK_GC;
}
void
mono_gc_weak_link_remove (void **link_addr)
{
- mono_gc_register_disappearing_link (NULL, link_addr);
- *link_addr = NULL;
+ LOCK_GC;
+ mono_gc_register_disappearing_link (NULL, link_addr, FALSE);
+ UNLOCK_GC;
}
MonoObject*
mono_gc_weak_link_get (void **link_addr)
{
- return *link_addr;
+ if (!*link_addr)
+ return NULL;
+ return (MonoObject*) REVEAL_POINTER (*link_addr);
}
void*
mono_gc_make_descr_from_bitmap (gsize *bitmap, int numbits)
{
if (numbits < ((sizeof (*bitmap) * 8) - ROOT_DESC_TYPE_SHIFT)) {
- mword desc = ROOT_DESC_BITMAP | (bitmap [0] << ROOT_DESC_TYPE_SHIFT);
- return (void*)desc;
+ return (void*)MAKE_ROOT_DESC (ROOT_DESC_BITMAP, bitmap [0]);
+ } else {
+ mword complex = alloc_complex_descriptor (bitmap, numbits + 1);
+ return (void*)MAKE_ROOT_DESC (ROOT_DESC_COMPLEX, complex);
}
- /* conservative scanning */
- DEBUG (3, fprintf (gc_debug_file, "Conservative root descr for size: %d\n", numbits));
- return NULL;
+}
+
+void*
+mono_gc_make_root_descr_user (MonoGCMarkFunc marker)
+{
+ void *descr;
+
+ g_assert (user_descriptors_next < MAX_USER_DESCRIPTORS);
+ descr = (void*)MAKE_ROOT_DESC (ROOT_DESC_USER, (mword)user_descriptors_next);
+ user_descriptors [user_descriptors_next ++] = marker;
+
+ return descr;
}
void*
collect_before_allocs = TRUE;
} else if (!strcmp (opt, "check-at-minor-collections")) {
consistency_check_at_minor_collection = TRUE;
+ } else if (!strcmp (opt, "clear-at-gc")) {
+ nursery_clear_policy = CLEAR_AT_GC;
} else {
fprintf (stderr, "Invalid format for the MONO_GC_DEBUG env variable: '%s'\n", env);
fprintf (stderr, "The format is: MONO_GC_DEBUG=[l[:filename]|<option>]+ where l is a debug level 0-9.\n");
- fprintf (stderr, "Valid options are: collect-before-allocs, check-at-minor-collections.\n");
+ fprintf (stderr, "Valid options are: collect-before-allocs, check-at-minor-collections, clear-at-gc.\n");
exit (1);
}
}
sigfillset (&sinfo.sa_mask);
sinfo.sa_flags = SA_RESTART | SA_SIGINFO;
- sinfo.sa_handler = suspend_handler;
+ sinfo.sa_sigaction = suspend_handler;
if (sigaction (suspend_signal_num, &sinfo, NULL) != 0) {
g_error ("failed sigaction");
}
return ATYPE_NUM;
}
+static MonoMethod *write_barrier_method;
+
+MonoMethod*
+mono_gc_get_write_barrier (void)
+{
+ MonoMethod *res;
+ int remset_offset = -1;
+ int remset_var, next_var;
+ MonoMethodBuilder *mb;
+ MonoMethodSignature *sig;
+ int label1, label2;
+
+ MONO_THREAD_VAR_OFFSET (remembered_set, remset_offset);
+
+ // FIXME: Maybe create a separate version for ctors (the branch would be
+ // correctly predicted more times)
+ if (write_barrier_method)
+ return write_barrier_method;
+
+ /* Create the IL version of mono_gc_barrier_generic_store () */
+ sig = mono_metadata_signature_alloc (mono_defaults.corlib, 2);
+ sig->ret = &mono_defaults.void_class->byval_arg;
+ sig->params [0] = &mono_defaults.int_class->byval_arg;
+ sig->params [1] = &mono_defaults.object_class->byval_arg;
+
+ mb = mono_mb_new (mono_defaults.object_class, "wbarrier", MONO_WRAPPER_WRITE_BARRIER);
+
+ /* ptr_in_nursery () check */
+#ifdef ALIGN_NURSERY
+ /*
+ * Masking out the bits might be faster, but we would have to use 64 bit
+ * immediates, which might be slower.
+ */
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_icon (mb, DEFAULT_NURSERY_BITS);
+ mono_mb_emit_byte (mb, CEE_SHR_UN);
+ mono_mb_emit_icon (mb, (mword)nursery_start >> DEFAULT_NURSERY_BITS);
+ label1 = mono_mb_emit_branch (mb, CEE_BNE_UN);
+#else
+ // FIXME:
+ g_assert_not_reached ();
+#endif
+
+ /* Don't need write barrier case */
+ /* do the assignment */
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_ldarg (mb, 1);
+ /* Don't use STIND_REF, as it would cause infinite recursion */
+ mono_mb_emit_byte (mb, CEE_STIND_I);
+ mono_mb_emit_byte (mb, CEE_RET);
+
+ /* Need write barrier case */
+ mono_mb_patch_branch (mb, label1);
+
+ if (remset_offset == -1)
+ // FIXME:
+ g_assert_not_reached ();
+
+ // remset_var = remembered_set;
+ remset_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
+ mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
+ mono_mb_emit_byte (mb, CEE_MONO_TLS);
+ mono_mb_emit_i4 (mb, remset_offset);
+ mono_mb_emit_stloc (mb, remset_var);
+
+ // next_var = rs->store_next
+ next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
+ mono_mb_emit_ldloc (mb, remset_var);
+ mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (RememberedSet, store_next));
+ mono_mb_emit_byte (mb, CEE_LDIND_I);
+ mono_mb_emit_stloc (mb, next_var);
+
+ // if (rs->store_next < rs->end_set) {
+ mono_mb_emit_ldloc (mb, next_var);
+ mono_mb_emit_ldloc (mb, remset_var);
+ mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (RememberedSet, end_set));
+ mono_mb_emit_byte (mb, CEE_LDIND_I);
+ label2 = mono_mb_emit_branch (mb, CEE_BGE);
+
+ /* write barrier fast path */
+ // *(rs->store_next++) = (mword)ptr;
+ mono_mb_emit_ldloc (mb, next_var);
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_byte (mb, CEE_STIND_I);
+
+ mono_mb_emit_ldloc (mb, next_var);
+ mono_mb_emit_icon (mb, sizeof (gpointer));
+ mono_mb_emit_byte (mb, CEE_ADD);
+ mono_mb_emit_stloc (mb, next_var);
+
+ mono_mb_emit_ldloc (mb, remset_var);
+ mono_mb_emit_ldflda (mb, G_STRUCT_OFFSET (RememberedSet, store_next));
+ mono_mb_emit_ldloc (mb, next_var);
+ mono_mb_emit_byte (mb, CEE_STIND_I);
+
+ // *(void**)ptr = value;
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_ldarg (mb, 1);
+ mono_mb_emit_byte (mb, CEE_STIND_I);
+ mono_mb_emit_byte (mb, CEE_RET);
+
+ /* write barrier slow path */
+ mono_mb_patch_branch (mb, label2);
+
+ mono_mb_emit_ldarg (mb, 0);
+ mono_mb_emit_ldarg (mb, 1);
+ mono_mb_emit_icall (mb, mono_gc_wbarrier_generic_store);
+ mono_mb_emit_byte (mb, CEE_RET);
+
+ res = mono_mb_create_method (mb, sig, 16);
+ mono_mb_free (mb);
+
+ mono_loader_lock ();
+ if (write_barrier_method) {
+ /* Already created */
+ mono_free_method (res);
+ } else {
+ /* double-checked locking */
+ mono_memory_barrier ();
+ write_barrier_method = res;
+ }
+ mono_loader_unlock ();
+
+ return write_barrier_method;
+}
+
#endif /* HAVE_SGEN_GC */