X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Fmetadata%2Fsgen-mono.c;h=bb06f7eec0273cf41821d64e82c7c4c77bebffb6;hb=bc9d5d113ab7064fc199a2f430751643466cb477;hp=9f4830236bf6bedc0531f278930117cbc6dcf3d1;hpb=01276385838acc9250404c335abe72f1be423b7e;p=mono.git diff --git a/mono/metadata/sgen-mono.c b/mono/metadata/sgen-mono.c index 9f4830236bf..bb06f7eec02 100644 --- a/mono/metadata/sgen-mono.c +++ b/mono/metadata/sgen-mono.c @@ -112,7 +112,7 @@ mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass * HEAVY_STAT (++stat_wbarrier_value_copy); g_assert (klass->valuetype); - SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass); + SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, (gpointer)klass->gc_descr, klass->name, klass); if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)klass->gc_descr)) { size_t element_size = mono_class_value_size (klass, NULL); @@ -354,15 +354,15 @@ mono_gc_get_write_barrier (void) */ /* Vtable of the objects used to fill out nursery fragments before a collection */ -static GCVTable *array_fill_vtable; +static GCVTable array_fill_vtable; -static GCVTable* +static GCVTable get_array_fill_vtable (void) { if (!array_fill_vtable) { static MonoClass klass; static char _vtable[sizeof(MonoVTable)+8]; - MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8); + MonoVTable* vtable = (MonoVTable*) ALIGN_TO((mword)_vtable, 8); gsize bmap; MonoDomain *domain = mono_get_root_domain (); @@ -379,7 +379,7 @@ get_array_fill_vtable (void) vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1); vtable->rank = 1; - array_fill_vtable = (GCVTable*)vtable; + array_fill_vtable = vtable; } return array_fill_vtable; } @@ -450,7 +450,7 @@ mono_gc_get_vtable_bits (MonoClass *class) static gboolean is_finalization_aware (MonoObject *obj) { - MonoVTable *vt = ((MonoVTable*)SGEN_LOAD_VTABLE (obj)); + MonoVTable *vt = SGEN_LOAD_VTABLE (obj); return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE; } @@ -463,7 +463,7 @@ sgen_client_object_queued_for_finalization (GCObject *obj) #ifdef ENABLE_DTRACE if (G_UNLIKELY (MONO_GC_FINALIZE_ENQUEUE_ENABLED ())) { int gen = sgen_ptr_in_nursery (obj) ? 
GENERATION_NURSERY : GENERATION_OLD; - GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (obj); + GCVTable vt = SGEN_LOAD_VTABLE (obj); MONO_GC_FINALIZE_ENQUEUE ((mword)obj, sgen_safe_object_get_size (obj), sgen_client_vtable_get_namespace (vt), sgen_client_vtable_get_name (vt), gen, sgen_client_object_has_critical_finalizer (obj)); @@ -548,12 +548,12 @@ typedef struct _EphemeronLinkNode EphemeronLinkNode; struct _EphemeronLinkNode { EphemeronLinkNode *next; - char *array; + MonoArray *array; }; typedef struct { - void *key; - void *value; + GCObject *key; + GCObject *value; } Ephemeron; static EphemeronLinkNode *ephemeron_list; @@ -594,17 +594,16 @@ sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx) CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object; SgenGrayQueue *queue = ctx.queue; EphemeronLinkNode *current = ephemeron_list, *prev = NULL; - MonoArray *array; Ephemeron *cur, *array_end; - char *tombstone; + GCObject *tombstone; while (current) { - char *object = current->array; + MonoArray *array = current->array; - if (!sgen_is_object_alive_for_current_gen (object)) { + if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) { EphemeronLinkNode *tmp = current; - SGEN_LOG (5, "Dead Ephemeron array at %p", object); + SGEN_LOG (5, "Dead Ephemeron array at %p", array); if (prev) prev->next = current->next; @@ -617,18 +616,17 @@ sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx) continue; } - copy_func ((void**)&object, queue); - current->array = object; + copy_func ((GCObject**)&array, queue); + current->array = array; - SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object); + SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", array); - array = (MonoArray*)object; cur = mono_array_addr (array, Ephemeron, 0); array_end = cur + mono_array_length_fast (array); - tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone; + tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone; for (; cur < array_end; ++cur) { - char *key = (char*)cur->key; + GCObject *key = cur->key; if (!key || key == tombstone) continue; @@ -660,29 +658,27 @@ sgen_client_mark_ephemerons (ScanCopyContext ctx) SgenGrayQueue *queue = ctx.queue; gboolean nothing_marked = TRUE; EphemeronLinkNode *current = ephemeron_list; - MonoArray *array; Ephemeron *cur, *array_end; - char *tombstone; + GCObject *tombstone; for (current = ephemeron_list; current; current = current->next) { - char *object = current->array; - SGEN_LOG (5, "Ephemeron array at %p", object); + MonoArray *array = current->array; + SGEN_LOG (5, "Ephemeron array at %p", array); /*It has to be alive*/ - if (!sgen_is_object_alive_for_current_gen (object)) { + if (!sgen_is_object_alive_for_current_gen ((GCObject*)array)) { SGEN_LOG (5, "\tnot reachable"); continue; } - copy_func ((void**)&object, queue); + copy_func ((GCObject**)&array, queue); - array = (MonoArray*)object; cur = mono_array_addr (array, Ephemeron, 0); array_end = cur + mono_array_length_fast (array); - tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone; + tombstone = SGEN_LOAD_VTABLE ((GCObject*)array)->domain->ephemeron_tombstone; for (; cur < array_end; ++cur) { - char *key = cur->key; + GCObject *key = cur->key; if (!key || key == tombstone) continue; @@ -692,13 +688,13 @@ sgen_client_mark_ephemerons (ScanCopyContext ctx) cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? 
"reachable" : "unreachable"); if (sgen_is_object_alive_for_current_gen (key)) { - char *value = cur->value; + GCObject *value = cur->value; - copy_func ((void**)&cur->key, queue); + copy_func (&cur->key, queue); if (value) { if (!sgen_is_object_alive_for_current_gen (value)) nothing_marked = FALSE; - copy_func ((void**)&cur->value, queue); + copy_func (&cur->value, queue); } } } @@ -720,7 +716,7 @@ mono_gc_ephemeron_array_add (MonoObject *obj) UNLOCK_GC; return FALSE; } - node->array = (char*)obj; + node->array = (MonoArray*)obj; node->next = ephemeron_list; ephemeron_list = node; @@ -747,7 +743,7 @@ mono_gc_set_current_thread_appdomain (MonoDomain *domain) } static gboolean -need_remove_object_for_domain (char *start, MonoDomain *domain) +need_remove_object_for_domain (GCObject *start, MonoDomain *domain) { if (mono_object_domain (start) == domain) { SGEN_LOG (4, "Need to cleanup object %p", start); @@ -758,9 +754,9 @@ need_remove_object_for_domain (char *start, MonoDomain *domain) } static void -process_object_for_domain_clearing (char *start, MonoDomain *domain) +process_object_for_domain_clearing (GCObject *start, MonoDomain *domain) { - MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (start); + MonoVTable *vt = SGEN_LOAD_VTABLE (start); if (vt->klass == mono_defaults.internal_thread_class) g_assert (mono_object_domain (start) == mono_get_root_domain ()); /* The object could be a proxy for an object in the domain @@ -780,24 +776,24 @@ process_object_for_domain_clearing (char *start, MonoDomain *domain) } static gboolean -clear_domain_process_object (char *obj, MonoDomain *domain) +clear_domain_process_object (GCObject *obj, MonoDomain *domain) { gboolean remove; process_object_for_domain_clearing (obj, domain); remove = need_remove_object_for_domain (obj, domain); - if (remove && ((MonoObject*)obj)->synchronisation) { - void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj); + if (remove && obj->synchronisation) { + guint32 dislink = mono_monitor_get_object_monitor_gchandle (obj); if (dislink) - sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE); + mono_gchandle_free (dislink); } return remove; } static void -clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain) +clear_domain_process_minor_object_callback (GCObject *obj, size_t size, MonoDomain *domain) { if (clear_domain_process_object (obj, domain)) { CANARIFY_SIZE (size); @@ -806,20 +802,20 @@ clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain * } static void -clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain) +clear_domain_process_major_object_callback (GCObject *obj, size_t size, MonoDomain *domain) { clear_domain_process_object (obj, domain); } static void -clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain) +clear_domain_free_major_non_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain) { if (need_remove_object_for_domain (obj, domain)) major_collector.free_non_pinned_object (obj, size); } static void -clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain) +clear_domain_free_major_pinned_object_callback (GCObject *obj, size_t size, MonoDomain *domain) { if (need_remove_object_for_domain (obj, domain)) major_collector.free_pinned_object (obj, size); @@ -853,7 +849,6 @@ mono_gc_clear_domain (MonoDomain * domain) major_collector.finish_sweeping (); sgen_process_fin_stage_entries (); - 
sgen_process_dislink_stage_entries (); sgen_clear_nursery_fragments (); @@ -866,15 +861,13 @@ mono_gc_clear_domain (MonoDomain * domain) /*Ephemerons and dislinks must be processed before LOS since they might end up pointing to memory returned to the OS.*/ null_ephemerons_for_domain (domain); - - for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i) - sgen_null_links_if (object_in_domain_predicate, domain, i); + sgen_null_links_for_domain (domain); for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i) sgen_remove_finalizers_if (object_in_domain_predicate, domain, i); sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, - (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE); + (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE, TRUE); /* We need two passes over major and large objects because freeing such objects might give their memory back to the OS @@ -885,11 +878,11 @@ mono_gc_clear_domain (MonoDomain * domain) the first object is a proxy. */ major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain); for (bigobj = los_object_list; bigobj; bigobj = bigobj->next) - clear_domain_process_object (bigobj->data, domain); + clear_domain_process_object ((GCObject*)bigobj->data, domain); prev = NULL; for (bigobj = los_object_list; bigobj;) { - if (need_remove_object_for_domain (bigobj->data, domain)) { + if (need_remove_object_for_domain ((GCObject*)bigobj->data, domain)) { LOSObject *to_free = bigobj; if (prev) prev->next = bigobj->next; @@ -923,35 +916,58 @@ mono_gc_clear_domain (MonoDomain * domain) * Allocation */ +static gboolean alloc_events = FALSE; + +void +mono_gc_enable_alloc_events (void) +{ + alloc_events = TRUE; +} + void* mono_gc_alloc_obj (MonoVTable *vtable, size_t size) { - return sgen_alloc_obj (vtable, size); + MonoObject *obj = sgen_alloc_obj (vtable, size); + + if (G_UNLIKELY (alloc_events)) + mono_profiler_allocation (obj); + + return obj; } void* mono_gc_alloc_pinned_obj (MonoVTable *vtable, size_t size) { - return sgen_alloc_obj_pinned (vtable, size); + MonoObject *obj = sgen_alloc_obj_pinned (vtable, size); + + if (G_UNLIKELY (alloc_events)) + mono_profiler_allocation (obj); + + return obj; } void* mono_gc_alloc_mature (MonoVTable *vtable) { MonoObject *obj = sgen_alloc_obj_mature (vtable, vtable->klass->instance_size); + if (obj && G_UNLIKELY (obj->vtable->klass->has_finalize)) mono_object_register_finalizer (obj); + + if (G_UNLIKELY (alloc_events)) + mono_profiler_allocation (obj); + return obj; } void* -mono_gc_alloc_fixed (size_t size, void *descr) +mono_gc_alloc_fixed (size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg) { /* FIXME: do a single allocation */ void *res = calloc (1, size); if (!res) return NULL; - if (!mono_gc_register_root (res, size, descr)) { + if (!mono_gc_register_root (res, size, descr, source, msg)) { free (res); res = NULL; } @@ -970,6 +986,7 @@ mono_gc_free_fixed (void* addr) */ static MonoMethod* alloc_method_cache [ATYPE_NUM]; +static MonoMethod* slowpath_alloc_method_cache [ATYPE_NUM]; static gboolean use_managed_allocator = TRUE; #ifdef MANAGED_ALLOCATION @@ -990,7 +1007,7 @@ static gboolean use_managed_allocator = TRUE; #else -#if defined(__APPLE__) || defined (HOST_WIN32) +#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS) #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \ mono_mb_emit_byte ((mb), 
MONO_CUSTOM_PREFIX); \ mono_mb_emit_byte ((mb), CEE_MONO_TLS); \ @@ -1025,7 +1042,7 @@ static gboolean use_managed_allocator = TRUE; * that they are executed atomically via the restart mechanism. */ static MonoMethod* -create_allocator (int atype) +create_allocator (int atype, gboolean slowpath) { int p_var, size_var; guint32 slowpath_branch, max_size_branch; @@ -1034,9 +1051,9 @@ create_allocator (int atype) MonoMethodSignature *csig; static gboolean registered = FALSE; int tlab_next_addr_var, new_next_var; - int num_params, i; const char *name = NULL; AllocatorWrapperInfo *info; + int num_params, i; if (!registered) { mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE); @@ -1046,21 +1063,22 @@ create_allocator (int atype) } if (atype == ATYPE_SMALL) { - num_params = 2; - name = "AllocSmall"; + name = slowpath ? "SlowAllocSmall" : "AllocSmall"; } else if (atype == ATYPE_NORMAL) { - num_params = 1; - name = "Alloc"; + name = slowpath ? "SlowAlloc" : "Alloc"; } else if (atype == ATYPE_VECTOR) { - num_params = 2; - name = "AllocVector"; + name = slowpath ? "SlowAllocVector" : "AllocVector"; } else if (atype == ATYPE_STRING) { - num_params = 2; - name = "AllocString"; + name = slowpath ? "SlowAllocString" : "AllocString"; } else { g_assert_not_reached (); } + if (atype == ATYPE_NORMAL) + num_params = 1; + else + num_params = 2; + csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params); if (atype == ATYPE_STRING) { csig->ret = &mono_defaults.string_class->byval_arg; @@ -1068,13 +1086,36 @@ create_allocator (int atype) csig->params [1] = &mono_defaults.int32_class->byval_arg; } else { csig->ret = &mono_defaults.object_class->byval_arg; - for (i = 0; i < num_params; ++i) + for (i = 0; i < num_params; i++) csig->params [i] = &mono_defaults.int_class->byval_arg; } mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC); #ifndef DISABLE_JIT + if (slowpath) { + switch (atype) { + case ATYPE_NORMAL: + case ATYPE_SMALL: + mono_mb_emit_ldarg (mb, 0); + mono_mb_emit_icall (mb, mono_object_new_specific); + break; + case ATYPE_VECTOR: + mono_mb_emit_ldarg (mb, 0); + mono_mb_emit_ldarg (mb, 1); + mono_mb_emit_icall (mb, mono_array_new_specific); + break; + case ATYPE_STRING: + mono_mb_emit_ldarg (mb, 1); + mono_mb_emit_icall (mb, mono_string_alloc); + break; + default: + g_assert_not_reached (); + } + + goto done; + } + size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg); if (atype == ATYPE_SMALL) { /* size_var = size_arg */ @@ -1330,12 +1371,16 @@ create_allocator (int atype) /* return p */ mono_mb_emit_ldloc (mb, p_var); + + done: mono_mb_emit_byte (mb, CEE_RET); #endif res = mono_mb_create_method (mb, csig, 8); mono_mb_free (mb); +#ifndef DISABLE_JIT mono_method_get_header (res)->init_locals = FALSE; +#endif info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo)); info->gc_name = "sgen"; @@ -1372,17 +1417,17 @@ mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean know return NULL; if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE) return NULL; - if (klass->has_finalize || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)) + if (mono_class_has_finalizer (klass) || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)) return NULL; if (klass->rank) return NULL; if (klass->byval_arg.type == 
MONO_TYPE_STRING) - return mono_gc_get_managed_allocator_by_type (ATYPE_STRING); + return mono_gc_get_managed_allocator_by_type (ATYPE_STRING, FALSE); /* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */ if (known_instance_size) - return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL); + return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL, FALSE); else - return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL); + return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL, FALSE); #else return NULL; #endif @@ -1402,7 +1447,7 @@ mono_gc_get_managed_array_allocator (MonoClass *klass) return NULL; g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass)); - return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR); + return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR, FALSE); #else return NULL; #endif @@ -1415,10 +1460,11 @@ sgen_set_use_managed_allocator (gboolean flag) } MonoMethod* -mono_gc_get_managed_allocator_by_type (int atype) +mono_gc_get_managed_allocator_by_type (int atype, gboolean slowpath) { #ifdef MANAGED_ALLOCATION MonoMethod *res; + MonoMethod **cache = slowpath ? slowpath_alloc_method_cache : alloc_method_cache; if (!use_managed_allocator) return NULL; @@ -1426,18 +1472,18 @@ mono_gc_get_managed_allocator_by_type (int atype) if (!mono_runtime_has_tls_get ()) return NULL; - res = alloc_method_cache [atype]; + res = cache [atype]; if (res) return res; - res = create_allocator (atype); + res = create_allocator (atype, slowpath); LOCK_GC; - if (alloc_method_cache [atype]) { + if (cache [atype]) { mono_free_method (res); - res = alloc_method_cache [atype]; + res = cache [atype]; } else { mono_memory_barrier (); - alloc_method_cache [atype] = res; + cache [atype] = res; } UNLOCK_GC; @@ -1459,7 +1505,7 @@ sgen_is_managed_allocator (MonoMethod *method) int i; for (i = 0; i < ATYPE_NUM; ++i) - if (method == alloc_method_cache [i]) + if (method == alloc_method_cache [i] || method == slowpath_alloc_method_cache [i]) return TRUE; return FALSE; } @@ -1470,7 +1516,7 @@ sgen_has_managed_allocator (void) int i; for (i = 0; i < ATYPE_NUM; ++i) - if (alloc_method_cache [i]) + if (alloc_method_cache [i] || slowpath_alloc_method_cache [i]) return TRUE; return FALSE; } @@ -1539,23 +1585,25 @@ find_next_card (guint8 *card_data, guint8 *end) #define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size)) gboolean -sgen_client_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx) +sgen_client_cardtable_scan_object (GCObject *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx) { - MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj); + MonoVTable *vt = SGEN_LOAD_VTABLE (obj); MonoClass *klass = vt->klass; - SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES ((GCVTable*)vt), "Why would we ever call this on reference-free objects?"); + SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES (vt), "Why would we ever call this on reference-free objects?"); if (vt->rank) { + MonoArray *arr = (MonoArray*)obj; guint8 *card_data, *card_base; guint8 *card_data_end; char *obj_start = sgen_card_table_align_pointer (obj); - mword obj_size = sgen_client_par_object_get_size (vt, (GCObject*)obj); - char *obj_end = obj + obj_size; + mword bounds_size; + mword obj_size = sgen_mono_array_size (vt, arr, &bounds_size, sgen_vtable_get_descriptor (vt)); + /* We don't want to scan the bounds entries at the end of 
multidimensional arrays */ + char *obj_end = (char*)obj + obj_size - bounds_size; size_t card_count; size_t extra_idx = 0; - MonoArray *arr = (MonoArray*)obj; mword desc = (mword)klass->element_class->gc_descr; int elem_size = mono_array_element_size (klass); @@ -1624,7 +1672,7 @@ LOOP_HEAD: gpointer new, old = *(gpointer*)elem; if ((mod_union && old) || G_UNLIKELY (sgen_ptr_in_nursery (old))) { HEAVY_STAT (++los_array_remsets); - copy_func ((void**)elem, ctx.queue); + copy_func ((GCObject**)elem, ctx.queue); new = *(gpointer*)elem; if (G_UNLIKELY (sgen_ptr_in_nursery (new))) sgen_add_to_global_remset (elem, new); @@ -1665,7 +1713,7 @@ mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length) #ifndef DISABLE_CRITICAL_REGION ENTER_CRITICAL_REGION; - arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size); + arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size); if (arr) { /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/ arr->max_length = (mono_array_size_t)max_length; @@ -1677,7 +1725,7 @@ mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length) LOCK_GC; - arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size); + arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size); if (G_UNLIKELY (!arr)) { UNLOCK_GC; return mono_gc_out_of_memory (size); @@ -1688,7 +1736,10 @@ mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length) UNLOCK_GC; done: - SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Vector has incorrect size."); + if (G_UNLIKELY (alloc_events)) + mono_profiler_allocation (&arr->obj); + + SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Vector has incorrect size."); return arr; } @@ -1704,7 +1755,7 @@ mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uint #ifndef DISABLE_CRITICAL_REGION ENTER_CRITICAL_REGION; - arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size); + arr = (MonoArray*)sgen_try_alloc_obj_nolock (vtable, size); if (arr) { /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/ arr->max_length = (mono_array_size_t)max_length; @@ -1719,7 +1770,7 @@ mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uint LOCK_GC; - arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size); + arr = (MonoArray*)sgen_alloc_obj_nolock (vtable, size); if (G_UNLIKELY (!arr)) { UNLOCK_GC; return mono_gc_out_of_memory (size); @@ -1733,7 +1784,10 @@ mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uint UNLOCK_GC; done: - SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Array has incorrect size."); + if (G_UNLIKELY (alloc_events)) + mono_profiler_allocation (&arr->obj); + + SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (GCObject*)arr)), "Array has incorrect size."); return arr; } @@ -1748,19 +1802,19 @@ mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len) #ifndef DISABLE_CRITICAL_REGION ENTER_CRITICAL_REGION; - str = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size); + str = (MonoString*)sgen_try_alloc_obj_nolock (vtable, size); if (str) { /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/ str->length = len; EXIT_CRITICAL_REGION; - return str; + goto done; } EXIT_CRITICAL_REGION; #endif 
LOCK_GC; - str = sgen_alloc_obj_nolock ((GCVTable*)vtable, size); + str = (MonoString*)sgen_alloc_obj_nolock (vtable, size); if (G_UNLIKELY (!str)) { UNLOCK_GC; return mono_gc_out_of_memory (size); @@ -1770,6 +1824,10 @@ mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len) UNLOCK_GC; + done: + if (G_UNLIKELY (alloc_events)) + mono_profiler_allocation (&str->object); + return str; } @@ -1786,7 +1844,7 @@ mono_gc_set_string_length (MonoString *str, gint32 new_length) * the space to be reclaimed by SGen. */ if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) { - CHECK_CANARY_FOR_OBJECT (str); + CHECK_CANARY_FOR_OBJECT ((GCObject*)str, TRUE); memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE); memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE); } else { @@ -1824,7 +1882,7 @@ add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t ex notify_gc_roots (report); report->objects [report->count] = object; report->root_types [report->count] = rtype; - report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)SGEN_LOAD_VTABLE (object))->klass; + report->extra_info [report->count++] = (uintptr_t)SGEN_LOAD_VTABLE (object)->klass; } void @@ -1866,7 +1924,7 @@ report_finalizer_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *cri static GCRootReport *root_report; static void -single_arg_report_root (void **obj, void *gc_data) +single_arg_report_root (MonoObject **obj, void *gc_data) { if (*obj) add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0); @@ -1906,9 +1964,9 @@ precisely_report_roots_from (GCRootReport *report, void** start_root, void** end break; } case ROOT_DESC_USER: { - MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc); + MonoGCRootMarkFunc marker = (MonoGCRootMarkFunc)sgen_get_user_descriptor_func (desc); root_report = report; - marker (start_root, single_arg_report_root, NULL); + marker ((MonoObject**)start_root, single_arg_report_root, NULL); break; } case ROOT_DESC_RUN_LEN: @@ -1959,10 +2017,10 @@ sgen_client_collecting_major_1 (void) } void -sgen_client_pinned_los_object (char *obj) +sgen_client_pinned_los_object (GCObject *obj) { if (profile_roots) - add_profile_gc_root (&major_root_report, obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0); + add_profile_gc_root (&major_root_report, (char*)obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0); } void @@ -2037,22 +2095,23 @@ typedef struct { } while (0) static void -collect_references (HeapWalkInfo *hwi, char *start, size_t size) +collect_references (HeapWalkInfo *hwi, GCObject *obj, size_t size) { - mword desc = sgen_obj_get_descriptor (start); + char *start = (char*)obj; + mword desc = sgen_obj_get_descriptor (obj); #include "sgen/sgen-scan-object.h" } static void -walk_references (char *start, size_t size, void *data) +walk_references (GCObject *start, size_t size, void *data) { HeapWalkInfo *hwi = data; hwi->called = 0; hwi->count = 0; collect_references (hwi, start, size); if (hwi->count || !hwi->called) - hwi->callback ((MonoObject*)start, mono_object_class (start), hwi->called? 0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); + hwi->callback (start, mono_object_class (start), hwi->called? 
0: size, hwi->count, hwi->refs, hwi->offsets, hwi->data); } /** @@ -2084,7 +2143,7 @@ mono_gc_walk_heap (int flags, MonoGCReferences callback, void *data) hwi.data = data; sgen_clear_nursery_fragments (); - sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE); + sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data, walk_references, &hwi, FALSE, TRUE); major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, walk_references, &hwi); sgen_los_iterate_objects (walk_references, &hwi); @@ -2262,7 +2321,7 @@ void* mono_gc_scan_object (void *obj, void *gc_data) { ScanCopyContext *ctx = gc_data; - ctx->ops->copy_or_mark_object (&obj, ctx->queue); + ctx->ops->copy_or_mark_object ((GCObject**)&obj, ctx->queue); return obj; } @@ -2279,6 +2338,8 @@ sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean p FOREACH_THREAD (info) { int skip_reason = 0; + void *aligned_stack_start = (void*)(mword) ALIGN_TO ((mword)info->client_info.stack_start, SIZEOF_VOID_P); + if (info->client_info.skip) { SGEN_LOG (3, "Skipping dead thread %p, range: %p-%p, size: %zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start); skip_reason = 1; @@ -2298,13 +2359,13 @@ sgen_client_scan_thread_data (void *start_nursery, void *end_nursery, gboolean p g_assert (info->client_info.suspend_done); SGEN_LOG (3, "Scanning thread %p, range: %p-%p, size: %zd, pinned=%zd", info, info->client_info.stack_start, info->client_info.stack_end, (char*)info->client_info.stack_end - (char*)info->client_info.stack_start, sgen_get_pinned_count ()); if (mono_gc_get_gc_callbacks ()->thread_mark_func && !conservative_stack_mark) { - mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, info->client_info.stack_start, info->client_info.stack_end, precise, &ctx); + mono_gc_get_gc_callbacks ()->thread_mark_func (info->client_info.runtime_data, aligned_stack_start, info->client_info.stack_end, precise, &ctx); } else if (!precise) { if (!conservative_stack_mark) { fprintf (stderr, "Precise stack mark not supported - disabling.\n"); conservative_stack_mark = TRUE; } - sgen_conservatively_pin_objects_from (info->client_info.stack_start, info->client_info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK); + sgen_conservatively_pin_objects_from (aligned_stack_start, info->client_info.stack_end, start_nursery, end_nursery, PIN_TYPE_STACK); } if (!precise) { @@ -2344,15 +2405,15 @@ mono_gc_set_stack_end (void *stack_end) */ int -mono_gc_register_root (char *start, size_t size, void *descr) +mono_gc_register_root (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg) { - return sgen_register_root (start, size, descr, descr ? ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED); + return sgen_register_root (start, size, descr, descr ? 
ROOT_TYPE_NORMAL : ROOT_TYPE_PINNED, source, msg); } int -mono_gc_register_root_wbarrier (char *start, size_t size, void *descr) +mono_gc_register_root_wbarrier (char *start, size_t size, MonoGCDescriptor descr, MonoGCRootSource source, const char *msg) { - return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER); + return sgen_register_root (start, size, descr, ROOT_TYPE_WBARRIER, source, msg); } void @@ -2480,16 +2541,16 @@ mono_gc_get_heap_size (void) return (int64_t)sgen_gc_get_total_heap_allocation (); } -void* +MonoGCDescriptor mono_gc_make_root_descr_user (MonoGCRootMarkFunc marker) { return sgen_make_user_root_descriptor (marker); } -void* +MonoGCDescriptor mono_gc_make_descr_for_string (gsize *bitmap, int numbits) { - return (void*)SGEN_DESC_STRING; + return SGEN_DESC_STRING; } void* @@ -2506,22 +2567,109 @@ mono_gc_get_los_limit (void) return SGEN_MAX_SMALL_OBJ_SIZE; } +gpointer +sgen_client_default_metadata (void) +{ + return mono_domain_get (); +} + +gpointer +sgen_client_metadata_for_object (GCObject *obj) +{ + return mono_object_domain (obj); +} + +/** + * mono_gchandle_is_in_domain: + * @gchandle: a GCHandle's handle. + * @domain: An application domain. + * + * Returns: true if the object wrapped by the @gchandle belongs to the specific @domain. + */ +gboolean +mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain) +{ + MonoDomain *gchandle_domain = sgen_gchandle_get_metadata (gchandle); + return domain->domain_id == gchandle_domain->domain_id; +} + +/** + * mono_gchandle_free_domain: + * @unloading: domain that is unloading + * + * Function used internally to cleanup any GC handle for objects belonging + * to the specified domain during appdomain unload. + */ void -mono_gc_weak_link_add (void **link_addr, MonoObject *obj, gboolean track) +mono_gchandle_free_domain (MonoDomain *unloading) { - sgen_register_disappearing_link (obj, link_addr, track, FALSE); +} + +static gpointer +null_link_if_in_domain (gpointer hidden, GCHandleType handle_type, int max_generation, gpointer user) +{ + MonoDomain *unloading_domain = user; + MonoDomain *obj_domain; + gboolean is_weak = MONO_GC_HANDLE_TYPE_IS_WEAK (handle_type); + if (MONO_GC_HANDLE_IS_OBJECT_POINTER (hidden)) { + MonoObject *obj = MONO_GC_REVEAL_POINTER (hidden, is_weak); + obj_domain = mono_object_domain (obj); + } else { + obj_domain = MONO_GC_REVEAL_POINTER (hidden, is_weak); + } + if (unloading_domain->domain_id == obj_domain->domain_id) + return NULL; + return hidden; } void -mono_gc_weak_link_remove (void **link_addr, gboolean track) +sgen_null_links_for_domain (MonoDomain *domain) { - sgen_register_disappearing_link (NULL, link_addr, track, FALSE); + guint type; + for (type = HANDLE_TYPE_MIN; type < HANDLE_TYPE_MAX; ++type) + sgen_gchandle_iterate (type, GENERATION_OLD, null_link_if_in_domain, domain); } -MonoObject* -mono_gc_weak_link_get (void **link_addr) +void +mono_gchandle_set_target (guint32 gchandle, MonoObject *obj) { - return sgen_weak_link_get (link_addr); + sgen_gchandle_set_target (gchandle, obj); +} + +void +sgen_client_gchandle_created (int handle_type, GCObject *obj, guint32 handle) +{ +#ifndef DISABLE_PERFCOUNTERS + mono_perfcounters->gc_num_handles++; +#endif + mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handle_type, handle, obj); +} + +void +sgen_client_gchandle_destroyed (int handle_type, guint32 handle) +{ +#ifndef DISABLE_PERFCOUNTERS + mono_perfcounters->gc_num_handles--; +#endif + mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handle_type, handle, 
NULL); +} + +void +sgen_client_ensure_weak_gchandles_accessible (void) +{ + /* + * During the second bridge processing step the world is + * running again. That step processes all weak links once + * more to null those that refer to dead objects. Before that + * is completed, those links must not be followed, so we + * conservatively wait for bridge processing when any weak + * link is dereferenced. + */ + /* FIXME: A GC can occur after this check fails, in which case we + * should wait for bridge processing but would fail to do so. + */ + if (G_UNLIKELY (bridge_processing_in_progress)) + mono_gc_wait_for_bridge_processing (); } gboolean @@ -2646,23 +2794,20 @@ sgen_client_pre_collection_checks (void) } gboolean -sgen_client_vtable_is_inited (GCVTable *gc_vtable) +sgen_client_vtable_is_inited (MonoVTable *vt) { - MonoVTable *vt = (MonoVTable*)gc_vtable; return vt->klass->inited; } const char* -sgen_client_vtable_get_namespace (GCVTable *gc_vtable) +sgen_client_vtable_get_namespace (MonoVTable *vt) { - MonoVTable *vt = (MonoVTable*)gc_vtable; return vt->klass->name_space; } const char* -sgen_client_vtable_get_name (GCVTable *gc_vtable) +sgen_client_vtable_get_name (MonoVTable *vt) { - MonoVTable *vt = (MonoVTable*)gc_vtable; return vt->klass->name; } @@ -2695,7 +2840,7 @@ sgen_client_init (void) #ifndef HAVE_KW_THREAD mono_native_tls_alloc (&thread_info_key, NULL); -#if defined(__APPLE__) || defined (HOST_WIN32) +#if defined(TARGET_OSX) || defined(TARGET_WIN32) || defined(TARGET_ANDROID) || defined(TARGET_IOS) /* * CEE_MONO_TLS requires the tls offset, not the key, so the code below only works on darwin, * where the two are the same. @@ -2823,4 +2968,15 @@ mono_gc_base_init (void) sgen_set_use_managed_allocator (FALSE); } +void +mono_gc_base_cleanup (void) +{ +} + +gboolean +mono_gc_is_null (void) +{ + return FALSE; +} + #endif
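
[Editor's note, not part of the patch above.] One pattern worth calling out is how the reworked mono_gc_get_managed_allocator_by_type fills its per-type allocator caches (alloc_method_cache / slowpath_alloc_method_cache): the wrapper method is created outside the GC lock, and under LOCK_GC the thread either publishes its result (after mono_memory_barrier ()) or frees it and adopts the one another thread raced in first. The standalone C sketch below illustrates that create-then-publish idiom under stated assumptions: cache, cache_lock and build_entry are illustrative names, not Mono APIs, and __sync_synchronize () stands in for mono_memory_barrier ().

/* Illustrative sketch of the create-then-publish cache used by
 * mono_gc_get_managed_allocator_by_type in the patch above.
 * Names here are hypothetical; only the locking pattern mirrors the diff. */
#include <pthread.h>
#include <stdlib.h>

#define TYPE_NUM 4

static void *cache [TYPE_NUM];
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for create_allocator (): expensive to run, but safe to run concurrently. */
static void *
build_entry (int type)
{
	return malloc (64);
}

static void *
get_cached_entry (int type)
{
	void *res = cache [type];
	if (res)
		return res;                /* fast path: entry already published */

	res = build_entry (type);          /* build without holding the lock */

	pthread_mutex_lock (&cache_lock);
	if (cache [type]) {
		/* Another thread won the race: discard ours, adopt theirs
		 * (the diff calls mono_free_method () here). */
		free (res);
		res = cache [type];
	} else {
		/* Publish. The barrier orders the entry's initialization before
		 * the pointer store, so the unlocked fast path above never sees
		 * a half-built entry. */
		__sync_synchronize ();
		cache [type] = res;
	}
	pthread_mutex_unlock (&cache_lock);

	return res;
}

Building outside the lock keeps the potentially slow wrapper compilation out of the GC lock's critical section; the cost is that a losing thread occasionally builds an entry only to throw it away, which the patch accepts as the cheaper trade-off.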