2 * sgen-mono.c: SGen features specific to Mono.
4 * Copyright (C) 2014 Xamarin Inc
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License 2.0 as published by the Free Software Foundation;
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License 2.0 along with this library; if not, write to the Free
17 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "metadata/sgen-gc.h"
24 #include "metadata/sgen-protocol.h"
25 #include "metadata/monitor.h"
26 #include "metadata/sgen-layout-stats.h"
27 #include "metadata/sgen-client.h"
28 #include "metadata/sgen-cardtable.h"
29 #include "metadata/marshal.h"
30 #include "metadata/method-builder.h"
31 #include "metadata/abi-details.h"
32 #include "metadata/profiler-private.h"
33 #include "utils/mono-memory-model.h"
35 /* If set, check that there are no references to the domain left at domain unload */
36 gboolean sgen_mono_xdomain_checks = FALSE;
/*
 * Round VAL up to the next multiple of ALIGN (ALIGN must be a power of two).
 * Both arguments are fully parenthesized: the original expanded to
 * `(guint64)val + ...`, so an expression argument such as `p + n` or a
 * ternary would bind incorrectly (the cast applied only to the first term).
 */
#define ALIGN_TO(val,align) ((((guint64)(val)) + ((align) - 1)) & ~((align) - 1))
/*
 * ptr_on_stack: returns whether PTR lies within the current thread's stack.
 * The address of a local (`stack_start`) approximates the current top of
 * stack; `info->stack_end` bounds the other side.
 */
45 ptr_on_stack (void *ptr)
47 gpointer stack_start = &stack_start;
48 SgenThreadInfo *info = mono_thread_info_current ();
50 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * Heavy-binary-protocol-only helper: walks all reference slots of the object
 * at START (descriptor DESC) and logs a wbarrier record for the corresponding
 * slot in the copy at DEST.  HANDLE_PTR is the per-slot callback consumed by
 * the generic scanner in sgen-scan-object.h (included with SCAN_OBJECT_NOVTABLE
 * because the fill object has no real vtable to read the descriptor from).
 */
55 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
57 #define HANDLE_PTR(ptr,obj) do {					\
58 gpointer o = *(gpointer*)(ptr);					\
60 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj));	\
61 binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
66 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
68 #define SCAN_OBJECT_NOVTABLE
69 #include "sgen-scan-object.h"
/*
 * mono_gc_wbarrier_value_copy: write barrier for copying COUNT instances of
 * value type KLASS from SRC to DEST.
 * Fast path: if DEST is in the nursery, on the stack, or the type holds no
 * references, a plain atomic memmove suffices — no remembered-set entry needed.
 * Otherwise the copy is delegated to the remembered set's wbarrier_value_copy,
 * which both copies and records the slots.
 */
74 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
76 HEAVY_STAT (++stat_wbarrier_value_copy);
77 g_assert (klass->valuetype);
79 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
81 if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !sgen_gc_descr_has_references ((mword)klass->gc_descr)) {
82 size_t element_size = mono_class_value_size (klass, NULL);
83 size_t size = count * element_size;
84 mono_gc_memmove_atomic (dest, src, size);
/* Heavy protocol: log a wbarrier record per copied element.  The source
 * pointer is rebased by -sizeof (MonoObject) because the scanner expects an
 * object start, while SRC points at unboxed value data. */
88 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
89 if (binary_protocol_is_heavy_enabled ()) {
90 size_t element_size = mono_class_value_size (klass, NULL);
92 for (i = 0; i < count; ++i) {
93 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
94 (char*)src + i * element_size - sizeof (MonoObject),
95 (mword) klass->gc_descr);
100 sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
104 * mono_gc_wbarrier_object_copy:
106 * Write barrier to call when obj is the result of a clone or copy of an object.
/* Fast path: a nursery/stack destination needs no remembered-set entry, so the
 * payload (everything after the MonoObject header) is copied directly. */
109 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
113 HEAVY_STAT (++stat_wbarrier_object_copy);
115 if (sgen_ptr_in_nursery (obj) || ptr_on_stack (obj)) {
116 size = mono_object_class (obj)->instance_size;
117 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
118 size - sizeof (MonoObject));
122 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
123 if (binary_protocol_is_heavy_enabled ())
124 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr)ยง;
/* Slow path: the remembered set performs the copy and records the slots. */
127 sgen_get_remset ()->wbarrier_object_copy ((GCObject*)obj, (GCObject*)src);
/*
 * mono_gc_wbarrier_set_arrayref: store VALUE into the array slot SLOT_PTR of
 * ARR with the required write barrier.  Nursery slots are stored directly;
 * otherwise the remembered set's wbarrier_set_field does store + remset.
 */
131 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
133 HEAVY_STAT (++stat_wbarrier_set_arrayref);
134 if (sgen_ptr_in_nursery (slot_ptr)) {
135 *(void**)slot_ptr = value;
138 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
/* NOTE(review): value->vtable is read unconditionally here — confirm VALUE
 * can never be NULL on this path. */
140 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
142 sgen_get_remset ()->wbarrier_set_field ((GCObject*)arr, slot_ptr, (GCObject*)value);
146 * Dummy filler objects
149 /* Vtable of the objects used to fill out nursery fragments before a collection */
150 static GCVTable *array_fill_vtable;
/*
 * sgen_client_get_array_fill_vtable: lazily builds (once) and returns the
 * fake byte-array vtable used for dummy filler objects that pad out nursery
 * fragments.  The vtable lives in static storage, manually 8-byte aligned.
 */
153 sgen_client_get_array_fill_vtable (void)
155 if (!array_fill_vtable) {
156 static MonoClass klass;
157 static char _vtable[sizeof(MonoVTable)+8];
/* Over-allocate by 8 and round up so the vtable pointer is 8-aligned. */
158 MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8);
161 MonoDomain *domain = mono_get_root_domain ();
/* Minimal fake class: a byte[] with 1-byte elements and a MonoArray header. */
164 klass.element_class = mono_defaults.byte_class;
166 klass.instance_size = sizeof (MonoArray);
167 klass.sizes.element_size = 1;
168 klass.name = "array_filler_type";
170 vtable->klass = &klass;
/* Descriptor for a reference-free vector of 1-byte elements. */
172 vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
175 array_fill_vtable = (GCVTable*)vtable;
177 return array_fill_vtable;
/*
 * sgen_client_array_fill_range: turn the SIZE bytes at START into a parseable
 * dummy byte-array object so heap walkers can skip the hole.  Ranges smaller
 * than a MonoArray header cannot hold an object and are simply zeroed.
 */
181 sgen_client_array_fill_range (char *start, size_t size)
185 if (size < sizeof (MonoArray)) {
186 memset (start, 0, size);
190 o = (MonoArray*)start;
191 o->obj.vtable = (MonoVTable*)sgen_client_get_array_fill_vtable ();
192 /* Mark this as not a real object */
193 o->obj.synchronisation = GINT_TO_POINTER (-1);
/* Element size is 1, so max_length is just the leftover byte count. */
195 o->max_length = (mono_array_size_t)(size - sizeof (MonoArray));
/*
 * sgen_client_zero_array_fill_header: erase the dummy-array header written by
 * sgen_client_array_fill_range before the memory is reused for allocation.
 * Sub-header ranges were zeroed at fill time, so they are only asserted clean.
 */
201 sgen_client_zero_array_fill_header (void *p, size_t size)
203 if (size >= sizeof (MonoArray)) {
204 memset (p, 0, sizeof (MonoArray));
206 static guint8 zeros [sizeof (MonoArray)];
208 SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
216 static MonoGCFinalizerCallbacks fin_callbacks;
/*
 * mono_gc_get_vtable_bits: compute the GC-specific bit flags stored in a
 * class's vtable: bridge-object kind (when bridge processing is active) and
 * finalizer-awareness (per the registered finalizer callbacks).
 */
219 mono_gc_get_vtable_bits (MonoClass *class)
222 /* FIXME move this to the bridge code */
223 if (sgen_need_bridge_processing ()) {
224 switch (sgen_bridge_class_kind (class)) {
225 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
226 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
227 res = SGEN_GC_BIT_BRIDGE_OBJECT;
229 case GC_BRIDGE_OPAQUE_CLASS:
230 res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
232 case GC_BRIDGE_TRANSPARENT_CLASS:
/* Ask the embedder's callback whether instances of this class need the
 * queued-for-finalization notification. */
236 if (fin_callbacks.is_class_finalization_aware) {
237 if (fin_callbacks.is_class_finalization_aware (class))
238 res |= SGEN_GC_BIT_FINALIZER_AWARE;
/*
 * is_finalization_aware: true when OBJ's vtable carries the
 * SGEN_GC_BIT_FINALIZER_AWARE flag set by mono_gc_get_vtable_bits.
 */
244 is_finalization_aware (MonoObject *obj)
246 MonoVTable *vt = ((MonoVTable*)SGEN_LOAD_VTABLE (obj));
247 return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/*
 * sgen_client_object_queued_for_finalization: SGen hook invoked when GC_OBJ is
 * queued for finalization; forwards to the embedder callback for
 * finalization-aware objects only.
 */
251 sgen_client_object_queued_for_finalization (GCObject *gc_obj)
253 MonoObject *obj = (MonoObject*)gc_obj;
254 if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
255 fin_callbacks.object_queued_for_finalization (obj);
/*
 * mono_gc_register_finalizer_callbacks: install the embedder's finalizer
 * extension callbacks after an exact version check; the struct is copied, so
 * the caller's storage need not outlive the call.
 */
259 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
261 if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
262 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
264 fin_callbacks = *callbacks;
/*
 * Singly-linked registry of all ephemeron arrays (the backing store of
 * ConditionalWeakTable).  Protected by the GC lock; nodes are allocated from
 * INTERNAL_MEM_EPHEMERON_LINK.
 */
271 typedef struct _EphemeronLinkNode EphemeronLinkNode;
273 struct _EphemeronLinkNode {
274 EphemeronLinkNode *next;
283 static EphemeronLinkNode *ephemeron_list;
285 /* LOCKING: requires that the GC lock is held */
/*
 * null_ephemerons_for_domain: unlink (and free) every registered ephemeron
 * array belonging to DOMAIN, called during appdomain unload.
 */
287 null_ephemerons_for_domain (MonoDomain *domain)
289 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
292 MonoObject *object = (MonoObject*)current->array;
/* NOTE(review): object->vtable is dereferenced by this assert, yet the
 * condition below still tests `object &&` — the NULL check comes after the
 * dereference.  Confirm object can never be NULL here (or drop the check). */
295 SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");
297 if (object && object->vtable->domain == domain) {
298 EphemeronLinkNode *tmp = current;
/* Standard singly-linked-list removal: patch prev or the list head. */
301 prev->next = current->next;
303 ephemeron_list = current->next;
305 current = current->next;
306 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
309 current = current->next;
314 /* LOCKING: requires that the GC lock is held */
/*
 * sgen_client_clear_unreachable_ephemerons: after marking, drop dead
 * ephemeron arrays from the registry and tombstone entries whose key died.
 * Uses ctx's copy/mark function to update moved pointers.
 */
316 sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
318 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
319 SgenGrayQueue *queue = ctx.queue;
320 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
322 Ephemeron *cur, *array_end;
326 char *object = current->array;
/* The whole array died: unlink its registry node and free it. */
328 if (!sgen_is_object_alive_for_current_gen (object)) {
329 EphemeronLinkNode *tmp = current;
331 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
334 prev->next = current->next;
336 ephemeron_list = current->next;
338 current = current->next;
339 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* The array survived: forward our pointer in case it was moved. */
344 copy_func ((void**)&object, queue);
345 current->array = object;
347 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
349 array = (MonoArray*)object;
350 cur = mono_array_addr (array, Ephemeron, 0);
351 array_end = cur + mono_array_length_fast (array);
/* Tombstone sentinel marks slots whose key was collected. */
352 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
354 for (; cur < array_end; ++cur) {
355 char *key = (char*)cur->key;
357 if (!key || key == tombstone)
360 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
361 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
362 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Dead key: tombstone the slot so the pair can be dropped/reused. */
364 if (!sgen_is_object_alive_for_current_gen (key)) {
365 cur->key = tombstone;
371 current = current->next;
376 LOCKING: requires that the GC lock is held
378 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * sgen_client_mark_ephemerons: one iteration of the ephemeron fixpoint.
 * For every live ephemeron array, mark the values of entries whose key is
 * already live.  Returns TRUE when no previously-dead value was marked, i.e.
 * the fixpoint has been reached and the caller can stop iterating.
 */
381 sgen_client_mark_ephemerons (ScanCopyContext ctx)
383 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
384 SgenGrayQueue *queue = ctx.queue;
385 gboolean nothing_marked = TRUE;
386 EphemeronLinkNode *current = ephemeron_list;
388 Ephemeron *cur, *array_end;
391 for (current = ephemeron_list; current; current = current->next) {
392 char *object = current->array;
393 SGEN_LOG (5, "Ephemeron array at %p", object);
395 /*It has to be alive*/
396 if (!sgen_is_object_alive_for_current_gen (object)) {
397 SGEN_LOG (5, "\tnot reachable");
/* Forward the array pointer in case the collector moved it. */
401 copy_func ((void**)&object, queue);
403 array = (MonoArray*)object;
404 cur = mono_array_addr (array, Ephemeron, 0);
405 array_end = cur + mono_array_length_fast (array);
406 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
408 for (; cur < array_end; ++cur) {
409 char *key = cur->key;
411 if (!key || key == tombstone)
414 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
415 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
416 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
/* Live key: mark/forward both key and value.  The value's liveness is
 * sampled *before* marking so a newly-marked value forces another round. */
418 if (sgen_is_object_alive_for_current_gen (key)) {
419 char *value = cur->value;
421 copy_func ((void**)&cur->key, queue);
423 if (!sgen_is_object_alive_for_current_gen (value))
424 nothing_marked = FALSE;
425 copy_func ((void**)&cur->value, queue);
431 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
432 return nothing_marked;
/*
 * mono_gc_ephemeron_array_add: register OBJ (an Ephemeron[] backing a
 * ConditionalWeakTable) by pushing a new node on the ephemeron_list head.
 */
436 mono_gc_ephemeron_array_add (MonoObject *obj)
438 EphemeronLinkNode *node;
442 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
447 node->array = (char*)obj;
448 node->next = ephemeron_list;
449 ephemeron_list = node;
451 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/*
 * need_remove_object_for_domain: whether the object at START belongs to the
 * unloading DOMAIN and must therefore be scrubbed from the heap; logs a
 * cleanup record to the binary protocol when it does.
 */
462 need_remove_object_for_domain (char *start, MonoDomain *domain)
464 if (mono_object_domain (start) == domain) {
465 SGEN_LOG (4, "Need to cleanup object %p", start);
466 binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((GCObject*)start));
/*
 * process_object_for_domain_clearing: per-object pre-pass for domain unload.
 * Verifies internal threads stay in the root domain and severs remoting
 * proxies whose wrapped server object lives in (or was already cleared from)
 * the dying domain, so later frees can't be reached through the proxy.
 */
473 process_object_for_domain_clearing (char *start, MonoDomain *domain)
475 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (start);
476 if (vt->klass == mono_defaults.internal_thread_class)
477 g_assert (mono_object_domain (start) == mono_get_root_domain ());
478 /* The object could be a proxy for an object in the domain
480 #ifndef DISABLE_REMOTING
481 if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
482 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
484 /* The server could already have been zeroed out, so
485 we need to check for that, too. */
486 if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
487 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
488 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/*
 * clear_domain_process_object: run the proxy-severing pre-pass on OBJ, decide
 * whether it must be removed for DOMAIN, and if so detach its monitor's weak
 * link so the sync block doesn't dangle after the object is scrubbed.
 */
495 clear_domain_process_object (char *obj, MonoDomain *domain)
499 process_object_for_domain_clearing (obj, domain);
500 remove = need_remove_object_for_domain (obj, domain);
502 if (remove && ((MonoObject*)obj)->synchronisation) {
503 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
505 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/*
 * Nursery iteration callback: objects of the dying domain are zeroed in place
 * (with canary adjustment) since the nursery is swept by fragment rebuilding.
 */
512 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
514 if (clear_domain_process_object (obj, domain)) {
515 CANARIFY_SIZE (size);
516 memset (obj, 0, size);
/*
 * Major-heap first-pass callback: only runs the processing/severing step;
 * actual freeing happens in the dedicated second-pass callbacks below.
 */
521 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
523 clear_domain_process_object (obj, domain);
/* Major-heap second pass: free non-pinned objects of the dying domain. */
527 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
529 if (need_remove_object_for_domain (obj, domain))
530 major_collector.free_non_pinned_object (obj, size);
/* Major-heap second pass: free pinned objects of the dying domain. */
534 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
536 if (need_remove_object_for_domain (obj, domain))
537 major_collector.free_pinned_object (obj, size);
541 * When appdomains are unloaded we can easily remove objects that have finalizers,
542 * but all the others could still be present in random places on the heap.
543 * We need a sweep to get rid of them even though it's going to be costly
545 * The reason we need to remove them is because we access the vtable and class
546 * structures to know the object size and the reference bitmap: once the domain is
547 * unloaded the point to random memory.
/*
 * mono_gc_clear_domain: scrub every object belonging to DOMAIN from the
 * nursery, major heap and LOS during appdomain unload.  Runs with the world
 * stopped (restarted at the end); requires no concurrent collection.
 */
550 mono_gc_clear_domain (MonoDomain * domain)
552 LOSObject *bigobj, *prev;
557 binary_protocol_domain_unload_begin (domain);
/* Finish any concurrent collection synchronously before touching the heap. */
561 if (sgen_concurrent_collection_in_progress ())
562 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
563 SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
565 major_collector.finish_sweeping ();
567 sgen_process_fin_stage_entries ();
568 sgen_process_dislink_stage_entries ();
570 sgen_clear_nursery_fragments ();
/* Optional debug pass: ensure no cross-domain references remain. */
572 if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
573 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
574 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
575 sgen_check_for_xdomain_refs ();
578 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
579 to memory returned to the OS.*/
580 null_ephemerons_for_domain (domain);
582 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
583 sgen_null_links_for_domain (domain, i);
585 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
586 sgen_remove_finalizers_for_domain (domain, i);
588 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
589 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
591 /* We need two passes over major and large objects because
592 freeing such objects might give their memory back to the OS
593 (in the case of large objects) or obliterate its vtable
594 (pinned objects with major-copying or pinned and non-pinned
595 objects with major-mark&sweep), but we might need to
596 dereference a pointer from an object to another object if
597 the first object is a proxy. */
598 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
599 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
600 clear_domain_process_object (bigobj->data, domain);
/* Second LOS pass: unlink and free large objects of the dying domain. */
603 for (bigobj = los_object_list; bigobj;) {
604 if (need_remove_object_for_domain (bigobj->data, domain)) {
605 LOSObject *to_free = bigobj;
607 prev->next = bigobj->next;
609 los_object_list = bigobj->next;
610 bigobj = bigobj->next;
/* NOTE(review): bigobj was already advanced above, so this logs the NEXT
 * object's data and dereferences NULL when the freed object was last in the
 * list — this most likely should be to_free->data.  Confirm upstream. */
611 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
612 sgen_los_free_object (to_free);
616 bigobj = bigobj->next;
618 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
619 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
621 if (domain == mono_get_root_domain ()) {
622 sgen_pin_stats_print_class_stats ();
623 sgen_object_layout_dump (stdout);
626 sgen_restart_world (0, NULL);
628 binary_protocol_domain_unload_end (domain);
629 binary_protocol_flush_buffers (FALSE);
638 static MonoMethod* alloc_method_cache [ATYPE_NUM];
639 static gboolean use_managed_allocator = TRUE;
641 #ifdef MANAGED_ALLOCATION
642 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
646 #include "mono/cil/opcode.def"
/*
 * Managed-allocator TLS helpers: emit IL that loads the address of the
 * current thread's tlab_next (NEXT_ADDR) or the value of tlab_temp_end
 * (TEMP_END).  With __thread support the JIT reads the dedicated TLS keys
 * directly; on Apple/Windows it loads the SgenThreadInfo pointer and indexes
 * into it by struct offset; otherwise managed allocation is unsupported and
 * the macros abort at wrapper-generation time.
 */
652 #ifdef HAVE_KW_THREAD
654 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
655 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
656 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
657 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
660 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
661 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
662 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
663 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
668 #if defined(__APPLE__) || defined (HOST_WIN32)
669 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
670 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
671 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
672 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
673 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
674 mono_mb_emit_byte ((mb), CEE_ADD); \
675 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
678 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
679 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
680 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
681 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
682 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_temp_end)); \
683 mono_mb_emit_byte ((mb), CEE_ADD); \
684 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
688 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
689 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
694 /* FIXME: Do this in the JIT, where specialized allocation sequences can be created
695 * for each class. This is currently not easy to do, as it is hard to generate basic
696 * blocks + branches, but it is easy with the linear IL codebase.
698 * For this to work we'd need to solve the TLAB race, first. Now we
699 * require the allocator to be in a few known methods to make sure
700 * that they are executed atomically via the restart mechanism.
/*
 * create_allocator: build the managed (IL) fast-path allocator wrapper for
 * ATYPE (ATYPE_SMALL/NORMAL/VECTOR/STRING).  The generated method computes
 * the object size, bump-allocates from the thread's TLAB, and falls back to
 * the corresponding mono_gc_alloc_* icall when the TLAB is exhausted or the
 * size exceeds the small-object limit.
 */
703 create_allocator (int atype)
706 guint32 slowpath_branch, max_size_branch;
707 MonoMethodBuilder *mb;
709 MonoMethodSignature *csig;
710 static gboolean registered = FALSE;
711 int tlab_next_addr_var, new_next_var;
713 const char *name = NULL;
714 AllocatorWrapperInfo *info;
/* One-time registration of the slow-path allocation icalls. */
717 mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
718 mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
719 mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
723 if (atype == ATYPE_SMALL) {
726 } else if (atype == ATYPE_NORMAL) {
729 } else if (atype == ATYPE_VECTOR) {
731 name = "AllocVector";
732 } else if (atype == ATYPE_STRING) {
734 name = "AllocString";
736 g_assert_not_reached ();
/* Signature: string allocator is (intptr vtable, int32 len); the others take
 * intptr arguments only. */
739 csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
740 if (atype == ATYPE_STRING) {
741 csig->ret = &mono_defaults.string_class->byval_arg;
742 csig->params [0] = &mono_defaults.int_class->byval_arg;
743 csig->params [1] = &mono_defaults.int32_class->byval_arg;
745 csig->ret = &mono_defaults.object_class->byval_arg;
746 for (i = 0; i < num_params; ++i)
747 csig->params [i] = &mono_defaults.int_class->byval_arg;
750 mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
/* Phase 1: compute the allocation size into size_var, per allocator kind. */
753 size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
754 if (atype == ATYPE_SMALL) {
755 /* size_var = size_arg */
756 mono_mb_emit_ldarg (mb, 1);
757 mono_mb_emit_stloc (mb, size_var);
758 } else if (atype == ATYPE_NORMAL) {
759 /* size = vtable->klass->instance_size; */
760 mono_mb_emit_ldarg (mb, 0);
761 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
762 mono_mb_emit_byte (mb, CEE_ADD);
763 mono_mb_emit_byte (mb, CEE_LDIND_I);
764 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, instance_size));
765 mono_mb_emit_byte (mb, CEE_ADD);
766 /* FIXME: assert instance_size stays a 4 byte integer */
767 mono_mb_emit_byte (mb, CEE_LDIND_U4);
768 mono_mb_emit_byte (mb, CEE_CONV_I);
769 mono_mb_emit_stloc (mb, size_var);
770 } else if (atype == ATYPE_VECTOR) {
771 MonoExceptionClause *clause;
772 int pos, pos_leave, pos_error;
773 MonoClass *oom_exc_class;
777 * n > MONO_ARRAY_MAX_INDEX => OutOfMemoryException
778 * n < 0 => OverflowException
780 * We can do an unsigned comparison to catch both cases, then in the error
781 * case compare signed to distinguish between them.
783 mono_mb_emit_ldarg (mb, 1);
784 mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
785 mono_mb_emit_byte (mb, CEE_CONV_U);
786 pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
788 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
789 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
790 mono_mb_emit_ldarg (mb, 1);
791 mono_mb_emit_icon (mb, 0);
792 pos_error = mono_mb_emit_short_branch (mb, CEE_BLT_S);
793 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
794 mono_mb_patch_short_branch (mb, pos_error);
795 mono_mb_emit_exception (mb, "OverflowException", NULL);
797 mono_mb_patch_short_branch (mb, pos);
/* size = element_size * n + sizeof (MonoArray), with unsigned overflow
 * checks wrapped in a try/catch that rethrows overflow as OOM. */
799 clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
800 clause->try_offset = mono_mb_get_label (mb);
802 /* vtable->klass->sizes.element_size */
803 mono_mb_emit_ldarg (mb, 0);
804 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
805 mono_mb_emit_byte (mb, CEE_ADD);
806 mono_mb_emit_byte (mb, CEE_LDIND_I);
807 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, sizes));
808 mono_mb_emit_byte (mb, CEE_ADD);
809 mono_mb_emit_byte (mb, CEE_LDIND_U4);
810 mono_mb_emit_byte (mb, CEE_CONV_I);
813 mono_mb_emit_ldarg (mb, 1);
814 mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
815 /* + sizeof (MonoArray) */
816 mono_mb_emit_icon (mb, sizeof (MonoArray));
817 mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
818 mono_mb_emit_stloc (mb, size_var);
820 pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);
823 clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
824 clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
825 clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
826 "System", "OverflowException");
827 g_assert (clause->data.catch_class);
828 clause->handler_offset = mono_mb_get_label (mb);
830 oom_exc_class = mono_class_from_name (mono_defaults.corlib,
831 "System", "OutOfMemoryException");
832 g_assert (oom_exc_class);
833 ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
836 mono_mb_emit_byte (mb, CEE_POP);
837 mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
838 mono_mb_emit_byte (mb, CEE_THROW);
840 clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
841 mono_mb_set_clauses (mb, 1, clause);
842 mono_mb_patch_branch (mb, pos_leave);
844 } else if (atype == ATYPE_STRING) {
848 * a string allocator method takes the args: (vtable, len)
850 * bytes = offsetof (MonoString, chars) + ((len + 1) * 2)
854 * bytes <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
858 * offsetof (MonoString, chars) + ((len + 1) * 2) <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
859 * len <= (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - offsetof (MonoString, chars)) / 2 - 1
861 mono_mb_emit_ldarg (mb, 1);
862 mono_mb_emit_icon (mb, (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - MONO_STRUCT_OFFSET (MonoString, chars)) / 2 - 1);
863 pos = mono_mb_emit_short_branch (mb, MONO_CEE_BLE_UN_S);
865 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
866 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
867 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
868 mono_mb_patch_short_branch (mb, pos);
870 mono_mb_emit_ldarg (mb, 1);
871 mono_mb_emit_icon (mb, 1);
872 mono_mb_emit_byte (mb, MONO_CEE_SHL);
873 //WE manually fold the above + 2 here
874 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, chars) + 2);
875 mono_mb_emit_byte (mb, CEE_ADD);
876 mono_mb_emit_stloc (mb, size_var);
878 g_assert_not_reached ();
/* Phase 2: align the size; ATYPE_SMALL callers pass a pre-aligned size. */
881 if (atype != ATYPE_SMALL) {
882 /* size += ALLOC_ALIGN - 1; */
883 mono_mb_emit_ldloc (mb, size_var);
884 mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
885 mono_mb_emit_byte (mb, CEE_ADD);
886 /* size &= ~(ALLOC_ALIGN - 1); */
887 mono_mb_emit_icon (mb, ~(SGEN_ALLOC_ALIGN - 1));
888 mono_mb_emit_byte (mb, CEE_AND);
889 mono_mb_emit_stloc (mb, size_var);
892 /* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
893 if (atype != ATYPE_SMALL) {
894 mono_mb_emit_ldloc (mb, size_var);
895 mono_mb_emit_icon (mb, SGEN_MAX_SMALL_OBJ_SIZE);
896 max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_UN_S);
900 * We need to modify tlab_next, but the JIT only supports reading, so we read
901 * another tls var holding its address instead.
904 /* tlab_next_addr (local) = tlab_next_addr (TLS var) */
905 tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
906 EMIT_TLS_ACCESS_NEXT_ADDR (mb);
907 mono_mb_emit_stloc (mb, tlab_next_addr_var);
909 /* p = (void**)tlab_next; */
910 p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
911 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
912 mono_mb_emit_byte (mb, CEE_LDIND_I);
913 mono_mb_emit_stloc (mb, p_var);
915 /* new_next = (char*)p + size; */
916 new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
917 mono_mb_emit_ldloc (mb, p_var);
918 mono_mb_emit_ldloc (mb, size_var);
919 mono_mb_emit_byte (mb, CEE_CONV_I);
920 mono_mb_emit_byte (mb, CEE_ADD);
921 mono_mb_emit_stloc (mb, new_next_var);
923 /* if (G_LIKELY (new_next < tlab_temp_end)) */
924 mono_mb_emit_ldloc (mb, new_next_var);
925 EMIT_TLS_ACCESS_TEMP_END (mb);
926 slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
/* Phase 3 (slow path): call the matching mono_gc_alloc_* icall and return. */
929 if (atype != ATYPE_SMALL)
930 mono_mb_patch_short_branch (mb, max_size_branch);
932 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
933 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
935 /* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
936 mono_mb_emit_ldarg (mb, 0);
937 mono_mb_emit_ldloc (mb, size_var);
938 if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
939 mono_mb_emit_icall (mb, mono_gc_alloc_obj);
940 } else if (atype == ATYPE_VECTOR) {
941 mono_mb_emit_ldarg (mb, 1);
942 mono_mb_emit_icall (mb, mono_gc_alloc_vector);
943 } else if (atype == ATYPE_STRING) {
944 mono_mb_emit_ldarg (mb, 1);
945 mono_mb_emit_icall (mb, mono_gc_alloc_string);
947 g_assert_not_reached ();
949 mono_mb_emit_byte (mb, CEE_RET);
/* Phase 4 (fast path): publish the bump pointer, then the vtable. */
952 mono_mb_patch_short_branch (mb, slowpath_branch);
954 /* FIXME: Memory barrier */
956 /* tlab_next = new_next */
957 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
958 mono_mb_emit_ldloc (mb, new_next_var);
959 mono_mb_emit_byte (mb, CEE_STIND_I);
961 /*The tlab store must be visible before the the vtable store. This could be replaced with a DDS but doing it with IL would be tricky. */
962 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
963 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
964 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
967 mono_mb_emit_ldloc (mb, p_var);
968 mono_mb_emit_ldarg (mb, 0);
969 mono_mb_emit_byte (mb, CEE_STIND_I);
/* Phase 5: initialize array length / string length+terminator in place. */
971 if (atype == ATYPE_VECTOR) {
972 /* arr->max_length = max_length; */
973 mono_mb_emit_ldloc (mb, p_var);
974 mono_mb_emit_ldflda (mb, MONO_STRUCT_OFFSET (MonoArray, max_length));
975 mono_mb_emit_ldarg (mb, 1);
976 #ifdef MONO_BIG_ARRAYS
977 mono_mb_emit_byte (mb, CEE_STIND_I);
979 mono_mb_emit_byte (mb, CEE_STIND_I4);
981 } else if (atype == ATYPE_STRING) {
982 /* need to set length and clear the last char */
983 /* s->length = len; */
984 mono_mb_emit_ldloc (mb, p_var);
985 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, length));
986 mono_mb_emit_byte (mb, MONO_CEE_ADD);
987 mono_mb_emit_ldarg (mb, 1);
988 mono_mb_emit_byte (mb, MONO_CEE_STIND_I4);
989 /* s->chars [len] = 0; */
990 mono_mb_emit_ldloc (mb, p_var);
991 mono_mb_emit_ldloc (mb, size_var);
992 mono_mb_emit_icon (mb, 2);
993 mono_mb_emit_byte (mb, MONO_CEE_SUB);
994 mono_mb_emit_byte (mb, MONO_CEE_ADD);
995 mono_mb_emit_icon (mb, 0);
996 mono_mb_emit_byte (mb, MONO_CEE_STIND_I2);
1000 We must make sure both vtable and max_length are globaly visible before returning to managed land.
1002 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1003 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
1004 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
1007 mono_mb_emit_ldloc (mb, p_var);
1008 mono_mb_emit_byte (mb, CEE_RET);
/* Finalize the wrapper; locals are explicitly initialized by the IL above. */
1011 res = mono_mb_create_method (mb, csig, 8);
1013 mono_method_get_header (res)->init_locals = FALSE;
1015 info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
1016 info->gc_name = "sgen";
1017 info->alloc_type = atype;
1018 mono_marshal_set_wrapper_info (res, info);
/*
 * mono_gc_get_aligned_size_for_allocator: round SIZE up to SGEN_ALLOC_ALIGN,
 * mirroring the alignment the managed allocator performs, so callers can pass
 * a pre-aligned size to the ATYPE_SMALL fast path.
 */
1025 mono_gc_get_aligned_size_for_allocator (int size)
1027 int aligned_size = size;
1028 aligned_size += SGEN_ALLOC_ALIGN - 1;
1029 aligned_size &= ~(SGEN_ALLOC_ALIGN - 1);
1030 return aligned_size;
1034 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
1035 * The signature of the called method is:
1036 * object allocate (MonoVTable *vtable)
/*
 * Pick the managed allocator wrapper for KLASS, or (on the elided early-return
 * paths) NULL when managed allocation is not applicable: collect-before-alloc
 * debugging, no fast TLS, oversized instances, finalizers, MarshalByRef, or
 * allocation profiling all force the C slow path.
 */
1039 mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
1041 #ifdef MANAGED_ALLOCATION
1042 if (collect_before_allocs)
1044 if (!mono_runtime_has_tls_get ())
1046 if (klass->instance_size > tlab_size)
1048 if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
1050 if (klass->has_finalize || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
1054 if (klass->byval_arg.type == MONO_TYPE_STRING)
1055 return mono_gc_get_managed_allocator_by_type (ATYPE_STRING);
1056 /* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
1057 if (known_instance_size)
1058 return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
1060 return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
/*
 * mono_gc_get_managed_array_allocator: return the managed vector allocator
 * for KLASS, restricted to rank-1 arrays with fast TLS, no allocation
 * profiling and no per-allocation debug actions.
 */
1067 mono_gc_get_managed_array_allocator (MonoClass *klass)
1069 #ifdef MANAGED_ALLOCATION
1070 if (klass->rank != 1)
1072 if (!mono_runtime_has_tls_get ())
1074 if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
1076 if (has_per_allocation_action)
1078 g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
1080 return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
/* Runtime switch (e.g. from GC debug options) enabling/disabling the managed
 * allocator wrappers globally. */
1087 sgen_set_use_managed_allocator (gboolean flag)
1089 use_managed_allocator = flag;
/*
 * mono_gc_get_managed_allocator_by_type: return (building and caching on
 * first use) the allocator wrapper for ATYPE.  Uses a check/create/re-check
 * pattern so a racing creator's method wins and the loser is freed; the
 * barrier orders the method's construction before cache publication.
 */
1093 mono_gc_get_managed_allocator_by_type (int atype)
1095 #ifdef MANAGED_ALLOCATION
1098 if (!use_managed_allocator)
1101 if (!mono_runtime_has_tls_get ())
1104 res = alloc_method_cache [atype];
1108 res = create_allocator (atype);
1110 if (alloc_method_cache [atype]) {
1111 mono_free_method (res);
1112 res = alloc_method_cache [atype];
1114 mono_memory_barrier ();
1115 alloc_method_cache [atype] = res;
1126 mono_gc_get_managed_allocator_types (void)
/* Whether METHOD is one of the cached managed allocator wrappers. */
1132 sgen_is_managed_allocator (MonoMethod *method)
1136 for (i = 0; i < ATYPE_NUM; ++i)
1137 if (method == alloc_method_cache [i])
/* TRUE iff at least one managed allocator stub has been created. */
1143 sgen_has_managed_allocator (void)
1147 for (i = 0; i < ATYPE_NUM; ++i)
1148 if (alloc_method_cache [i])
1154 * Cardtable scanning
/* Mask to test mword alignment of card pointers. */
1157 #define MWORD_MASK (sizeof (mword) - 1)
/*
 * Given a non-zero mword CARD read from the card table, return the byte
 * offset of the first marked card inside it.  ffs-based fast paths return
 * (bit index of lowest set bit) / 8; the generic fallback scans byte by
 * byte.  On s390x (big-endian) the word is byte-swapped first so the ffs
 * trick still finds the lowest-addressed marked byte.
 */
1160 find_card_offset (mword card)
1162 /*XXX Use assembly as this generates some pretty bad code */
1163 #if defined(__i386__) && defined(__GNUC__)
1164 return (__builtin_ffs (card) - 1) / 8;
1165 #elif defined(__x86_64__) && defined(__GNUC__)
1166 return (__builtin_ffsll (card) - 1) / 8;
1167 #elif defined(__s390x__)
1168 return (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
/* Generic fallback: linear scan of the word's bytes. */
1171 guint8 *ptr = (guint8 *) &card;
1172 for (i = 0; i < sizeof (mword); ++i) {
/*
 * Return a pointer to the first marked card in [CARD_DATA, END), scanning in
 * three phases for speed: byte-by-byte until CARD_DATA is mword-aligned,
 * then a whole mword at a time (using find_card_offset () to locate the
 * marked byte within a non-zero word), then byte-by-byte over the unaligned
 * tail.  The elided lines presumably return END when no card is marked.
 */
1181 find_next_card (guint8 *card_data, guint8 *end)
1183 mword *cards, *cards_end;
/* Phase 1: advance to mword alignment. */
1186 while ((((mword)card_data) & MWORD_MASK) && card_data < end) {
1192 if (card_data == end)
/* Phase 2: scan aligned mwords up to the last full word before END. */
1195 cards = (mword*)card_data;
1196 cards_end = (mword*)((mword)end & ~MWORD_MASK);
1197 while (cards < cards_end) {
1200 return (guint8*)cards + find_card_offset (card);
/* Phase 3: unaligned tail. */
1204 card_data = (guint8*)cards_end;
1205 while (card_data < end) {
/* Element index of PTR within ARRAY, given the per-element size. */
1214 #define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
/*
 * Scan the marked cards covering OBJ (a large or array object) and trace the
 * references found on those cards via CTX.  If MOD_UNION is set, every
 * non-NULL slot is traced (mod-union scan during concurrent major
 * collection); otherwise only nursery references are, and any reference that
 * remains in the nursery after copying is re-registered in the global remset.
 * With SGEN_HAVE_OVERLAPPING_CARDS, an object whose cards wrap past the end
 * of the shadow card table is scanned in two passes.
 */
1217 sgen_client_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
1219 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
1220 MonoClass *klass = vt->klass;
1222 SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES ((GCVTable*)vt), "Why would we ever call this on reference-free objects?");
1225 guint8 *card_data, *card_base;
1226 guint8 *card_data_end;
1227 char *obj_start = sgen_card_table_align_pointer (obj);
1228 mword obj_size = sgen_client_par_object_get_size ((GCVTable*)vt, (GCObject*)obj);
1229 char *obj_end = obj + obj_size;
1231 size_t extra_idx = 0;
1233 MonoArray *arr = (MonoArray*)obj;
1234 mword desc = (mword)klass->element_class->gc_descr;
1235 int elem_size = mono_array_element_size (klass);
1237 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1238 guint8 *overflow_scan_end = NULL;
1241 #ifdef SGEN_OBJECT_LAYOUT_STATISTICS
1242 if (klass->element_class->valuetype)
1243 sgen_object_layout_scanned_vtype_array ();
1245 sgen_object_layout_scanned_ref_array ();
/* Establish the card range covering the object. */
1251 card_data = sgen_card_table_get_card_scan_address ((mword)obj);
1253 card_base = card_data;
1254 card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);
1255 card_data_end = card_data + card_count;
1258 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1259 /*Check for overflow and if so, setup to scan in two steps*/
1260 if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
1261 overflow_scan_end = sgen_shadow_cardtable + (card_data_end - SGEN_SHADOW_CARDTABLE_END);
1262 card_data_end = SGEN_SHADOW_CARDTABLE_END;
/* Visit only marked cards; find_next_card () skips over clean ones. */
1268 card_data = find_next_card (card_data, card_data_end);
1269 for (; card_data < card_data_end; card_data = find_next_card (card_data + 1, card_data_end)) {
1271 size_t idx = (card_data - card_base) + extra_idx;
1272 char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
1273 char *card_end = start + CARD_SIZE_IN_BYTES;
1274 char *first_elem, *elem;
1276 HEAVY_STAT (++los_marked_cards);
1279 sgen_card_table_prepare_card_for_scanning (card_data);
1281 card_end = MIN (card_end, obj_end);
/* Clamp the scan start to the first array element on this card. */
1283 if (start <= (char*)arr->vector)
1286 index = ARRAY_OBJ_INDEX (start, obj, elem_size);
1288 elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
1289 if (klass->element_class->valuetype) {
/* Valuetype elements: let the vtype scanner walk each element's refs. */
1290 ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;
1292 for (; elem < card_end; elem += elem_size)
1293 scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
/* Reference elements: copy/mark each slot directly. */
1295 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
1297 HEAVY_STAT (++los_array_cards);
1298 for (; elem < card_end; elem += SIZEOF_VOID_P) {
1299 gpointer new, old = *(gpointer*)elem;
1300 if ((mod_union && old) || G_UNLIKELY (sgen_ptr_in_nursery (old))) {
1301 HEAVY_STAT (++los_array_remsets);
1302 copy_func ((void**)elem, ctx.queue);
1303 new = *(gpointer*)elem;
/* Target still in the nursery: remember it for the next minor GC. */
1304 if (G_UNLIKELY (sgen_ptr_in_nursery (new)))
1305 sgen_add_to_global_remset (elem, new);
1310 binary_protocol_card_scan (first_elem, elem - first_elem);
1313 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
/* Second pass for the wrapped portion of the shadow card table. */
1314 if (overflow_scan_end) {
1315 extra_idx = card_data - card_base;
1316 card_base = card_data = sgen_shadow_cardtable;
1317 card_data_end = overflow_scan_end;
1318 overflow_scan_end = NULL;
1329 * Array and string allocation
/*
 * Allocate a rank-1 array (vector) of SIZE bytes with MAX_LENGTH elements.
 * First tries the lock-free critical-region fast path; on failure falls back
 * to the locked slow path (the elided lines presumably take/release the GC
 * lock).  Returns NULL-propagating mono_gc_out_of_memory () on OOM.
 */
1333 mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
1338 if (!SGEN_CAN_ALIGN_UP (size))
1341 #ifndef DISABLE_CRITICAL_REGION
1342 ENTER_CRITICAL_REGION;
1343 arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1345 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1346 arr->max_length = (mono_array_size_t)max_length;
1347 EXIT_CRITICAL_REGION;
1350 EXIT_CRITICAL_REGION;
/* Slow path: allocate under the GC lock. */
1355 arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1356 if (G_UNLIKELY (!arr)) {
1358 return mono_gc_out_of_memory (size);
1361 arr->max_length = (mono_array_size_t)max_length;
1366 SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Vector has incorrect size.");
/*
 * Allocate a multi-dimensional array of SIZE bytes with MAX_LENGTH elements
 * and BOUNDS_SIZE bytes of MonoArrayBounds stored at the tail of the object.
 * Same fast-path/slow-path structure as mono_gc_alloc_vector ().
 */
1371 mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
1374 MonoArrayBounds *bounds;
1377 if (!SGEN_CAN_ALIGN_UP (size))
1380 #ifndef DISABLE_CRITICAL_REGION
1381 ENTER_CRITICAL_REGION;
1382 arr = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1384 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1385 arr->max_length = (mono_array_size_t)max_length;
/* Bounds live in the last BOUNDS_SIZE bytes of the allocation. */
1387 bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1388 arr->bounds = bounds;
1389 EXIT_CRITICAL_REGION;
1392 EXIT_CRITICAL_REGION;
/* Slow path: allocate under the GC lock. */
1397 arr = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1398 if (G_UNLIKELY (!arr)) {
1400 return mono_gc_out_of_memory (size);
1403 arr->max_length = (mono_array_size_t)max_length;
1405 bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1406 arr->bounds = bounds;
1411 SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size ((GCVTable*)vtable, (GCObject*)arr)), "Array has incorrect size.");
/*
 * Allocate a string object of SIZE bytes for LEN characters.  Same
 * fast-path/slow-path structure as mono_gc_alloc_vector (); the length
 * assignment is among the elided lines in this view.
 */
1416 mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
1421 if (!SGEN_CAN_ALIGN_UP (size))
1424 #ifndef DISABLE_CRITICAL_REGION
1425 ENTER_CRITICAL_REGION;
1426 str = sgen_try_alloc_obj_nolock ((GCVTable*)vtable, size);
1428 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1430 EXIT_CRITICAL_REGION;
1433 EXIT_CRITICAL_REGION;
/* Slow path: allocate under the GC lock. */
1438 str = sgen_alloc_obj_nolock ((GCVTable*)vtable, size);
1439 if (G_UNLIKELY (!str)) {
1441 return mono_gc_out_of_memory (size);
/*
 * Truncate STR in place to NEW_LENGTH characters.  The discarded tail is
 * zeroed so the string stays NUL-delimited and SGen can reclaim the space;
 * when nursery canaries are enabled, the canary after the (shorter) string
 * is re-written as well.
 */
1456 mono_gc_set_string_length (MonoString *str, gint32 new_length)
1458 mono_unichar2 *new_end = str->chars + new_length;
1460 /* zero the discarded string. This null-delimits the string and allows
1461 * the space to be reclaimed by SGen. */
1463 if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
1464 CHECK_CANARY_FOR_OBJECT (str);
1465 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
1466 memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE);
1468 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
1471 str->length = new_length;
1478 #define GC_ROOT_NUM 32
/* Batched buffer of GC roots reported to the profiler; flushed via
 * notify_gc_roots () whenever it fills up (GC_ROOT_NUM entries). */
1480 int count; /* must be the first field */
1481 void *objects [GC_ROOT_NUM];
1482 int root_types [GC_ROOT_NUM];
1483 uintptr_t extra_info [GC_ROOT_NUM];
/* Flush the buffered roots in REPORT to the profiler (resets count — elided in this view). */
1487 notify_gc_roots (GCRootReport *report)
1491 mono_profiler_gc_roots (report->count, report->objects, report->root_types, report->extra_info);
/*
 * Append OBJECT (with root kind RTYPE) to REPORT, flushing first if the
 * buffer is full.  The extra_info slot records the object's class.
 * NOTE(review): the passed-in extra_info argument is ignored by the visible
 * code — the klass pointer is stored instead.
 */
1496 add_profile_gc_root (GCRootReport *report, void *object, int rtype, uintptr_t extra_info)
1498 if (report->count == GC_ROOT_NUM)
1499 notify_gc_roots (report);
1500 report->objects [report->count] = object;
1501 report->root_types [report->count] = rtype;
1502 report->extra_info [report->count++] = (uintptr_t)((MonoVTable*)SGEN_LOAD_VTABLE (object))->klass;
/* Report all COUNT pinned nursery objects to the profiler as pinning roots,
 * if root profiling is enabled. */
1506 sgen_client_nursery_objects_pinned (void **definitely_pinned, int count)
1508 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS) {
1509 GCRootReport report;
1512 for (idx = 0; idx < count; ++idx)
1513 add_profile_gc_root (&report, definitely_pinned [idx], MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
1514 notify_gc_roots (&report);
/* Report every object in QUEUE to the profiler as a finalizer root. */
1519 report_finalizer_roots_from_queue (SgenPointerQueue *queue)
1521 GCRootReport report;
1525 for (i = 0; i < queue->next_slot; ++i) {
1526 void *obj = queue->data [i];
1529 add_profile_gc_root (&report, obj, MONO_PROFILE_GC_ROOT_FINALIZER, 0);
1531 notify_gc_roots (&report);
/* Report both the ready and the critical finalizer queues as roots. */
1535 report_finalizer_roots (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1537 report_finalizer_roots_from_queue (fin_ready_queue);
1538 report_finalizer_roots_from_queue (critical_fin_queue);
/* Report currently being filled by precisely_report_roots_from ()'s
 * ROOT_DESC_USER case; set before invoking the user marker callback. */
1541 static GCRootReport *root_report;
/* User-marker callback: funnel each marked object into root_report. */
1544 single_arg_report_root (void **obj, void *gc_data)
1547 add_profile_gc_root (root_report, *obj, MONO_PROFILE_GC_ROOT_OTHER, 0);
/*
 * Walk the precise root region [START_ROOT, END_ROOT) according to its GC
 * descriptor DESC and report every non-NULL reference slot to the profiler.
 * Handles bitmap, complex (multi-word bitmap) and user-marker descriptors;
 * run-length descriptors are not expected here.
 */
1551 precisely_report_roots_from (GCRootReport *report, void** start_root, void** end_root, mword desc)
1553 switch (desc & ROOT_DESC_TYPE_MASK) {
1554 case ROOT_DESC_BITMAP:
/* One bit per word: bit set => slot holds a reference. */
1555 desc >>= ROOT_DESC_TYPE_SHIFT;
1557 if ((desc & 1) && *start_root) {
1558 add_profile_gc_root (report, *start_root, MONO_PROFILE_GC_ROOT_OTHER, 0);
1564 case ROOT_DESC_COMPLEX: {
/* Multi-word bitmap: first word of bitmap_data is the word count + 1. */
1565 gsize *bitmap_data = sgen_get_complex_descriptor_bitmap (desc);
1566 gsize bwords = (*bitmap_data) - 1;
1567 void **start_run = start_root;
1569 while (bwords-- > 0) {
1570 gsize bmap = *bitmap_data++;
1571 void **objptr = start_run;
1573 if ((bmap & 1) && *objptr) {
1574 add_profile_gc_root (report, *objptr, MONO_PROFILE_GC_ROOT_OTHER, 0);
1579 start_run += GC_BITS_PER_WORD;
1583 case ROOT_DESC_USER: {
/* Hand off to the user-supplied marker; it calls back through
 * single_arg_report_root () via the root_report global. */
1584 MonoGCRootMarkFunc marker = sgen_get_user_descriptor_func (desc);
1585 root_report = report;
1586 marker (start_root, single_arg_report_root, NULL);
1589 case ROOT_DESC_RUN_LEN:
1590 g_assert_not_reached ();
1592 g_assert_not_reached ();
/* Report all registered roots of ROOT_TYPE (precise descriptors) to the profiler. */
1597 report_registered_roots_by_type (int root_type)
1599 GCRootReport report;
1603 SGEN_HASH_TABLE_FOREACH (&roots_hash [root_type], start_root, root) {
1604 SGEN_LOG (6, "Precise root scan %p-%p (desc: %p)", start_root, root->end_root, (void*)root->root_desc);
1605 precisely_report_roots_from (&report, start_root, (void**)root->end_root, root->root_desc);
1606 } SGEN_HASH_TABLE_FOREACH_END;
1607 notify_gc_roots (&report);
/* Report both normal and write-barrier registered roots. */
1611 report_registered_roots (void)
1613 report_registered_roots_by_type (ROOT_TYPE_NORMAL);
1614 report_registered_roots_by_type (ROOT_TYPE_WBARRIER);
/* Hook called at the start of a minor collection: emit root reports to the
 * profiler when root profiling is enabled. */
1618 sgen_client_collecting_minor (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1620 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1621 report_registered_roots ();
1622 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1623 report_finalizer_roots (fin_ready_queue, critical_fin_queue);
/* Root report accumulated across the phases of a major collection. */
1626 static GCRootReport major_root_report;
/* Whether root profiling is active for the current major collection. */
1627 static gboolean profile_roots;
/* Major-collection hook, phase 1: latch the profiling flag and reset the report. */
1630 sgen_client_collecting_major_1 (void)
1632 profile_roots = mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS;
1633 memset (&major_root_report, 0, sizeof (GCRootReport));
/* Record a pinned large object as a pinning root (when profiling roots). */
1637 sgen_client_pinned_los_object (char *obj)
1640 add_profile_gc_root (&major_root_report, obj, MONO_PROFILE_GC_ROOT_PINNING | MONO_PROFILE_GC_ROOT_MISC, 0);
/* Major-collection hook, phase 2: flush accumulated LOS pinning roots and
 * report registered roots. */
1644 sgen_client_collecting_major_2 (void)
1647 notify_gc_roots (&major_root_report);
1649 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1650 report_registered_roots ();
/* Major-collection hook, phase 3: report finalizer queues as roots. */
1654 sgen_client_collecting_major_3 (SgenPointerQueue *fin_ready_queue, SgenPointerQueue *critical_fin_queue)
1656 if (mono_profiler_get_events () & MONO_PROFILE_GC_ROOTS)
1657 report_finalizer_roots (fin_ready_queue, critical_fin_queue);
/* Human-readable name for a client-specific internal memory type (for debug
 * output); unknown types are handled in lines elided from this view. */
1665 sgen_client_description_for_internal_mem_type (int type)
1668 case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
/* Debug checks run before a collection: when xdomain checking is enabled,
 * clear nursery fragments so the heap is walkable, then verify no
 * cross-domain references remain. */
1675 sgen_client_pre_collection_checks (void)
1677 if (sgen_mono_xdomain_checks) {
1678 sgen_clear_nursery_fragments ();
1679 sgen_check_for_xdomain_refs ();
/* Class name of OBJ, for diagnostics. */
1684 sgen_client_object_safe_name (GCObject *obj)
1686 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
1687 return vt->klass->name;
/* Namespace of the class behind GC_VTABLE, for diagnostics. */
1691 sgen_client_vtable_get_namespace (GCVTable *gc_vtable)
1693 MonoVTable *vt = (MonoVTable*)gc_vtable;
1694 return vt->klass->name_space;
/* Class name behind GC_VTABLE, for diagnostics. */
1698 sgen_client_vtable_get_name (GCVTable *gc_vtable)
1700 MonoVTable *vt = (MonoVTable*)gc_vtable;
1701 return vt->klass->name;
/* Client-side SGen initialization: register the fixed-size allocator for
 * ephemeron link nodes. */
1709 sgen_client_init (void)
1711 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
/* Parse one MONO_GC_DEBUG option string; "xdomain-checks" turns on
 * cross-domain reference checking at domain unload. */
1715 sgen_client_handle_gc_debug (const char *opt)
1717 if (!strcmp (opt, "xdomain-checks")) {
1718 sgen_mono_xdomain_checks = TRUE;
1726 sgen_client_print_gc_debug_usage (void)
1728 fprintf (stderr, " xdomain-checks\n");