2 * sgen-mono.c: SGen features specific to Mono.
4 * Copyright (C) 2014 Xamarin Inc
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License 2.0 as published by the Free Software Foundation;
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License 2.0 along with this library; if not, write to the Free
17 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "metadata/sgen-gc.h"
24 #include "metadata/sgen-protocol.h"
25 #include "metadata/monitor.h"
26 #include "metadata/sgen-layout-stats.h"
27 #include "metadata/sgen-client.h"
28 #include "metadata/sgen-cardtable.h"
29 #include "metadata/marshal.h"
30 #include "metadata/method-builder.h"
31 #include "metadata/abi-details.h"
32 #include "metadata/profiler-private.h"
33 #include "utils/mono-memory-model.h"
35 /* If set, check that there are no references to the domain left at domain unload */
36 gboolean sgen_mono_xdomain_checks = FALSE;
/* Round VAL up to the next multiple of ALIGN; ALIGN must be a power of two.
 * The guint64 cast avoids truncation when VAL is a pointer on 64-bit hosts. */
38 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/*
 * ptr_on_stack:
 * Tests whether PTR lies within the calling thread's stack, i.e. between the
 * address of a fresh local (approximating the current stack top) and the
 * thread's recorded stack_end.
 * NOTE(review): this extract is missing lines (return type, braces, returns).
 */
45 ptr_on_stack (void *ptr)
47 gpointer stack_start = &stack_start;
48 SgenThreadInfo *info = mono_thread_info_current ();
50 if (ptr >= stack_start && ptr < (gpointer)info->stack_end)
/*
 * scan_object_for_binary_protocol_copy_wbarrier:
 * Only built with SGEN_HEAVY_BINARY_PROTOCOL. HANDLE_PTR records, for each
 * reference field of the source object, a wbarrier entry at the matching
 * offset inside DEST; sgen-scan-object.h expands it over the object layout
 * described by DESC (SCAN_OBJECT_NOVTABLE: layout comes from DESC, not the
 * object's vtable).
 * NOTE(review): macro and function bodies are missing lines in this extract.
 */
55 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
57 #define HANDLE_PTR(ptr,obj) do { \
58 gpointer o = *(gpointer*)(ptr); \
60 gpointer d = ((char*)dest) + ((char*)(ptr) - (char*)(obj)); \
61 binary_protocol_wbarrier (d, o, (gpointer) SGEN_LOAD_VTABLE (o)); \
66 scan_object_for_binary_protocol_copy_wbarrier (gpointer dest, char *start, mword desc)
68 #define SCAN_OBJECT_NOVTABLE
69 #include "sgen-scan-object.h"
/*
 * mono_gc_wbarrier_value_copy:
 * Write barrier for copying COUNT value-type instances of KLASS from SRC to
 * DEST. If DEST is in the nursery, on the stack, or KLASS holds no
 * references, a plain atomic memmove suffices (no remembered-set entry is
 * needed); otherwise the remset's wbarrier_value_copy is invoked.
 */
74 mono_gc_wbarrier_value_copy (gpointer dest, gpointer src, int count, MonoClass *klass)
76 HEAVY_STAT (++stat_wbarrier_value_copy);
77 g_assert (klass->valuetype);
79 SGEN_LOG (8, "Adding value remset at %p, count %d, descr %p for class %s (%p)", dest, count, klass->gc_descr, klass->name, klass);
81 if (sgen_ptr_in_nursery (dest) || ptr_on_stack (dest) || !SGEN_CLASS_HAS_REFERENCES (klass)) {
82 size_t element_size = mono_class_value_size (klass, NULL);
83 size_t size = count * element_size;
84 mono_gc_memmove_atomic (dest, src, size);
/* Heavy binary protocol: log a wbarrier record per copied element. The
 * "- sizeof (MonoObject)" adjusts SRC so the scan macro's object-relative
 * offsets line up with the raw value-type payload. */
88 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
89 if (binary_protocol_is_heavy_enabled ()) {
90 size_t element_size = mono_class_value_size (klass, NULL);
92 for (i = 0; i < count; ++i) {
93 scan_object_for_binary_protocol_copy_wbarrier ((char*)dest + i * element_size,
94 (char*)src + i * element_size - sizeof (MonoObject),
95 (mword) klass->gc_descr);
100 sgen_get_remset ()->wbarrier_value_copy (dest, src, count, mono_class_value_size (klass, NULL));
/*
 * mono_gc_wbarrier_object_copy:
 * Write barrier for cloning/copying a whole object. Copies the payload
 * (everything after the MonoObject header) from SRC into OBJ; when OBJ is in
 * the nursery or on the stack no remset entry is required, otherwise the
 * remset's wbarrier_object_copy handles the copy plus card/remset updates.
 */
104 * mono_gc_wbarrier_object_copy:
106 * Write barrier to call when obj is the result of a clone or copy of an object.
109 mono_gc_wbarrier_object_copy (MonoObject* obj, MonoObject *src)
113 HEAVY_STAT (++stat_wbarrier_object_copy);
115 if (sgen_ptr_in_nursery (obj) || ptr_on_stack (obj)) {
116 size = mono_object_class (obj)->instance_size;
117 mono_gc_memmove_aligned ((char*)obj + sizeof (MonoObject), (char*)src + sizeof (MonoObject),
118 size - sizeof (MonoObject));
122 #ifdef SGEN_HEAVY_BINARY_PROTOCOL
123 if (binary_protocol_is_heavy_enabled ())
124 scan_object_for_binary_protocol_copy_wbarrier (obj, (char*)src, (mword) src->vtable->gc_descr);
127 sgen_get_remset ()->wbarrier_object_copy (obj, src);
/*
 * mono_gc_wbarrier_set_arrayref:
 * Write barrier for storing VALUE into an array slot. Nursery slots take a
 * direct store (young objects are scanned anyway); otherwise the store goes
 * through the remset's wbarrier_set_field so the card table/remset is updated.
 */
131 mono_gc_wbarrier_set_arrayref (MonoArray *arr, gpointer slot_ptr, MonoObject* value)
133 HEAVY_STAT (++stat_wbarrier_set_arrayref);
134 if (sgen_ptr_in_nursery (slot_ptr)) {
135 *(void**)slot_ptr = value;
138 SGEN_LOG (8, "Adding remset at %p", slot_ptr);
140 binary_protocol_wbarrier (slot_ptr, value, value->vtable);
142 sgen_get_remset ()->wbarrier_set_field ((MonoObject*)arr, slot_ptr, value);
146 * Dummy filler objects
149 /* Vtable of the objects used to fill out nursery fragments before a collection */
150 static MonoVTable *array_fill_vtable;
/*
 * sgen_client_get_array_fill_vtable:
 * Lazily builds (once) and returns a fake byte-array vtable used to stamp
 * nursery filler objects. The vtable and its class live in static storage;
 * the raw buffer is 8-byte aligned by hand via ALIGN_TO.
 * NOTE(review): lines are missing here (e.g. bmap's declaration) in this extract.
 */
153 sgen_client_get_array_fill_vtable (void)
155 if (!array_fill_vtable) {
156 static MonoClass klass;
157 static char _vtable[sizeof(MonoVTable)+8];
158 MonoVTable* vtable = (MonoVTable*) ALIGN_TO(_vtable, 8);
161 MonoDomain *domain = mono_get_root_domain ();
164 klass.element_class = mono_defaults.byte_class;
166 klass.instance_size = sizeof (MonoArray);
167 klass.sizes.element_size = 1;
168 klass.name = "array_filler_type";
170 vtable->klass = &klass;
172 vtable->gc_descr = mono_gc_make_descr_for_array (TRUE, &bmap, 0, 1);
175 array_fill_vtable = vtable;
177 return array_fill_vtable;
/*
 * sgen_client_array_fill_range:
 * Overwrites [start, start+size) with a dummy byte-array object so the heap
 * stays parseable. Ranges smaller than a MonoArray header are just zeroed.
 * synchronisation == -1 marks the filler as "not a real object".
 */
181 sgen_client_array_fill_range (char *start, size_t size)
185 if (size < sizeof (MonoArray)) {
186 memset (start, 0, size);
190 o = (MonoArray*)start;
191 o->obj.vtable = sgen_client_get_array_fill_vtable ();
192 /* Mark this as not a real object */
193 o->obj.synchronisation = GINT_TO_POINTER (-1);
/* Array length = remaining bytes after the MonoArray header. */
195 o->max_length = (mono_array_size_t)(size - sizeof (MonoArray));
/*
 * sgen_client_zero_array_fill_header:
 * Clears the filler-object header at P so the memory can be reused. If the
 * range is too small to hold a header, it merely asserts the bytes are
 * already zero (TLAB segments must be handed out zeroed).
 */
201 sgen_client_zero_array_fill_header (void *p, size_t size)
203 if (size >= sizeof (MonoArray)) {
204 memset (p, 0, sizeof (MonoArray));
206 static guint8 zeros [sizeof (MonoArray)];
208 SGEN_ASSERT (0, !memcmp (p, zeros, size), "TLAB segment must be zeroed out.");
/* Embedder-registered finalization hooks (see mono_gc_register_finalizer_callbacks). */
216 static MonoGCFinalizerCallbacks fin_callbacks;
/*
 * mono_gc_get_vtable_bits:
 * Computes the SGen GC bits to store in a class's vtable: bridge-object /
 * bridge-opaque bits from the bridge processor's class kind, plus the
 * finalizer-aware bit when the registered callback says the class is
 * finalization-aware.
 * NOTE(review): break statements and the return are missing in this extract.
 */
219 mono_gc_get_vtable_bits (MonoClass *class)
222 /* FIXME move this to the bridge code */
223 if (sgen_need_bridge_processing ()) {
224 switch (sgen_bridge_class_kind (class)) {
225 case GC_BRIDGE_TRANSPARENT_BRIDGE_CLASS:
226 case GC_BRIDGE_OPAQUE_BRIDGE_CLASS:
227 res = SGEN_GC_BIT_BRIDGE_OBJECT;
229 case GC_BRIDGE_OPAQUE_CLASS:
230 res = SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT;
232 case GC_BRIDGE_TRANSPARENT_CLASS:
236 if (fin_callbacks.is_class_finalization_aware) {
237 if (fin_callbacks.is_class_finalization_aware (class))
238 res |= SGEN_GC_BIT_FINALIZER_AWARE;
/* is_finalization_aware: true when OBJ's vtable carries the finalizer-aware GC bit. */
244 is_finalization_aware (MonoObject *obj)
246 MonoVTable *vt = ((MonoVTable*)SGEN_LOAD_VTABLE (obj));
247 return (vt->gc_bits & SGEN_GC_BIT_FINALIZER_AWARE) == SGEN_GC_BIT_FINALIZER_AWARE;
/* Notify the embedder's hook when a finalization-aware object is queued for
 * finalization. */
251 sgen_client_object_queued_for_finalization (MonoObject *obj)
253 if (fin_callbacks.object_queued_for_finalization && is_finalization_aware (obj))
254 fin_callbacks.object_queued_for_finalization (obj);
/* mono_gc_register_finalizer_callbacks: installs the embedder's finalization
 * hooks after validating the struct version; copies the struct by value. */
258 mono_gc_register_finalizer_callbacks (MonoGCFinalizerCallbacks *callbacks)
260 if (callbacks->version != MONO_GC_FINALIZER_EXTENSION_VERSION)
261 g_error ("Invalid finalizer callback version. Expected %d but got %d\n", MONO_GC_FINALIZER_EXTENSION_VERSION, callbacks->version);
263 fin_callbacks = *callbacks;
/* Singly-linked list of registered ephemeron arrays (one node per array). */
270 typedef struct _EphemeronLinkNode EphemeronLinkNode;
272 struct _EphemeronLinkNode {
273 EphemeronLinkNode *next;
282 static EphemeronLinkNode *ephemeron_list;
284 /* LOCKING: requires that the GC lock is held */
/*
 * null_ephemerons_for_domain:
 * Unlinks and frees every ephemeron-list node whose array belongs to DOMAIN
 * (standard prev/current list-removal walk). Called during domain unload.
 */
286 null_ephemerons_for_domain (MonoDomain *domain)
288 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
291 MonoObject *object = (MonoObject*)current->array;
294 SGEN_ASSERT (0, object->vtable, "Can't have objects without vtables.");
296 if (object && object->vtable->domain == domain) {
297 EphemeronLinkNode *tmp = current;
300 prev->next = current->next;
302 ephemeron_list = current->next;
304 current = current->next;
305 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
308 current = current->next;
313 /* LOCKING: requires that the GC lock is held */
/*
 * sgen_client_clear_unreachable_ephemerons:
 * Walks all registered ephemeron arrays. Dead arrays are unlinked and their
 * nodes freed; live arrays are forwarded via copy_func, then each entry whose
 * key is unreachable gets its key replaced with the domain's tombstone so the
 * pair is dropped. Mirrors null_ephemerons_for_domain's list-removal pattern.
 */
315 sgen_client_clear_unreachable_ephemerons (ScanCopyContext ctx)
317 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
318 SgenGrayQueue *queue = ctx.queue;
319 EphemeronLinkNode *current = ephemeron_list, *prev = NULL;
321 Ephemeron *cur, *array_end;
325 char *object = current->array;
327 if (!sgen_is_object_alive_for_current_gen (object)) {
328 EphemeronLinkNode *tmp = current;
330 SGEN_LOG (5, "Dead Ephemeron array at %p", object);
333 prev->next = current->next;
335 ephemeron_list = current->next;
337 current = current->next;
338 sgen_free_internal (tmp, INTERNAL_MEM_EPHEMERON_LINK);
/* Array is alive: update our pointer to its (possibly moved) location. */
343 copy_func ((void**)&object, queue);
344 current->array = object;
346 SGEN_LOG (5, "Clearing unreachable entries for ephemeron array at %p", object);
348 array = (MonoArray*)object;
349 cur = mono_array_addr (array, Ephemeron, 0);
350 array_end = cur + mono_array_length_fast (array);
351 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
353 for (; cur < array_end; ++cur) {
354 char *key = (char*)cur->key;
356 if (!key || key == tombstone)
359 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
360 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
361 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
363 if (!sgen_is_object_alive_for_current_gen (key)) {
364 cur->key = tombstone;
370 current = current->next;
375 LOCKING: requires that the GC lock is held
377 Limitations: We scan all ephemerons on every collection since the current design doesn't allow for a simple nursery/mature split.
/*
 * sgen_client_mark_ephemerons:
 * One pass of the iterate-to-fixpoint ephemeron marking: for every live
 * ephemeron array, each entry whose key is reachable gets both its key and
 * value forwarded/marked. Returns TRUE ("nothing_marked") when no previously
 * unreachable value was marked, i.e. the fixpoint is reached.
 */
380 sgen_client_mark_ephemerons (ScanCopyContext ctx)
382 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
383 SgenGrayQueue *queue = ctx.queue;
384 gboolean nothing_marked = TRUE;
385 EphemeronLinkNode *current = ephemeron_list;
387 Ephemeron *cur, *array_end;
390 for (current = ephemeron_list; current; current = current->next) {
391 char *object = current->array;
392 SGEN_LOG (5, "Ephemeron array at %p", object);
394 /*It has to be alive*/
395 if (!sgen_is_object_alive_for_current_gen (object)) {
396 SGEN_LOG (5, "\tnot reachable");
400 copy_func ((void**)&object, queue);
402 array = (MonoArray*)object;
403 cur = mono_array_addr (array, Ephemeron, 0);
404 array_end = cur + mono_array_length_fast (array);
405 tombstone = (char*)((MonoVTable*)SGEN_LOAD_VTABLE (object))->domain->ephemeron_tombstone;
407 for (; cur < array_end; ++cur) {
408 char *key = cur->key;
410 if (!key || key == tombstone)
413 SGEN_LOG (5, "[%td] key %p (%s) value %p (%s)", cur - mono_array_addr (array, Ephemeron, 0),
414 key, sgen_is_object_alive_for_current_gen (key) ? "reachable" : "unreachable",
415 cur->value, cur->value && sgen_is_object_alive_for_current_gen (cur->value) ? "reachable" : "unreachable");
417 if (sgen_is_object_alive_for_current_gen (key)) {
418 char *value = cur->value;
420 copy_func ((void**)&cur->key, queue);
/* A value that was dead before this pass is now being marked:
 * another iteration will be needed to reach the fixpoint. */
422 if (!sgen_is_object_alive_for_current_gen (value))
423 nothing_marked = FALSE;
424 copy_func ((void**)&cur->value, queue);
430 SGEN_LOG (5, "Ephemeron run finished. Is it done %d", nothing_marked);
431 return nothing_marked;
/*
 * mono_gc_ephemeron_array_add:
 * Registers OBJ (an ephemeron array) by prepending a new node to the global
 * ephemeron_list. Node memory comes from SGen's internal allocator.
 * NOTE(review): locking and the return value are not visible in this extract.
 */
435 mono_gc_ephemeron_array_add (MonoObject *obj)
437 EphemeronLinkNode *node;
441 node = sgen_alloc_internal (INTERNAL_MEM_EPHEMERON_LINK);
446 node->array = (char*)obj;
447 node->next = ephemeron_list;
448 ephemeron_list = node;
450 SGEN_LOG (5, "Registered ephemeron array %p", obj);
/*
 * need_remove_object_for_domain:
 * True when the object at START belongs to DOMAIN and must be removed during
 * domain unload; also logs a cleanup record to the binary protocol.
 */
461 need_remove_object_for_domain (char *start, MonoDomain *domain)
463 if (mono_object_domain (start) == domain) {
464 SGEN_LOG (4, "Need to cleanup object %p", start);
465 binary_protocol_cleanup (start, (gpointer)SGEN_LOAD_VTABLE (start), sgen_safe_object_get_size ((MonoObject*)start));
/*
 * process_object_for_domain_clearing:
 * Per-object pre-pass for domain unload: asserts internal-thread objects stay
 * in the root domain, and nulls out a real proxy's unwrapped_server when the
 * remote object lives in (or was already cleared from) the dying domain.
 */
472 process_object_for_domain_clearing (char *start, MonoDomain *domain)
474 GCVTable *vt = (GCVTable*)SGEN_LOAD_VTABLE (start);
475 if (vt->klass == mono_defaults.internal_thread_class)
476 g_assert (mono_object_domain (start) == mono_get_root_domain ());
477 /* The object could be a proxy for an object in the domain
479 #ifndef DISABLE_REMOTING
480 if (mono_defaults.real_proxy_class->supertypes && mono_class_has_parent_fast (vt->klass, mono_defaults.real_proxy_class)) {
481 MonoObject *server = ((MonoRealProxy*)start)->unwrapped_server;
483 /* The server could already have been zeroed out, so
484 we need to check for that, too. */
485 if (server && (!SGEN_LOAD_VTABLE (server) || mono_object_domain (server) == domain)) {
486 SGEN_LOG (4, "Cleaning up remote pointer in %p to object %p", start, server);
487 ((MonoRealProxy*)start)->unwrapped_server = NULL;
/*
 * clear_domain_process_object:
 * Combines the proxy-clearing pre-pass with the "does this object belong to
 * the dying domain" test; for doomed objects with a monitor, also drops the
 * monitor's weak link so it won't be nulled later by the dislink machinery.
 */
494 clear_domain_process_object (char *obj, MonoDomain *domain)
498 process_object_for_domain_clearing (obj, domain);
499 remove = need_remove_object_for_domain (obj, domain);
501 if (remove && ((MonoObject*)obj)->synchronisation) {
502 void **dislink = mono_monitor_get_object_monitor_weak_link ((MonoObject*)obj);
504 sgen_register_disappearing_link (NULL, dislink, FALSE, TRUE);
/* Minor-heap callback: doomed nursery objects are zeroed in place
 * (CANARIFY_SIZE adjusts for nursery canaries when enabled). */
511 clear_domain_process_minor_object_callback (char *obj, size_t size, MonoDomain *domain)
513 if (clear_domain_process_object (obj, domain)) {
514 CANARIFY_SIZE (size);
515 memset (obj, 0, size);
/* Major-heap first-pass callback: process only; freeing happens in pass two. */
520 clear_domain_process_major_object_callback (char *obj, size_t size, MonoDomain *domain)
522 clear_domain_process_object (obj, domain);
/* Major-heap second-pass callbacks: actually free doomed objects, using the
 * collector's non-pinned or pinned free routine respectively. */
526 clear_domain_free_major_non_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
528 if (need_remove_object_for_domain (obj, domain))
529 major_collector.free_non_pinned_object (obj, size);
533 clear_domain_free_major_pinned_object_callback (char *obj, size_t size, MonoDomain *domain)
535 if (need_remove_object_for_domain (obj, domain))
536 major_collector.free_pinned_object (obj, size);
540 * When appdomains are unloaded we can easily remove objects that have finalizers,
541 * but all the others could still be present in random places on the heap.
542 * We need a sweep to get rid of them even though it's going to be costly
544 * The reason we need to remove them is because we access the vtable and class
545 * structures to know the object size and the reference bitmap: once the domain is
546 * unloaded the point to random memory.
/*
 * mono_gc_clear_domain:
 * Domain-unload sweep: finishes any concurrent collection, flushes staged
 * finalizers/dislinks, clears nursery fragments, optionally runs xdomain
 * reference checks, nulls ephemerons/dislinks/finalizers for the domain,
 * then sweeps nursery, major heap (two passes) and LOS, freeing every object
 * that belonged to DOMAIN.
 * NOTE(review): the stop-the-world call pairing sgen_restart_world is among
 * the lines missing from this extract.
 */
549 mono_gc_clear_domain (MonoDomain * domain)
551 LOSObject *bigobj, *prev;
556 binary_protocol_domain_unload_begin (domain);
560 if (sgen_concurrent_collection_in_progress ())
561 sgen_perform_collection (0, GENERATION_OLD, "clear domain", TRUE);
562 SGEN_ASSERT (0, !sgen_concurrent_collection_in_progress (), "We just ordered a synchronous collection. Why are we collecting concurrently?");
564 major_collector.finish_sweeping ();
566 sgen_process_fin_stage_entries ();
567 sgen_process_dislink_stage_entries ();
569 sgen_clear_nursery_fragments ();
571 if (sgen_mono_xdomain_checks && domain != mono_get_root_domain ()) {
572 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_NORMAL);
573 sgen_scan_for_registered_roots_in_domain (domain, ROOT_TYPE_WBARRIER);
574 sgen_check_for_xdomain_refs ();
577 /*Ephemerons and dislinks must be processed before LOS since they might end up pointing
578 to memory returned to the OS.*/
579 null_ephemerons_for_domain (domain);
581 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
582 sgen_null_links_for_domain (domain, i);
584 for (i = GENERATION_NURSERY; i < GENERATION_MAX; ++i)
585 sgen_remove_finalizers_for_domain (domain, i);
587 sgen_scan_area_with_callback (nursery_section->data, nursery_section->end_data,
588 (IterateObjectCallbackFunc)clear_domain_process_minor_object_callback, domain, FALSE);
590 /* We need two passes over major and large objects because
591 freeing such objects might give their memory back to the OS
592 (in the case of large objects) or obliterate its vtable
593 (pinned objects with major-copying or pinned and non-pinned
594 objects with major-mark&sweep), but we might need to
595 dereference a pointer from an object to another object if
596 the first object is a proxy. */
597 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)clear_domain_process_major_object_callback, domain);
598 for (bigobj = los_object_list; bigobj; bigobj = bigobj->next)
599 clear_domain_process_object (bigobj->data, domain);
/* Second pass over LOS: unlink and free doomed large objects. */
602 for (bigobj = los_object_list; bigobj;) {
603 if (need_remove_object_for_domain (bigobj->data, domain)) {
604 LOSObject *to_free = bigobj;
606 prev->next = bigobj->next;
608 los_object_list = bigobj->next;
609 bigobj = bigobj->next;
610 SGEN_LOG (4, "Freeing large object %p", bigobj->data);
611 sgen_los_free_object (to_free);
615 bigobj = bigobj->next;
617 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_NON_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_non_pinned_object_callback, domain);
618 major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_PINNED, (IterateObjectCallbackFunc)clear_domain_free_major_pinned_object_callback, domain);
620 if (domain == mono_get_root_domain ()) {
621 sgen_pin_stats_print_class_stats ();
622 sgen_object_layout_dump (stdout);
625 sgen_restart_world (0, NULL);
627 binary_protocol_domain_unload_end (domain);
628 binary_protocol_flush_buffers (FALSE);
/* Per-allocator-type cache of generated managed allocator methods, and the
 * runtime switch controlling whether they are used at all. */
637 static MonoMethod* alloc_method_cache [ATYPE_NUM];
638 static gboolean use_managed_allocator = TRUE;
/* Pull in CIL opcode definitions for the method builder (MANAGED_ALLOCATION only). */
640 #ifdef MANAGED_ALLOCATION
641 #define OPDEF(a,b,c,d,e,f,g,h,i,j) \
645 #include "mono/cil/opcode.def"
/*
 * TLS access helpers for the managed allocator fast path. Three variants:
 * - HAVE_KW_THREAD: the JIT resolves a dedicated TLS key directly
 *   (TLAB next-addr / temp-end each have their own key);
 * - __APPLE__ / HOST_WIN32: load the SgenThreadInfo pointer from TLS, then
 *   offset into its tlab_next_addr / tlab_temp_end fields;
 * - otherwise: managed allocation is unsupported (g_error at emit time).
 */
651 #ifdef HAVE_KW_THREAD
653 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
654 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
655 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
656 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_NEXT_ADDR); \
659 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
660 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
661 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
662 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_TLAB_TEMP_END); \
667 #if defined(__APPLE__) || defined (HOST_WIN32)
668 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { \
669 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
670 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
671 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
672 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_next_addr)); \
673 mono_mb_emit_byte ((mb), CEE_ADD); \
674 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
677 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { \
678 mono_mb_emit_byte ((mb), MONO_CUSTOM_PREFIX); \
679 mono_mb_emit_byte ((mb), CEE_MONO_TLS); \
680 mono_mb_emit_i4 ((mb), TLS_KEY_SGEN_THREAD_INFO); \
681 mono_mb_emit_icon ((mb), MONO_STRUCT_OFFSET (SgenThreadInfo, tlab_temp_end)); \
682 mono_mb_emit_byte ((mb), CEE_ADD); \
683 mono_mb_emit_byte ((mb), CEE_LDIND_I); \
687 #define EMIT_TLS_ACCESS_NEXT_ADDR(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
688 #define EMIT_TLS_ACCESS_TEMP_END(mb) do { g_error ("sgen is not supported when using --with-tls=pthread.\n"); } while (0)
693 /* FIXME: Do this in the JIT, where specialized allocation sequences can be created
694 * for each class. This is currently not easy to do, as it is hard to generate basic
695 * blocks + branches, but it is easy with the linear IL codebase.
697 * For this to work we'd need to solve the TLAB race, first. Now we
698 * require the allocator to be in a few known methods to make sure
699 * that they are executed atomically via the restart mechanism.
/*
 * create_allocator:
 * Builds, via the method builder, the managed fast-path allocator for one
 * allocation type (ATYPE_SMALL/NORMAL/VECTOR/STRING). The generated IL:
 * computes the object size (per atype), aligns it, bump-allocates from the
 * TLAB (tlab_next/tlab_temp_end via the EMIT_TLS_ACCESS_* macros), falls
 * back to the mono_gc_alloc_* icalls on overflow, stores the vtable (and
 * array length / string length) with release barriers, and returns the
 * new object. Registers the icalls once on first use.
 * NOTE(review): many lines (locals, labels, #else/#endif pairs) are missing
 * from this extract; consult the full sgen-mono.c before modifying.
 */
702 create_allocator (int atype)
705 guint32 slowpath_branch, max_size_branch;
706 MonoMethodBuilder *mb;
708 MonoMethodSignature *csig;
709 static gboolean registered = FALSE;
710 int tlab_next_addr_var, new_next_var;
712 const char *name = NULL;
713 AllocatorWrapperInfo *info;
716 mono_register_jit_icall (mono_gc_alloc_obj, "mono_gc_alloc_obj", mono_create_icall_signature ("object ptr int"), FALSE);
717 mono_register_jit_icall (mono_gc_alloc_vector, "mono_gc_alloc_vector", mono_create_icall_signature ("object ptr int int"), FALSE);
718 mono_register_jit_icall (mono_gc_alloc_string, "mono_gc_alloc_string", mono_create_icall_signature ("object ptr int int32"), FALSE);
722 if (atype == ATYPE_SMALL) {
725 } else if (atype == ATYPE_NORMAL) {
728 } else if (atype == ATYPE_VECTOR) {
730 name = "AllocVector";
731 } else if (atype == ATYPE_STRING) {
733 name = "AllocString";
735 g_assert_not_reached ();
/* Build the managed signature: (vtable, len) for strings,
 * (vtable[, size]) as native ints otherwise. */
738 csig = mono_metadata_signature_alloc (mono_defaults.corlib, num_params);
739 if (atype == ATYPE_STRING) {
740 csig->ret = &mono_defaults.string_class->byval_arg;
741 csig->params [0] = &mono_defaults.int_class->byval_arg;
742 csig->params [1] = &mono_defaults.int32_class->byval_arg;
744 csig->ret = &mono_defaults.object_class->byval_arg;
745 for (i = 0; i < num_params; ++i)
746 csig->params [i] = &mono_defaults.int_class->byval_arg;
749 mb = mono_mb_new (mono_defaults.object_class, name, MONO_WRAPPER_ALLOC);
/* Emit size computation into size_var, specialized per allocation type. */
752 size_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
753 if (atype == ATYPE_SMALL) {
754 /* size_var = size_arg */
755 mono_mb_emit_ldarg (mb, 1);
756 mono_mb_emit_stloc (mb, size_var);
757 } else if (atype == ATYPE_NORMAL) {
758 /* size = vtable->klass->instance_size; */
759 mono_mb_emit_ldarg (mb, 0);
760 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
761 mono_mb_emit_byte (mb, CEE_ADD);
762 mono_mb_emit_byte (mb, CEE_LDIND_I);
763 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, instance_size));
764 mono_mb_emit_byte (mb, CEE_ADD);
765 /* FIXME: assert instance_size stays a 4 byte integer */
766 mono_mb_emit_byte (mb, CEE_LDIND_U4);
767 mono_mb_emit_byte (mb, CEE_CONV_I);
768 mono_mb_emit_stloc (mb, size_var);
769 } else if (atype == ATYPE_VECTOR) {
770 MonoExceptionClause *clause;
771 int pos, pos_leave, pos_error;
772 MonoClass *oom_exc_class;
776 * n > MONO_ARRAY_MAX_INDEX => OutOfMemoryException
777 * n < 0 => OverflowException
779 * We can do an unsigned comparison to catch both cases, then in the error
780 * case compare signed to distinguish between them.
782 mono_mb_emit_ldarg (mb, 1);
783 mono_mb_emit_icon (mb, MONO_ARRAY_MAX_INDEX);
784 mono_mb_emit_byte (mb, CEE_CONV_U);
785 pos = mono_mb_emit_short_branch (mb, CEE_BLE_UN_S);
787 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
788 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
789 mono_mb_emit_ldarg (mb, 1);
790 mono_mb_emit_icon (mb, 0);
791 pos_error = mono_mb_emit_short_branch (mb, CEE_BLT_S);
792 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
793 mono_mb_patch_short_branch (mb, pos_error);
794 mono_mb_emit_exception (mb, "OverflowException", NULL);
796 mono_mb_patch_short_branch (mb, pos);
/* try { size = sizeof(MonoArray) + n * element_size; } with overflow-
 * checked arithmetic; an OverflowException is caught below and rethrown
 * as OutOfMemoryException. */
798 clause = mono_image_alloc0 (mono_defaults.corlib, sizeof (MonoExceptionClause));
799 clause->try_offset = mono_mb_get_label (mb);
801 /* vtable->klass->sizes.element_size */
802 mono_mb_emit_ldarg (mb, 0);
803 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoVTable, klass));
804 mono_mb_emit_byte (mb, CEE_ADD);
805 mono_mb_emit_byte (mb, CEE_LDIND_I);
806 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoClass, sizes));
807 mono_mb_emit_byte (mb, CEE_ADD);
808 mono_mb_emit_byte (mb, CEE_LDIND_U4);
809 mono_mb_emit_byte (mb, CEE_CONV_I);
812 mono_mb_emit_ldarg (mb, 1);
813 mono_mb_emit_byte (mb, CEE_MUL_OVF_UN);
814 /* + sizeof (MonoArray) */
815 mono_mb_emit_icon (mb, sizeof (MonoArray));
816 mono_mb_emit_byte (mb, CEE_ADD_OVF_UN);
817 mono_mb_emit_stloc (mb, size_var);
819 pos_leave = mono_mb_emit_branch (mb, CEE_LEAVE);
822 clause->flags = MONO_EXCEPTION_CLAUSE_NONE;
823 clause->try_len = mono_mb_get_pos (mb) - clause->try_offset;
824 clause->data.catch_class = mono_class_from_name (mono_defaults.corlib,
825 "System", "OverflowException");
826 g_assert (clause->data.catch_class);
827 clause->handler_offset = mono_mb_get_label (mb);
829 oom_exc_class = mono_class_from_name (mono_defaults.corlib,
830 "System", "OutOfMemoryException");
831 g_assert (oom_exc_class);
832 ctor = mono_class_get_method_from_name (oom_exc_class, ".ctor", 0);
835 mono_mb_emit_byte (mb, CEE_POP);
836 mono_mb_emit_op (mb, CEE_NEWOBJ, ctor);
837 mono_mb_emit_byte (mb, CEE_THROW);
839 clause->handler_len = mono_mb_get_pos (mb) - clause->handler_offset;
840 mono_mb_set_clauses (mb, 1, clause);
841 mono_mb_patch_branch (mb, pos_leave);
843 } else if (atype == ATYPE_STRING) {
847 * a string allocator method takes the args: (vtable, len)
849 * bytes = offsetof (MonoString, chars) + ((len + 1) * 2)
853 * bytes <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
857 * offsetof (MonoString, chars) + ((len + 1) * 2) <= INT32_MAX - (SGEN_ALLOC_ALIGN - 1)
858 * len <= (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - offsetof (MonoString, chars)) / 2 - 1
860 mono_mb_emit_ldarg (mb, 1);
861 mono_mb_emit_icon (mb, (INT32_MAX - (SGEN_ALLOC_ALIGN - 1) - MONO_STRUCT_OFFSET (MonoString, chars)) / 2 - 1);
862 pos = mono_mb_emit_short_branch (mb, MONO_CEE_BLE_UN_S);
864 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
865 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
866 mono_mb_emit_exception (mb, "OutOfMemoryException", NULL);
867 mono_mb_patch_short_branch (mb, pos);
869 mono_mb_emit_ldarg (mb, 1);
870 mono_mb_emit_icon (mb, 1);
871 mono_mb_emit_byte (mb, MONO_CEE_SHL);
872 //WE manually fold the above + 2 here
873 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, chars) + 2);
874 mono_mb_emit_byte (mb, CEE_ADD);
875 mono_mb_emit_stloc (mb, size_var);
877 g_assert_not_reached ();
/* Round size up to SGEN_ALLOC_ALIGN (ATYPE_SMALL callers pre-align). */
880 if (atype != ATYPE_SMALL) {
881 /* size += ALLOC_ALIGN - 1; */
882 mono_mb_emit_ldloc (mb, size_var);
883 mono_mb_emit_icon (mb, SGEN_ALLOC_ALIGN - 1);
884 mono_mb_emit_byte (mb, CEE_ADD);
885 /* size &= ~(ALLOC_ALIGN - 1); */
886 mono_mb_emit_icon (mb, ~(SGEN_ALLOC_ALIGN - 1));
887 mono_mb_emit_byte (mb, CEE_AND);
888 mono_mb_emit_stloc (mb, size_var);
891 /* if (size > MAX_SMALL_OBJ_SIZE) goto slowpath */
892 if (atype != ATYPE_SMALL) {
893 mono_mb_emit_ldloc (mb, size_var);
894 mono_mb_emit_icon (mb, SGEN_MAX_SMALL_OBJ_SIZE);
895 max_size_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BGT_UN_S);
899 * We need to modify tlab_next, but the JIT only supports reading, so we read
900 * another tls var holding its address instead.
903 /* tlab_next_addr (local) = tlab_next_addr (TLS var) */
904 tlab_next_addr_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
905 EMIT_TLS_ACCESS_NEXT_ADDR (mb);
906 mono_mb_emit_stloc (mb, tlab_next_addr_var);
908 /* p = (void**)tlab_next; */
909 p_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
910 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
911 mono_mb_emit_byte (mb, CEE_LDIND_I);
912 mono_mb_emit_stloc (mb, p_var);
914 /* new_next = (char*)p + size; */
915 new_next_var = mono_mb_add_local (mb, &mono_defaults.int_class->byval_arg);
916 mono_mb_emit_ldloc (mb, p_var);
917 mono_mb_emit_ldloc (mb, size_var);
918 mono_mb_emit_byte (mb, CEE_CONV_I);
919 mono_mb_emit_byte (mb, CEE_ADD);
920 mono_mb_emit_stloc (mb, new_next_var);
922 /* if (G_LIKELY (new_next < tlab_temp_end)) */
923 mono_mb_emit_ldloc (mb, new_next_var);
924 EMIT_TLS_ACCESS_TEMP_END (mb);
925 slowpath_branch = mono_mb_emit_short_branch (mb, MONO_CEE_BLT_UN_S);
/* Slow path: TLAB exhausted (or object too large) — call the icall. */
928 if (atype != ATYPE_SMALL)
929 mono_mb_patch_short_branch (mb, max_size_branch);
931 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
932 mono_mb_emit_byte (mb, CEE_MONO_NOT_TAKEN);
934 /* FIXME: mono_gc_alloc_obj takes a 'size_t' as an argument, not an int32 */
935 mono_mb_emit_ldarg (mb, 0);
936 mono_mb_emit_ldloc (mb, size_var);
937 if (atype == ATYPE_NORMAL || atype == ATYPE_SMALL) {
938 mono_mb_emit_icall (mb, mono_gc_alloc_obj);
939 } else if (atype == ATYPE_VECTOR) {
940 mono_mb_emit_ldarg (mb, 1);
941 mono_mb_emit_icall (mb, mono_gc_alloc_vector);
942 } else if (atype == ATYPE_STRING) {
943 mono_mb_emit_ldarg (mb, 1);
944 mono_mb_emit_icall (mb, mono_gc_alloc_string);
946 g_assert_not_reached ();
948 mono_mb_emit_byte (mb, CEE_RET);
/* Fast path continues here: commit the TLAB bump, publish vtable/lengths. */
951 mono_mb_patch_short_branch (mb, slowpath_branch);
953 /* FIXME: Memory barrier */
955 /* tlab_next = new_next */
956 mono_mb_emit_ldloc (mb, tlab_next_addr_var);
957 mono_mb_emit_ldloc (mb, new_next_var);
958 mono_mb_emit_byte (mb, CEE_STIND_I);
960 /*The tlab store must be visible before the the vtable store. This could be replaced with a DDS but doing it with IL would be tricky. */
961 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
962 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
963 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
966 mono_mb_emit_ldloc (mb, p_var);
967 mono_mb_emit_ldarg (mb, 0);
968 mono_mb_emit_byte (mb, CEE_STIND_I);
970 if (atype == ATYPE_VECTOR) {
971 /* arr->max_length = max_length; */
972 mono_mb_emit_ldloc (mb, p_var);
973 mono_mb_emit_ldflda (mb, MONO_STRUCT_OFFSET (MonoArray, max_length));
974 mono_mb_emit_ldarg (mb, 1);
975 #ifdef MONO_BIG_ARRAYS
976 mono_mb_emit_byte (mb, CEE_STIND_I);
978 mono_mb_emit_byte (mb, CEE_STIND_I4);
980 } else if (atype == ATYPE_STRING) {
981 /* need to set length and clear the last char */
982 /* s->length = len; */
983 mono_mb_emit_ldloc (mb, p_var);
984 mono_mb_emit_icon (mb, MONO_STRUCT_OFFSET (MonoString, length));
985 mono_mb_emit_byte (mb, MONO_CEE_ADD);
986 mono_mb_emit_ldarg (mb, 1);
987 mono_mb_emit_byte (mb, MONO_CEE_STIND_I4);
988 /* s->chars [len] = 0; */
989 mono_mb_emit_ldloc (mb, p_var);
990 mono_mb_emit_ldloc (mb, size_var);
991 mono_mb_emit_icon (mb, 2);
992 mono_mb_emit_byte (mb, MONO_CEE_SUB);
993 mono_mb_emit_byte (mb, MONO_CEE_ADD);
994 mono_mb_emit_icon (mb, 0);
995 mono_mb_emit_byte (mb, MONO_CEE_STIND_I2);
999 We must make sure both vtable and max_length are globaly visible before returning to managed land.
1001 mono_mb_emit_byte (mb, MONO_CUSTOM_PREFIX);
1002 mono_mb_emit_byte (mb, CEE_MONO_MEMORY_BARRIER);
1003 mono_mb_emit_i4 (mb, MONO_MEMORY_BARRIER_REL);
1006 mono_mb_emit_ldloc (mb, p_var);
1007 mono_mb_emit_byte (mb, CEE_RET);
/* Finalize the method; init_locals disabled since every local is written
 * before use on all paths. Tag the wrapper with allocator metadata. */
1010 res = mono_mb_create_method (mb, csig, 8);
1012 mono_method_get_header (res)->init_locals = FALSE;
1014 info = mono_image_alloc0 (mono_defaults.corlib, sizeof (AllocatorWrapperInfo));
1015 info->gc_name = "sgen";
1016 info->alloc_type = atype;
1017 mono_marshal_set_wrapper_info (res, info);
/*
 * mono_gc_get_aligned_size_for_allocator:
 * Rounds SIZE up to SGEN_ALLOC_ALIGN, matching the alignment the generated
 * managed allocator applies (the add-then-mask idiom requires a power-of-two
 * alignment).
 */
1024 mono_gc_get_aligned_size_for_allocator (int size)
1026 int aligned_size = size;
1027 aligned_size += SGEN_ALLOC_ALIGN - 1;
1028 aligned_size &= ~(SGEN_ALLOC_ALIGN - 1);
1029 return aligned_size;
1033 * Generate an allocator method implementing the fast path of mono_gc_alloc_obj ().
1034 * The signature of the called method is:
1035 * object allocate (MonoVTable *vtable)
/*
 * mono_gc_get_managed_allocator:
 * Returns the cached managed allocator suited to KLASS, or bails out (lines
 * with the NULL returns are missing from this extract) when managed
 * allocation can't be used: collect-before-allocs debugging, no fast TLS,
 * oversized instances, finalizers/remoting/profiler allocation events.
 * Strings get the string allocator; otherwise SMALL when the instance size
 * is statically known, NORMAL for generic classes with dynamic size.
 */
1038 mono_gc_get_managed_allocator (MonoClass *klass, gboolean for_box, gboolean known_instance_size)
1040 #ifdef MANAGED_ALLOCATION
1041 if (collect_before_allocs)
1043 if (!mono_runtime_has_tls_get ())
1045 if (klass->instance_size > tlab_size)
1047 if (known_instance_size && ALIGN_TO (klass->instance_size, SGEN_ALLOC_ALIGN) >= SGEN_MAX_SMALL_OBJ_SIZE)
1049 if (klass->has_finalize || mono_class_is_marshalbyref (klass) || (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS))
1053 if (klass->byval_arg.type == MONO_TYPE_STRING)
1054 return mono_gc_get_managed_allocator_by_type (ATYPE_STRING);
1055 /* Generic classes have dynamic field and can go above MAX_SMALL_OBJ_SIZE. */
1056 if (known_instance_size)
1057 return mono_gc_get_managed_allocator_by_type (ATYPE_SMALL);
1059 return mono_gc_get_managed_allocator_by_type (ATYPE_NORMAL);
/*
 * mono_gc_get_managed_array_allocator:
 * Returns the managed vector allocator for KLASS, or bails out (return lines
 * missing from this extract) for multi-dimensional arrays, missing fast TLS,
 * profiler allocation events, or per-allocation debugging actions. Asserts
 * array classes never have finalizers or remoting proxies.
 */
1066 mono_gc_get_managed_array_allocator (MonoClass *klass)
1068 #ifdef MANAGED_ALLOCATION
1069 if (klass->rank != 1)
1071 if (!mono_runtime_has_tls_get ())
1073 if (mono_profiler_get_events () & MONO_PROFILE_ALLOCATIONS)
1075 if (has_per_allocation_action)
1077 g_assert (!mono_class_has_finalizer (klass) && !mono_class_is_marshalbyref (klass));
1079 return mono_gc_get_managed_allocator_by_type (ATYPE_VECTOR);
/* Toggle the managed (IL) allocator fast path on or off globally. */
1086 sgen_set_use_managed_allocator (gboolean flag)
1088 use_managed_allocator = flag;
/*
 * mono_gc_get_managed_allocator_by_type:
 * Returns (building and caching on first use) the generated allocator for
 * ATYPE. If another thread raced and populated the cache first, the freshly
 * built method is discarded; the memory barrier orders the method's
 * construction before publication in alloc_method_cache.
 */
1092 mono_gc_get_managed_allocator_by_type (int atype)
1094 #ifdef MANAGED_ALLOCATION
1097 if (!use_managed_allocator)
1100 if (!mono_runtime_has_tls_get ())
1103 res = alloc_method_cache [atype];
1107 res = create_allocator (atype);
1109 if (alloc_method_cache [atype]) {
1110 mono_free_method (res);
1111 res = alloc_method_cache [atype];
1113 mono_memory_barrier ();
1114 alloc_method_cache [atype] = res;
/* Number of distinct managed allocator types (return line missing here). */
1125 mono_gc_get_managed_allocator_types (void)
/* sgen_is_managed_allocator: true when METHOD is one of the cached
 * generated allocators. */
1131 sgen_is_managed_allocator (MonoMethod *method)
1135 for (i = 0; i < ATYPE_NUM; ++i)
1136 if (method == alloc_method_cache [i])
/* sgen_has_managed_allocator: true when any allocator has been generated. */
1142 sgen_has_managed_allocator (void)
1146 for (i = 0; i < ATYPE_NUM; ++i)
1147 if (alloc_method_cache [i])
1153  * Cardtable scanning
/* Low bits of a pointer within one mword; used to align card pointers
 * for word-at-a-time scanning. */
1156 #define MWORD_MASK (sizeof (mword) - 1)
/*
 * find_card_offset:
 * Given a nonzero mword CARD loaded from the card table, return the byte
 * offset of the first set (dirty) card byte within it.  Uses find-first-set
 * intrinsics where available (each card byte contributes 8 bits, hence the
 * divide by 8); falls back to a byte-by-byte loop otherwise (the generic
 * branch's body is partially elided in this extract).  On s390x the word
 * is byte-swapped to little-endian first so ffs finds the lowest-address
 * byte.
 */
1159 find_card_offset (mword card)
1161 /*XXX Use assembly as this generates some pretty bad code */
1162 #if defined(__i386__) && defined(__GNUC__)
1163 return (__builtin_ffs (card) - 1) / 8;
1164 #elif defined(__x86_64__) && defined(__GNUC__)
1165 return (__builtin_ffsll (card) - 1) / 8;
1166 #elif defined(__s390x__)
1167 return (__builtin_ffsll (GUINT64_TO_LE(card)) - 1) / 8;
1170 guint8 *ptr = (guint8 *) &card;
1171 for (i = 0; i < sizeof (mword); ++i) {
/*
 * find_next_card:
 * Return a pointer to the first nonzero (dirty) card byte in
 * [card_data, end), or presumably END if none (the return statements are
 * elided in this extract).  Three phases: scan bytes until CARD_DATA is
 * mword-aligned, then scan whole mwords (using find_card_offset to locate
 * the dirty byte inside a nonzero word), then scan the unaligned tail.
 */
1180 find_next_card (guint8 *card_data, guint8 *end)
1182 mword *cards, *cards_end;
	/* Phase 1: byte scan up to mword alignment. */
1185 while ((((mword)card_data) & MWORD_MASK) && card_data < end) {
1191 if (card_data == end)
	/* Phase 2: word-at-a-time scan over the aligned middle. */
1194 cards = (mword*)card_data;
1195 cards_end = (mword*)((mword)end & ~MWORD_MASK);
1196 while (cards < cards_end) {
1199 return (guint8*)cards + find_card_offset (card);
	/* Phase 3: byte scan of the unaligned tail. */
1203 card_data = (guint8*)cards_end;
1204 while (card_data < end) {
/* Index of the array element PTR points into; PTR need not be
 * element-aligned (the integer division truncates to the containing
 * element). */
1213 #define ARRAY_OBJ_INDEX(ptr,array,elem_size) (((char*)(ptr) - ((char*)(array) + G_STRUCT_OFFSET (MonoArray, vector))) / (elem_size))
/*
 * sgen_client_cardtable_scan_object:
 * Scan the dirty cards covering OBJ (here: the array path; non-array
 * handling is elided in this extract) and process the references found in
 * each dirty card via CTX's copy/scan functions.  CARDS, when non-NULL, is
 * presumably a mod-union card snapshot; when NULL the live (shadow) card
 * table is consulted.  With SGEN_HAVE_OVERLAPPING_CARDS the card range can
 * wrap past the end of the shadow table, in which case the scan is done in
 * two steps (main range, then the wrapped-around prefix).
 * NOTE(review): many interleaving lines are missing from this extract;
 * comments below describe only what the visible lines establish.
 */
1216 sgen_client_cardtable_scan_object (char *obj, mword block_obj_size, guint8 *cards, gboolean mod_union, ScanCopyContext ctx)
1218 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
1219 MonoClass *klass = vt->klass;
1221 SGEN_ASSERT (0, SGEN_VTABLE_HAS_REFERENCES (vt), "Why would we ever call this on reference-free objects?");
1224 guint8 *card_data, *card_base;
1225 guint8 *card_data_end;
1226 char *obj_start = sgen_card_table_align_pointer (obj);
1227 mword obj_size = sgen_client_par_object_get_size (vt, (MonoObject*)obj);
1228 char *obj_end = obj + obj_size;
1230 size_t extra_idx = 0;
1232 MonoArray *arr = (MonoArray*)obj;
1233 mword desc = (mword)klass->element_class->gc_descr;
1234 int elem_size = mono_array_element_size (klass);
1236 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1237 guint8 *overflow_scan_end = NULL;
1240 #ifdef SGEN_OBJECT_LAYOUT_STATISTICS
1241 if (klass->element_class->valuetype)
1242 sgen_object_layout_scanned_vtype_array ();
1244 sgen_object_layout_scanned_ref_array ();
	/* Locate the card range covering the object. */
1250 card_data = sgen_card_table_get_card_scan_address ((mword)obj);
1252 card_base = card_data;
1253 card_count = sgen_card_table_number_of_cards_in_range ((mword)obj, obj_size);
1254 card_data_end = card_data + card_count;
1257 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
1258 /*Check for overflow and if so, setup to scan in two steps*/
1259 if (!cards && card_data_end >= SGEN_SHADOW_CARDTABLE_END) {
1260 overflow_scan_end = sgen_shadow_cardtable + (card_data_end - SGEN_SHADOW_CARDTABLE_END);
1261 card_data_end = SGEN_SHADOW_CARDTABLE_END;
	/* Iterate over dirty cards only; each iteration handles one card. */
1267 card_data = find_next_card (card_data, card_data_end);
1268 for (; card_data < card_data_end; card_data = find_next_card (card_data + 1, card_data_end)) {
1270 size_t idx = (card_data - card_base) + extra_idx;
1271 char *start = (char*)(obj_start + idx * CARD_SIZE_IN_BYTES);
1272 char *card_end = start + CARD_SIZE_IN_BYTES;
1273 char *first_elem, *elem;
1275 HEAVY_STAT (++los_marked_cards);
	/* Presumably skipped in mod-union mode -- the guard is elided here. */
1278 sgen_card_table_prepare_card_for_scanning (card_data);
1280 card_end = MIN (card_end, obj_end);
1282 if (start <= (char*)arr->vector)
1285 index = ARRAY_OBJ_INDEX (start, obj, elem_size);
1287 elem = first_elem = (char*)mono_array_addr_with_size_fast ((MonoArray*)obj, elem_size, index);
	/* Value-type elements: scan each element's interior references. */
1288 if (klass->element_class->valuetype) {
1289 ScanVTypeFunc scan_vtype_func = ctx.ops->scan_vtype;
1291 for (; elem < card_end; elem += elem_size)
1292 scan_vtype_func (obj, elem, desc, ctx.queue BINARY_PROTOCOL_ARG (elem_size));
	/* Reference elements: copy/mark each slot; in mod-union mode every
	 * non-NULL slot is processed, otherwise only nursery pointers. */
1294 CopyOrMarkObjectFunc copy_func = ctx.ops->copy_or_mark_object;
1296 HEAVY_STAT (++los_array_cards);
1297 for (; elem < card_end; elem += SIZEOF_VOID_P) {
1298 gpointer new, old = *(gpointer*)elem;
1299 if ((mod_union && old) || G_UNLIKELY (sgen_ptr_in_nursery (old))) {
1300 HEAVY_STAT (++los_array_remsets);
1301 copy_func ((void**)elem, ctx.queue);
1302 new = *(gpointer*)elem;
	/* Target still (or now) in the nursery: re-remember the slot. */
1303 if (G_UNLIKELY (sgen_ptr_in_nursery (new)))
1304 sgen_add_to_global_remset (elem, new);
1309 binary_protocol_card_scan (first_elem, elem - first_elem);
1312 #ifdef SGEN_HAVE_OVERLAPPING_CARDS
	/* Second step of a wrapped scan: continue from the shadow table start. */
1313 if (overflow_scan_end) {
1314 extra_idx = card_data - card_base;
1315 card_base = card_data = sgen_shadow_cardtable;
1316 card_data_end = overflow_scan_end;
1317 overflow_scan_end = NULL;
1328 * Array and string allocation
/*
 * mono_gc_alloc_vector:
 * Allocate a one-dimensional array of SIZE bytes with MAX_LENGTH elements.
 * Fast path: a lock-free try inside a critical region; on failure, the
 * locked slow path (the LOCK_GC/UNLOCK_GC lines are elided in this
 * extract).  Returns NULL-page OOM object via mono_gc_out_of_memory on
 * exhaustion.  max_length is stored without fencing because
 * EXIT_CRITICAL_REGION provides the needed barrier.
 */
1332 mono_gc_alloc_vector (MonoVTable *vtable, size_t size, uintptr_t max_length)
1337 if (!SGEN_CAN_ALIGN_UP (size))
1340 #ifndef DISABLE_CRITICAL_REGION
1341 ENTER_CRITICAL_REGION;
1342 arr = sgen_try_alloc_obj_nolock (vtable, size);
1344 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1345 arr->max_length = (mono_array_size_t)max_length;
1346 EXIT_CRITICAL_REGION;
1349 EXIT_CRITICAL_REGION;
	/* Slow path: allocate under the GC lock (lock lines elided here). */
1354 arr = sgen_alloc_obj_nolock (vtable, size);
1355 if (G_UNLIKELY (!arr)) {
1357 return mono_gc_out_of_memory (size);
1360 arr->max_length = (mono_array_size_t)max_length;
1365 SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (MonoObject*)arr)), "Vector has incorrect size.");
/*
 * mono_gc_alloc_array:
 * Allocate a multi-dimensional array.  Same fast/slow path structure as
 * mono_gc_alloc_vector (critical-region try, then locked allocation --
 * lock lines elided in this extract), plus initialization of the bounds
 * descriptor, which lives in the last BOUNDS_SIZE bytes of the object.
 */
1370 mono_gc_alloc_array (MonoVTable *vtable, size_t size, uintptr_t max_length, uintptr_t bounds_size)
1373 MonoArrayBounds *bounds;
1376 if (!SGEN_CAN_ALIGN_UP (size))
1379 #ifndef DISABLE_CRITICAL_REGION
1380 ENTER_CRITICAL_REGION;
1381 arr = sgen_try_alloc_obj_nolock (vtable, size);
1383 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1384 arr->max_length = (mono_array_size_t)max_length;
	/* Bounds are allocated in-line at the tail of the object. */
1386 bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1387 arr->bounds = bounds;
1388 EXIT_CRITICAL_REGION;
1391 EXIT_CRITICAL_REGION;
	/* Slow path: allocate under the GC lock (lock lines elided here). */
1396 arr = sgen_alloc_obj_nolock (vtable, size);
1397 if (G_UNLIKELY (!arr)) {
1399 return mono_gc_out_of_memory (size);
1402 arr->max_length = (mono_array_size_t)max_length;
1404 bounds = (MonoArrayBounds*)((char*)arr + size - bounds_size);
1405 arr->bounds = bounds;
1410 SGEN_ASSERT (6, SGEN_ALIGN_UP (size) == SGEN_ALIGN_UP (sgen_client_par_object_get_size (vtable, (MonoObject*)arr)), "Array has incorrect size.");
/*
 * mono_gc_alloc_string:
 * Allocate a string object of SIZE bytes holding LEN characters.  Same
 * fast/slow path structure as the array allocators; the store of the
 * length field is elided in this extract (presumably str->length = len
 * on both paths -- confirm upstream).
 */
1415 mono_gc_alloc_string (MonoVTable *vtable, size_t size, gint32 len)
1420 if (!SGEN_CAN_ALIGN_UP (size))
1423 #ifndef DISABLE_CRITICAL_REGION
1424 ENTER_CRITICAL_REGION;
1425 str = sgen_try_alloc_obj_nolock (vtable, size);
1427 /*This doesn't require fencing since EXIT_CRITICAL_REGION already does it for us*/
1429 EXIT_CRITICAL_REGION;
1432 EXIT_CRITICAL_REGION;
	/* Slow path: allocate under the GC lock (lock lines elided here). */
1437 str = sgen_alloc_obj_nolock (vtable, size);
1438 if (G_UNLIKELY (!str)) {
1440 return mono_gc_out_of_memory (size);
/*
 * mono_gc_set_string_length:
 * Truncate STR in place to NEW_LENGTH characters.  The discarded tail is
 * zeroed so the string stays NUL-delimited and SGen can reclaim the
 * space.  When nursery canaries are enabled and the string is in the
 * nursery, the canary is validated and re-written immediately after the
 * new terminator.
 */
1455 mono_gc_set_string_length (MonoString *str, gint32 new_length)
1457 mono_unichar2 *new_end = str->chars + new_length;
1459 /* zero the discarded string. This null-delimits the string and allows
1460 * the space to be reclaimed by SGen. */
1462 if (nursery_canaries_enabled () && sgen_ptr_in_nursery (str)) {
1463 CHECK_CANARY_FOR_OBJECT (str);
	/* +1 zeroes the terminator slot too; canary follows the new end. */
1464 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2) + CANARY_SIZE);
1465 memcpy (new_end + 1 , CANARY_STRING, CANARY_SIZE);
1467 memset (new_end, 0, (str->length - new_length + 1) * sizeof (mono_unichar2));
1470 str->length = new_length;
/* Human-readable name for a Mono-specific internal memory type (only the
 * ephemeron-link case is visible in this extract). */
1478 sgen_client_description_for_internal_mem_type (int type)
1481 case INTERNAL_MEM_EPHEMERON_LINK: return "ephemeron-link";
/* Debug hook run before a collection: with xdomain-checks enabled, clear
 * nursery fragments and verify no cross-domain references remain. */
1488 sgen_client_pre_collection_checks (void)
1490 if (sgen_mono_xdomain_checks) {
1491 sgen_clear_nursery_fragments ();
1492 sgen_check_for_xdomain_refs ();
/* Class name of OBJ, for diagnostics; reads the vtable directly so it is
 * usable on objects mid-collection. */
1497 sgen_client_object_safe_name (MonoObject *obj)
1499 MonoVTable *vt = (MonoVTable*)SGEN_LOAD_VTABLE (obj);
1500 return vt->klass->name;
/* Mono-specific SGen initialization: register the fixed-size internal
 * allocator for ephemeron link nodes. */
1508 sgen_client_init (void)
1510 sgen_register_fixed_internal_mem_type (INTERNAL_MEM_EPHEMERON_LINK, sizeof (EphemeronLinkNode));
/* Parse a Mono-specific MONO_GC_DEBUG option; "xdomain-checks" turns on
 * cross-domain reference verification at domain unload. */
1514 sgen_client_handle_gc_debug (const char *opt)
1516 if (!strcmp (opt, "xdomain-checks")) {
1517 sgen_mono_xdomain_checks = TRUE;
/* Print the Mono-specific MONO_GC_DEBUG options understood by
 * sgen_client_handle_gc_debug. */
1525 sgen_client_print_gc_debug_usage (void)
1527 fprintf (stderr, " xdomain-checks\n");