}
}
-void (*GC_notify_event) GC_PROTO((GCEventType e));
+void (*GC_notify_event) GC_PROTO((GC_EventType e));
void (*GC_on_heap_resize) GC_PROTO((size_t new_size));
+/* Install FN as the callback invoked at specific points during every */
+/* collection (see GC_EventType).  The allocator lock is taken so the */
+/* update cannot race with a collection reading GC_notify_event.      */
+GC_API void GC_set_on_collection_event (void (*fn) (GC_EventType))
+{
+ DCL_LOCK_STATE;
+ LOCK();
+ GC_notify_event = fn;
+ UNLOCK();
+}
+
/* Finish up a collection. Assumes lock is held, signals are disabled, */
/* but the world is otherwise running. */
void GC_finish_collection()
GC_hidden_pointer weak_ref;
} GCToggleRef;
-static int (*GC_toggleref_callback) (GC_PTR obj);
+static GC_ToggleRefStatus (*GC_toggleref_callback) (GC_PTR obj);
static GCToggleRef *GC_toggleref_array;
static int GC_toggleref_array_size;
static int GC_toggleref_array_capacity;
int toggle_ref_counts [3] = { 0, 0, 0 };
for (i = w = 0; i < GC_toggleref_array_size; ++i) {
- int res;
+ GC_ToggleRefStatus res;
GCToggleRef r = GC_toggleref_array [i];
GC_PTR obj;
res = GC_toggleref_callback (obj);
++toggle_ref_counts [res];
switch (res) {
- case 0:
+ case GC_TOGGLE_REF_DROP:
break;
- case 1:
+ case GC_TOGGLE_REF_STRONG:
GC_toggleref_array [w].strong_ref = obj;
GC_toggleref_array [w].weak_ref = (GC_hidden_pointer)NULL;
++w;
break;
- case 2:
+ case GC_TOGGLE_REF_WEAK:
GC_toggleref_array [w].strong_ref = NULL;
GC_toggleref_array [w].weak_ref = HIDE_POINTER (obj);
++w;
static void (*GC_object_finalized_proc) (GC_PTR obj);
+/* Store PROC in GC_object_finalized_proc.  NOTE(review): the         */
+/* invocation site is not in this hunk; presumably PROC is called on  */
+/* each object during finalization -- confirm in finalize.c.          */
void
-GC_set_finalizer_notify_proc (void (*proc) (GC_PTR obj))
+GC_set_await_finalize_proc (void (*proc) (GC_PTR obj))
{
GC_object_finalized_proc = proc;
}
+/* Install the toggleref callback that is consulted for each          */
+/* registered toggleref object during collection; its verdict (see    */
+/* GC_ToggleRefStatus) decides whether the entry is dropped, kept     */
+/* strong, or kept as a hidden (weak) pointer.                        */
-void GC_toggleref_register_callback(int (*proccess_toggleref) (GC_PTR obj))
+void GC_set_toggleref_func(GC_ToggleRefStatus (*proccess_toggleref) (GC_PTR obj))
{
GC_toggleref_callback = proccess_toggleref;
}
+/* Make sure GC_toggleref_array has room for CAPACITY more entries,   */
+/* allocating the initial array or doubling the backing store as      */
+/* needed.  Returns FALSE if an internal allocation fails (the old    */
+/* array is left intact in the doubling case), TRUE otherwise.        */
-static void
+static GC_bool
ensure_toggleref_capacity (int capacity)
{
if (!GC_toggleref_array) {
GC_toggleref_array_capacity = 32;
GC_toggleref_array = (GCToggleRef *) GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE (GC_toggleref_array_capacity * sizeof (GCToggleRef), NORMAL);
+ if (!GC_toggleref_array)
+ return FALSE;
}
if (GC_toggleref_array_size + capacity >= GC_toggleref_array_capacity) {
GCToggleRef *tmp;
GC_toggleref_array_capacity *= 2;
tmp = (GCToggleRef *) GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE (GC_toggleref_array_capacity * sizeof (GCToggleRef), NORMAL);
+ if (!tmp)
+ return FALSE;
memcpy (tmp, GC_toggleref_array, GC_toggleref_array_size * sizeof (GCToggleRef));
GC_INTERNAL_FREE (GC_toggleref_array);
GC_toggleref_array = tmp;
}
+ return TRUE;
}
+/* Register OBJECT in the toggleref array: strongly if STRONG_REF is  */
+/* set, otherwise as a hidden pointer.  Returns GC_SUCCESS (also when */
+/* no toggleref callback has been registered, in which case nothing   */
+/* is recorded) or GC_NO_MEMORY if the array could not be grown.      */
-void
+int
GC_toggleref_add (GC_PTR object, int strong_ref)
{
+ int res = GC_SUCCESS;
DCL_LOCK_STATE;
# ifdef THREADS
DISABLE_SIGNALS();
if (!GC_toggleref_callback)
goto end;
- ensure_toggleref_capacity (1);
+ if (!ensure_toggleref_capacity (1)) {
+ res = GC_NO_MEMORY;
+ goto end;
+ }
GC_toggleref_array [GC_toggleref_array_size].strong_ref = strong_ref ? object : NULL;
GC_toggleref_array [GC_toggleref_array_size].weak_ref = strong_ref ? (GC_hidden_pointer)NULL : HIDE_POINTER (object);
++GC_toggleref_array_size;
UNLOCK();
ENABLE_SIGNALS();
# endif
+ return res;
}
GC_EVENT_POST_STOP_WORLD,
GC_EVENT_PRE_START_WORLD,
GC_EVENT_POST_START_WORLD
-} GCEventType;
+} GC_EventType;
-GC_API void (*GC_notify_event) GC_PROTO((GCEventType event_type));
- /* Invoked at specific points during every collection.
- */
+GC_API void GC_set_on_collection_event GC_PROTO((void (*) (GC_EventType)));
+ /* Set callback invoked at specific points */
+ /* during every collection. */
GC_API void (*GC_on_heap_resize) GC_PROTO((size_t new_size));
/* Invoked when the heap grows or shrinks */
/* Return the signal used by the gc to resume threads on posix platforms. */
/* Return -1 otherwise. */
-int GC_get_restart_signal GC_PROTO((void));
+int GC_get_thr_restart_signal GC_PROTO((void));
+
+/* Explicitly enable GC_register_my_thread() invocation. */
+GC_API void GC_allow_register_threads GC_PROTO((void));
/* Disable garbage collection. Even GC_gcollect calls will be */
/* ineffective. */
GC_API int GC_register_long_link GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
GC_API int GC_unregister_long_link GC_PROTO((GC_PTR * /* link */));
+/* Verdicts a toggleref callback may return for a registered object.  */
+typedef enum {
+ GC_TOGGLE_REF_DROP, /* drop the entry from the toggleref array */
+ GC_TOGGLE_REF_STRONG, /* keep the object via a strong reference */
+ GC_TOGGLE_REF_WEAK /* keep the object via a hidden (weak) pointer */
+} GC_ToggleRefStatus;
/* toggleref support */
-GC_API void GC_toggleref_register_callback GC_PROTO((int (*proccess_toggleref) (GC_PTR obj)));
-GC_API void GC_toggleref_add (GC_PTR object, int strong_ref);
+GC_API void GC_set_toggleref_func GC_PROTO(
+ (GC_ToggleRefStatus (*proccess_toggleref) (GC_PTR obj)));
+GC_API int GC_toggleref_add (GC_PTR object, int strong_ref);
+ /* Returns GC_SUCCESS if registration succeeded (or no callback */
+ /* registered yet), GC_NO_MEMORY if failed for lack of memory. */
/* finalizer callback support */
-GC_API void GC_set_finalizer_notify_proc GC_PROTO((void (*object_finalized) (GC_PTR obj)));
+GC_API void GC_set_await_finalize_proc GC_PROTO((void (*object_finalized) (GC_PTR obj)));
/* Returns !=0 if GC_invoke_finalizers has something to do. */
/* Always returns its argument. */
GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
+/* Result codes shared by GC_register_my_thread() and                 */
+/* GC_toggleref_add().                                                */
+#define GC_SUCCESS 0 /* Operation succeeded. */
+#define GC_DUPLICATE 1 /* Was already registered. */
+#define GC_NO_MEMORY 2 /* Failure due to lack of memory. */
+#define GC_UNIMPLEMENTED 3 /* Not yet implemented on the platform. */
+
+/* Structure representing the base of a thread stack. */
+struct GC_stack_base {
+ void * mem_base; /* Base of memory stack. */
+};
+
+/* Register the current thread, with the indicated stack base. */
+/* Returns GC_SUCCESS on success, GC_DUPLICATE if already registered. */
+/* On some platforms it returns GC_UNIMPLEMENTED. */
+GC_API int GC_register_my_thread GC_PROTO((struct GC_stack_base *));
+
/* Returns 1 if the calling thread is registered with the GC, 0 otherwise */
GC_API int GC_thread_is_registered GC_PROTO((void));
#if defined(GC_WIN32_THREADS) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)
# include <windows.h>
+# ifdef GC_INSIDE_DLL
BOOL WINAPI GC_DllMain(HINSTANCE inst, ULONG reason, LPVOID reserved);
+# endif
/*
* All threads must be created using GC_CreateThread, so that they will be
extern GC_bool GC_world_stopped;
#endif
+extern void (*GC_notify_event) GC_PROTO((GC_EventType));
+
/* Operations */
# ifndef abs
# define abs(x) ((x) < 0? (-(x)) : (x))
/* We use the allocation lock to protect thread-related data structures. */
+/* Per-thread small-object free lists, grouped into a named struct so */
+/* that the lists' offsets inside GC_Thread_Rep can be computed       */
+/* relative to its tlfs member (used by clients that emit inline      */
+/* allocation sequences via G_STRUCT_OFFSET).                         */
+#ifdef THREAD_LOCAL_ALLOC
+# if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
+# define GRANULARITY 16
+# define NFREELISTS 49
+# else
+# define GRANULARITY 8
+# define NFREELISTS 65
+# endif
+ struct thread_local_freelists {
+ ptr_t ptrfree_freelists[NFREELISTS];
+ ptr_t normal_freelists[NFREELISTS];
+# ifdef GC_GCJ_SUPPORT
+ ptr_t gcj_freelists[NFREELISTS];
+# endif
+ };
+#endif
+
/* The set of all known threads. We intercept thread creation and */
/* joins. */
/* Protected by allocation/GC lock. */
/* reclamation of any data it might */
/* reference. */
# ifdef THREAD_LOCAL_ALLOC
-# if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
-# define GRANULARITY 16
-# define NFREELISTS 49
-# else
-# define GRANULARITY 8
-# define NFREELISTS 65
-# endif
/* The ith free list corresponds to size i*GRANULARITY */
# define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
# define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
# define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
(NFREELISTS-1)*GRANULARITY)
- ptr_t ptrfree_freelists[NFREELISTS];
- ptr_t normal_freelists[NFREELISTS];
-# ifdef GC_GCJ_SUPPORT
- ptr_t gcj_freelists[NFREELISTS];
-# endif
+ struct thread_local_freelists tlfs;
/* Free lists contain either a pointer or a small count */
/* reflecting the number of granules allocated at that */
/* size. */
#endif
}
-int GC_get_restart_signal GC_PROTO(())
+int GC_get_thr_restart_signal GC_PROTO(())
{
#if defined(SIG_THR_RESTART) && defined(GC_PTHREADS) && !defined(GC_MACOSX_THREADS) && !defined(GC_OPENBSD_THREADS)
return SIG_THR_RESTART;
ABORT("Failed to set thread specific allocation pointers");
}
for (i = 1; i < NFREELISTS; ++i) {
- p -> ptrfree_freelists[i] = (ptr_t)1;
- p -> normal_freelists[i] = (ptr_t)1;
+ p -> tlfs.ptrfree_freelists[i] = (ptr_t)1;
+ p -> tlfs.normal_freelists[i] = (ptr_t)1;
# ifdef GC_GCJ_SUPPORT
- p -> gcj_freelists[i] = (ptr_t)1;
+ p -> tlfs.gcj_freelists[i] = (ptr_t)1;
# endif
}
/* Set up the size 0 free lists. */
- p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
- p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
+ p -> tlfs.ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
+ p -> tlfs.normal_freelists[0] = (ptr_t)(&size_zero_object);
# ifdef GC_GCJ_SUPPORT
- p -> gcj_freelists[0] = (ptr_t)(-1);
+ p -> tlfs.gcj_freelists[0] = (ptr_t)(-1);
# endif
}
# ifndef HANDLE_FORK
GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
# endif
- return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
- return_freelists(p -> normal_freelists, GC_objfreelist);
+ return_freelists(p -> tlfs.ptrfree_freelists, GC_aobjfreelist);
+ return_freelists(p -> tlfs.normal_freelists, GC_objfreelist);
# ifdef GC_GCJ_SUPPORT
- return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
+ return_freelists(p -> tlfs.gcj_freelists, GC_gcjobjfreelist);
# endif
}
GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
UNLOCK();
# endif
- my_fl = ((GC_thread)tsd) -> normal_freelists + index;
+ my_fl = ((GC_thread)tsd) -> tlfs.normal_freelists + index;
my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
ptr_t next = obj_link(my_entry);
} else {
int index = INDEX_FROM_BYTES(bytes);
ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
- -> ptrfree_freelists + index;
+ -> tlfs.ptrfree_freelists + index;
ptr_t my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
} else {
int index = INDEX_FROM_BYTES(bytes);
ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
- -> gcj_freelists + index;
+ -> tlfs.gcj_freelists + index;
ptr_t my_entry = *my_fl;
if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
GC_PTR result = (GC_PTR)my_entry;
void * GC_local_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
{
ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
- -> gcj_freelists + lw;
+ -> tlfs.gcj_freelists + lw;
ptr_t my_entry = *my_fl;
GC_ASSERT(GC_gcj_malloc_initialized);
for (i = 0; i < THREAD_TABLE_SZ; ++i) {
for (p = GC_threads[i]; 0 != p; p = p -> next) {
for (j = 1; j < NFREELISTS; ++j) {
- q = p -> ptrfree_freelists[j];
+ q = p -> tlfs.ptrfree_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
- q = p -> normal_freelists[j];
+ q = p -> tlfs.normal_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
# ifdef GC_GCJ_SUPPORT
- q = p -> gcj_freelists[j];
+ q = p -> tlfs.gcj_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
# endif /* GC_GCJ_SUPPORT */
}
return me;
}
+/* GC v7 compatibility: in this collector, thread registration needs  */
+/* no explicit enabling, so this is a stub.                           */
-int GC_thread_register_foreign (void *base_addr)
+void GC_allow_register_threads (void)
+{
+ /* No-op for GC pre-v7. */
+}
+
+/* Register the calling (foreign) thread with the collector, using    */
+/* SB->mem_base as its stack base.  Returns GC_DUPLICATE if the       */
+/* thread is already known, GC_SUCCESS otherwise.                     */
+int GC_register_my_thread (struct GC_stack_base *sb)
{
struct start_info si = { 0, }; /* stacked for legibility & locking */
GC_thread me;
# ifdef DEBUG_THREADS
- GC_printf1( "GC_thread_register_foreign %p\n", &si );
+ GC_printf1( "GC_register_my_thread %p\n", &si );
# endif
si.flags = FOREIGN_THREAD;
if (!parallel_initialized) GC_init_parallel();
LOCK();
if (!GC_thr_initialized) GC_thr_init();
-
+ me = GC_lookup_thread(pthread_self());
UNLOCK();
+ if (me != NULL)
+ return GC_DUPLICATE;
- me = GC_start_routine_head(&si, base_addr, NULL, NULL);
-
- return me != NULL;
+ (void)GC_start_routine_head(&si, sb -> mem_base, NULL, NULL);
+ return GC_SUCCESS;
}
void * GC_start_routine(void * arg)
return ptr ? 1 : 0;
}
+/* GC v7 compatibility: in this collector, thread registration needs  */
+/* no explicit enabling, so this is a stub.                           */
-int GC_thread_register_foreign (void *base_addr)
+void GC_allow_register_threads (void)
+{
+ /* No-op for GC pre-v7. */
+}
+
+/* Explicit thread registration is not supported in this              */
+/* configuration.                                                     */
+int GC_register_my_thread (struct GC_stack_base *sb)
{
/* FIXME: */
- return 0;
+ return GC_UNIMPLEMENTED;
}
void GC_register_altstack (void *stack, int stack_size, void *altstack, int altstack_size)
#endif
}
+/* GC v7 compatibility: in this collector, thread registration needs  */
+/* no explicit enabling, so this is a stub.                           */
+void GC_allow_register_threads (void)
+{
+ /* No-op for GC pre-v7. */
+}
+
+/* Win32: threads are registered by DllMain when the collector lives  */
+/* in a DLL; otherwise explicit registration is not implemented here. */
+int GC_register_my_thread (struct GC_stack_base *sb)
+{
+# if defined(GC_DLL) || defined(GC_INSIDE_DLL)
+ /* Registered by DllMain. */
+ return GC_DUPLICATE;
+# else
+ /* TODO: Implement. */
+ return GC_UNIMPLEMENTED;
+# endif
+}
+
+
void GC_register_altstack (void *stack, int stack_size, void *altstack, int altstack_size)
{
}
GC_finalizer_notifier = mono_gc_finalize_notify;
GC_init_gcj_malloc (5, NULL);
+ GC_allow_register_threads ();
if ((env = g_getenv ("MONO_GC_PARAMS"))) {
char **ptr, **opts = g_strsplit (env, ",", -1);
return GC_thread_is_registered ();
}
-extern int GC_thread_register_foreign (void *base_addr);
-
gboolean
mono_gc_register_thread (void *baseptr)
{
+/* Attach the current thread to the Boehm collector.  Returns INFO on */
+/* success (GC_DUPLICATE counts as success: the thread was already    */
+/* known to the GC) or NULL if registration is unimplemented.         */
static void*
boehm_thread_register (MonoThreadInfo* info, void *baseptr)
{
- if (mono_gc_is_gc_thread())
- return info;
-#if !defined(HOST_WIN32)
- return GC_thread_register_foreign (baseptr) ? info : NULL;
-#else
- return NULL;
-#endif
+ struct GC_stack_base sb;
+ int res;
+
+ /* TODO: use GC_get_stack_base instead of baseptr. */
+ sb.mem_base = baseptr;
+ res = GC_register_my_thread (&sb);
+ if (res == GC_UNIMPLEMENTED)
+ return NULL; /* Cannot happen with GC v7+. */
+ return info;
}
static void
static gint64 gc_start_time;
static void
-on_gc_notification (GCEventType event)
+on_gc_notification (GC_EventType event)
{
MonoGCEvent e = (MonoGCEvent)event;
+/* Install the callbacks through which libgc reports collection       */
+/* phases and heap resizes.                                           */
void
mono_gc_enable_events (void)
{
- GC_notify_event = on_gc_notification;
+ GC_set_on_collection_event (on_gc_notification);
GC_on_heap_resize = on_gc_heap_resize;
}
+/* Expose the signal libgc uses to resume threads on POSIX platforms  */
+/* (-1 where no such signal exists).                                  */
int
mono_gc_get_restart_signal (void)
{
- return GC_get_restart_signal ();
+ return GC_get_thr_restart_signal ();
}
#if defined(USE_COMPILER_TLS) && defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
mono_mb_emit_byte (mb, 0x0D); /* CEE_MONO_TLS */
mono_mb_emit_i4 (mb, tls_key);
if (atype == ATYPE_FREEPTR || atype == ATYPE_FREEPTR_FOR_BOX || atype == ATYPE_STRING)
- mono_mb_emit_icon (mb, G_STRUCT_OFFSET (struct GC_Thread_Rep, ptrfree_freelists));
+ mono_mb_emit_icon (mb, G_STRUCT_OFFSET (struct GC_Thread_Rep, tlfs)
+ + G_STRUCT_OFFSET (struct thread_local_freelists,
+ ptrfree_freelists));
else if (atype == ATYPE_NORMAL)
- mono_mb_emit_icon (mb, G_STRUCT_OFFSET (struct GC_Thread_Rep, normal_freelists));
+ mono_mb_emit_icon (mb, G_STRUCT_OFFSET (struct GC_Thread_Rep, tlfs)
+ + G_STRUCT_OFFSET (struct thread_local_freelists,
+ normal_freelists));
else if (atype == ATYPE_GCJ)
- mono_mb_emit_icon (mb, G_STRUCT_OFFSET (struct GC_Thread_Rep, gcj_freelists));
+ mono_mb_emit_icon (mb, G_STRUCT_OFFSET (struct GC_Thread_Rep, tlfs)
+ + G_STRUCT_OFFSET (struct thread_local_freelists,
+ gcj_freelists));
else
g_assert_not_reached ();
mono_mb_emit_byte (mb, MONO_CEE_ADD);
+/* Register OBJECT as a toggleref with libgc; aborts via g_error() if */
+/* libgc cannot allocate space for the entry.                         */
void
mono_gc_toggleref_add (MonoObject *object, mono_bool strong_ref)
{
- GC_toggleref_add ((GC_PTR)object, (int)strong_ref);
+ if (GC_toggleref_add ((GC_PTR)object, (int)strong_ref) != GC_SUCCESS)
+ g_error ("GC_toggleref_add failed\n");
}
+/* Forward the Mono toggleref callback to libgc.  NOTE(review): the   */
+/* function-pointer cast assumes MonoToggleRefStatus and              */
+/* GC_ToggleRefStatus have identical values/representation -- verify. */
void
mono_gc_toggleref_register_callback (MonoToggleRefStatus (*proccess_toggleref) (MonoObject *obj))
{
- GC_toggleref_register_callback ((int (*) (GC_PTR obj)) proccess_toggleref);
+ GC_set_toggleref_func ((GC_ToggleRefStatus (*) (GC_PTR obj)) proccess_toggleref);
}
/* Test support code */
fin_callbacks = *callbacks;
- GC_set_finalizer_notify_proc ((void (*) (GC_PTR))fin_notifier);
+ GC_set_await_finalize_proc ((void (*) (GC_PTR))fin_notifier);
}
#define BITMAP_SIZE (sizeof (*((HandleData *)NULL)->bitmap) * CHAR_BIT)
# endif
# endif
+# define GC_INSIDE_DLL
# include <gc.h>
# include <gc_typed.h>
# include <gc_mark.h>