[mono.git] / libgc / include / private / pthread_support.h
#ifndef GC_PTHREAD_SUPPORT_H
#define GC_PTHREAD_SUPPORT_H

# include "private/gc_priv.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_WIN32_THREADS)

#if defined(GC_DARWIN_THREADS)
# include "private/darwin_stop_world.h"
#elif defined(GC_OPENBSD_THREADS)
# include "private/openbsd_stop_world.h"
#else
# include "private/pthread_stop_world.h"
#endif

/* We use the allocation lock to protect thread-related data structures. */

#ifdef THREAD_LOCAL_ALLOC
#   if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
#       define GRANULARITY 16
#       define NFREELISTS 49
#   else
#       define GRANULARITY 8
#       define NFREELISTS 65
#   endif
    struct thread_local_freelists {
        ptr_t ptrfree_freelists[NFREELISTS];
        ptr_t normal_freelists[NFREELISTS];
#       ifdef GC_GCJ_SUPPORT
            ptr_t gcj_freelists[NFREELISTS];
#       endif
    };
#endif
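/*
 * Note (illustrative, not part of the original header): together with
 * SMALL_ENOUGH below, these constants cap requests served from the
 * thread-local lists at roughly (NFREELISTS-1)*GRANULARITY bytes, i.e.
 * 64*8 = 512 bytes in the default case, or 48*16 = 768 bytes when
 * CPP_WORDSZ == 64 and ALIGN_DOUBLE is defined (less any EXTRA_BYTES
 * slop added by ADD_SLOP).
 */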

/* The set of all known threads.  We intercept thread creation and      */
/* joins.                                                               */
/* Protected by allocation/GC lock.                                     */
/* Some of this should be declared volatile, but that's inconsistent    */
/* with some library routine declarations.                              */
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads    */
                                  /* with a given pthread id come       */
                                  /* first.  (All but the first are     */
                                  /* guaranteed to be dead, but we may  */
                                  /* not yet have registered the join.) */
    pthread_t id;
#ifdef HOST_ANDROID
    pid_t kernel_id;
#endif
    /* Extra bookkeeping information the stopping code uses */
    struct thread_stop_info stop_info;

    short flags;
#       define FINISHED 1       /* Thread has exited.   */
#       define DETACHED 2       /* Thread is intended to be detached.   */
#       define MAIN_THREAD 4    /* True for the original thread only.   */
#       define FOREIGN_THREAD 8 /* Will not be de-registered by us      */
    short thread_blocked;       /* Protected by GC lock.                */
                                /* Treated as a boolean value.  If set, */
                                /* thread will acquire GC lock before   */
                                /* doing any pointer manipulations, and */
                                /* has set its sp value.  Thus it does  */
                                /* not need to be sent a signal to stop */
                                /* it.                                  */
    ptr_t stack_end;            /* Cold end of the stack.               */
    ptr_t altstack;             /* The start of the altstack if there is one, NULL otherwise */
    int altstack_size;          /* The size of the altstack if there is one */
    ptr_t stack;                /* The start of the normal stack */
    int stack_size;             /* The size of the normal stack */
#   ifdef IA64
        ptr_t backing_store_end;
        ptr_t backing_store_ptr;
#   endif
    void * status;              /* The value returned from the thread.  */
                                /* Used only to avoid premature         */
                                /* reclamation of any data it might     */
                                /* reference.                           */
#   ifdef THREAD_LOCAL_ALLOC
        /* The ith free list corresponds to size i*GRANULARITY */
#       define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
#       define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
#       define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
                                    (NFREELISTS-1)*GRANULARITY)
        struct thread_local_freelists tlfs;
                /* Free lists contain either a pointer or a small count */
                /* reflecting the number of granules allocated at that  */
                /* size.                                                */
                /* 0 ==> thread-local allocation in use, free list      */
                /*       empty.                                         */
                /* > 0, <= DIRECT_GRANULES ==> Using global allocation, */
                /*       too few objects of this size have been         */
                /*       allocated by this thread.                      */
                /* >= HBLKSIZE  => pointer to nonempty free list.       */
                /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to      */
                /*    local alloc, equivalent to 0.                     */
#       define DIRECT_GRANULES (HBLKSIZE/GRANULARITY)
                /* Don't use local free lists for up to this much       */
                /* allocation.                                          */
#   endif
} * GC_thread;
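/*
 * Illustrative sketch, not part of the original header: roughly how the
 * free-list encoding documented above is consumed on the thread-local
 * allocation fast path (cf. GC_local_malloc).  Treat this as a hedged
 * outline; the real code differs in the counter update and error paths.
 *
 *      if (!SMALL_ENOUGH(bytes)) return GC_malloc(bytes);
 *      index = INDEX_FROM_BYTES(bytes);
 *      my_fl = &me -> tlfs.normal_freelists[index];
 *      my_entry = *my_fl;
 *      if ((word)my_entry >= HBLKSIZE) {
 *          -- nonempty local list: pop the first object
 *          *my_fl = obj_link(my_entry);
 *          return (void *)my_entry;
 *      } else if (my_entry != 0 && (word)my_entry <= DIRECT_GRANULES) {
 *          -- still counting: bump the count and keep using global allocation
 *          *my_fl = my_entry + 1;
 *          return GC_malloc(bytes);
 *      } else {
 *          -- empty (0) or transitioning: refill the local list, then retry
 *          GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
 *      }
 */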

# define THREAD_TABLE_SZ 128    /* Must be power of 2   */
extern volatile GC_thread GC_threads[THREAD_TABLE_SZ];
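/*
 * Illustrative sketch, not part of the original header: a thread's record
 * is found by hashing its pthread id into GC_threads and walking the
 * `next' chain (newest record first), which is roughly what
 * GC_lookup_thread below does while the allocation lock is held.  The
 * exact hash shown here is an assumption, not part of this interface:
 *
 *      GC_thread p = GC_threads[((word)id) % THREAD_TABLE_SZ];
 *      while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
 *      return p;
 */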
#ifdef NACL
extern __thread GC_thread gc_thread_self;
#endif

extern GC_bool GC_thr_initialized;

GC_thread GC_lookup_thread(pthread_t id);

void GC_thread_deregister_foreign (void *data);

void GC_stop_init(void);

extern GC_bool GC_in_thread_creation;
        /* We may currently be in thread creation or destruction.       */
        /* Only set to TRUE while allocation lock is held.              */
        /* When set, it is OK to run GC from unknown thread.            */

#endif /* GC_PTHREADS && !GC_SOLARIS_THREADS.... etc */
#endif /* GC_PTHREAD_SUPPORT_H */