1 #include "private/pthread_support.h"
3 # if defined(GC_DARWIN_THREADS)
5 #include "mono/utils/mono-compiler.h"
7 #ifdef MONO_DEBUGGER_SUPPORTED
8 #include "include/libgc-mono-debugger.h"
11 /* From "Inside Mac OS X - Mach-O Runtime Architecture" published by Apple
13 "The space beneath the stack pointer, where a new stack frame would normally
14 be allocated, is called the red zone. This area as shown in Figure 3-2 may
15 be used for any purpose as long as a new stack frame does not need to be
18 Page 50: "If a leaf procedure's red zone usage would exceed 224 bytes, then
19 it must set up a stack frame just like routines that call other routines."
/* Size of the PowerPC "red zone" -- the area below the stack pointer that
 * leaf routines may use without allocating a frame (see the Apple Mach-O
 * runtime notes quoted at the top of this file).  It may contain live data,
 * so the stack scan must start PPC_RED_ZONE_SIZE bytes below r1.
 * 224 bytes on 32-bit PPC, 320 bytes on 64-bit PPC. */
23 # define PPC_RED_ZONE_SIZE 224
24 # elif CPP_WORDSZ == 64
25 # define PPC_RED_ZONE_SIZE 320
/* Header of a PowerPC ABI stack frame, used by FindTopOfStack() to walk
 * the saved-SP back chain.  Field layout follows the PPC calling
 * convention frame header. */
29 typedef struct StackFrame {
30 unsigned long savedSP;   /* back chain: caller's SP; 0 terminates the walk */
31 unsigned long savedCR;   /* saved condition register */
32 unsigned long savedLR;   /* saved link register (return address) */
33 unsigned long reserved[2];
34 unsigned long savedRTOC; /* saved TOC (r2) pointer */
/* Walk the PowerPC frame back chain starting from stack_start (or from
 * the current stack pointer r1 when stack_start is 0) and return the
 * address of the outermost frame reached.  The result is used as the
 * "hi" bound of the stack region pushed to the collector. */
37 unsigned long FindTopOfStack(unsigned int stack_start) {
40 if (stack_start == 0) {
/* Read the current frame pointer from 0(r1); 32-bit load. */
43 __asm__ volatile("lwz %0,0(r1)" : "=r" (frame));
/* 64-bit load.  NOTE(review): "ldz" is not a valid PPC64 mnemonic --
 * this looks like it should be "ld"; confirm against the assembler. */
45 __asm__ volatile("ldz %0,0(r1)" : "=r" (frame));
49 frame = (StackFrame *)stack_start;
53 /* GC_printf1("FindTopOfStack start at sp = %p\n", frame); */
/* A zero back chain marks the outermost frame. */
56 if (frame->savedSP == 0) break;
57 /* if there are no more stack frames, stop */
59 frame = (StackFrame*)frame->savedSP;
61 /* we do these next two checks after going to the next frame
62 because the LR for the first stack frame in the loop
63 is not set up on purpose, so we shouldn't check it. */
64 if ((frame->savedLR & ~3) == 0) break; /* if the next LR is bogus, stop */
65 if ((~(frame->savedLR) & ~3) == 0) break; /* ditto */
69 /* GC_printf1("FindTopOfStack finish at sp = %p\n", frame); */
72 return (unsigned long)frame;
75 #ifdef DARWIN_DONT_PARSE_STACK
/* Push every registered thread's register contents and stack onto the
 * collector's mark state.  This DARWIN_DONT_PARSE_STACK variant walks
 * the GC_threads hash table; each suspended thread's registers are
 * fetched with thread_get_state() and pushed individually as potential
 * roots, then the stack span [lo,hi) is pushed.  NOTE(review): the
 * declarations of r, i, me, p, lo and hi are elided in this excerpt;
 * hi is presumably taken from the recorded stack base -- verify. */
76 void GC_push_all_stacks() {
/* Per-architecture thread-state buffer and its element count. */
83 ppc_thread_state_t state;
84 mach_msg_type_number_t thread_state_count = PPC_THREAD_STATE_COUNT;
86 i386_thread_state_t state;
87 mach_msg_type_number_t thread_state_count = i386_THREAD_STATE_COUNT;
89 arm_thread_state_t state;
90 mach_msg_type_number_t thread_state_count = ARM_THREAD_STATE_COUNT;
92 # error FIXME for non-x86 || ppc architectures
93 mach_msg_type_number_t thread_state_count = MACHINE_THREAD_STATE_COUNT;
97 if (!GC_thr_initialized) GC_thr_init();
/* Iterate over every bucket and chain entry of the thread table. */
99 for(i=0;i<THREAD_TABLE_SZ;i++) {
100 for(p=GC_threads[i];p!=0;p=p->next) {
101 if(p -> flags & FINISHED) continue;
/* The current thread is handled separately (its registers are live). */
102 if(pthread_equal(p->id,me)) {
105 /* Get the thread state (registers, etc) */
106 r = thread_get_state(
107 p->stop_info.mach_thread,
108 GC_MACH_THREAD_STATE_FLAVOR,
110 &thread_state_count);
111 if(r != KERN_SUCCESS) ABORT("thread_get_state failed");
/* Newer SDKs prefix thread-state register fields with "__"; the
 * _STRUCT_* macros distinguish the two header generations. */
114 #if defined(_STRUCT_X86_EXCEPTION_STATE32)
/* Push the x86 general-purpose registers as roots (esp bounds the
 * stack scan instead of being pushed). */
117 GC_push_one(state.__eax);
118 GC_push_one(state.__ebx);
119 GC_push_one(state.__ecx);
120 GC_push_one(state.__edx);
121 GC_push_one(state.__edi);
122 GC_push_one(state.__esi);
123 GC_push_one(state.__ebp);
/* Same registers, pre-__ field naming. */
127 GC_push_one(state.eax);
128 GC_push_one(state.ebx);
129 GC_push_one(state.ecx);
130 GC_push_one(state.edx);
131 GC_push_one(state.edi);
132 GC_push_one(state.esi);
133 GC_push_one(state.ebp);
135 #elif defined(POWERPC)
136 #if defined(_STRUCT_PPC_EXCEPTION_STATE)
/* Scan from below r1 to include the red zone (leaf-routine scratch). */
137 lo = (void*)(state.__r1 - PPC_RED_ZONE_SIZE);
/* Push all PPC GPRs except r1 (the SP, covered by the stack scan). */
139 GC_push_one(state.__r0);
140 GC_push_one(state.__r2);
141 GC_push_one(state.__r3);
142 GC_push_one(state.__r4);
143 GC_push_one(state.__r5);
144 GC_push_one(state.__r6);
145 GC_push_one(state.__r7);
146 GC_push_one(state.__r8);
147 GC_push_one(state.__r9);
148 GC_push_one(state.__r10);
149 GC_push_one(state.__r11);
150 GC_push_one(state.__r12);
151 GC_push_one(state.__r13);
152 GC_push_one(state.__r14);
153 GC_push_one(state.__r15);
154 GC_push_one(state.__r16);
155 GC_push_one(state.__r17);
156 GC_push_one(state.__r18);
157 GC_push_one(state.__r19);
158 GC_push_one(state.__r20);
159 GC_push_one(state.__r21);
160 GC_push_one(state.__r22);
161 GC_push_one(state.__r23);
162 GC_push_one(state.__r24);
163 GC_push_one(state.__r25);
164 GC_push_one(state.__r26);
165 GC_push_one(state.__r27);
166 GC_push_one(state.__r28);
167 GC_push_one(state.__r29);
168 GC_push_one(state.__r30);
169 GC_push_one(state.__r31);
/* Pre-__ field naming variant of the same PPC register pushes. */
171 lo = (void*)(state.r1 - PPC_RED_ZONE_SIZE);
173 GC_push_one(state.r0);
174 GC_push_one(state.r2);
175 GC_push_one(state.r3);
176 GC_push_one(state.r4);
177 GC_push_one(state.r5);
178 GC_push_one(state.r6);
179 GC_push_one(state.r7);
180 GC_push_one(state.r8);
181 GC_push_one(state.r9);
182 GC_push_one(state.r10);
183 GC_push_one(state.r11);
184 GC_push_one(state.r12);
185 GC_push_one(state.r13);
186 GC_push_one(state.r14);
187 GC_push_one(state.r15);
188 GC_push_one(state.r16);
189 GC_push_one(state.r17);
190 GC_push_one(state.r18);
191 GC_push_one(state.r19);
192 GC_push_one(state.r20);
193 GC_push_one(state.r21);
194 GC_push_one(state.r22);
195 GC_push_one(state.r23);
196 GC_push_one(state.r24);
197 GC_push_one(state.r25);
198 GC_push_one(state.r26);
199 GC_push_one(state.r27);
200 GC_push_one(state.r28);
201 GC_push_one(state.r29);
202 GC_push_one(state.r30);
203 GC_push_one(state.r31);
/* ARM: sp bounds the stack scan; push r0-r12, lr, pc, cpsr as roots. */
206 lo = (void*)state.__sp;
208 GC_push_one(state.__r[0]);
209 GC_push_one(state.__r[1]);
210 GC_push_one(state.__r[2]);
211 GC_push_one(state.__r[3]);
212 GC_push_one(state.__r[4]);
213 GC_push_one(state.__r[5]);
214 GC_push_one(state.__r[6]);
215 GC_push_one(state.__r[7]);
216 GC_push_one(state.__r[8]);
217 GC_push_one(state.__r[9]);
218 GC_push_one(state.__r[10]);
219 GC_push_one(state.__r[11]);
220 GC_push_one(state.__r[12]);
221 /* GC_push_one(state.__sp); */
222 GC_push_one(state.__lr);
223 GC_push_one(state.__pc);
224 GC_push_one(state.__cpsr);
226 # error FIXME for non-x86 || ppc architectures
/* Main thread's stack top comes from the statically known stack base
 * rather than per-thread bookkeeping (exact source elided here). */
229 if(p->flags & MAIN_THREAD)
234 GC_printf3("Darwin: Stack for thread 0x%lx = [%lx,%lx)\n",
235 (unsigned long) p -> id,
/* Conservatively scan the whole stack span for pointers. */
240 GC_push_all_stack(lo,hi);
241 } /* for(p=GC_threads[i]...) */
242 } /* for(i=0;i<THREAD_TABLE_SZ...) */
245 #else /* !DARWIN_DONT_PARSE_STACK; Use FindTopOfStack() */
/* Push every thread's registers and stack onto the collector's mark
 * state.  This variant enumerates threads with task_threads() and
 * derives the top-of-stack by walking frame back chains with
 * FindTopOfStack() instead of using recorded stack bounds. */
247 void GC_push_all_stacks() {
253 thread_act_array_t act_list = 0;
254 mach_msg_type_number_t listcount = 0;
256 me = mach_thread_self();
257 if (!GC_thr_initialized) GC_thr_init();
259 my_task = current_task();
260 r = task_threads(my_task, &act_list, &listcount);
261 if(r != KERN_SUCCESS) ABORT("task_threads failed");
262 for(i = 0; i < listcount; i++) {
263 thread_act_t thread = act_list[i];
/* For the current thread: walk frames from the live stack pointer. */
266 hi = (ptr_t)FindTopOfStack(0);
268 # if defined(POWERPC)
269 # if CPP_WORDSZ == 32
270 ppc_thread_state_t info;
272 ppc_thread_state64_t info;
274 mach_msg_type_number_t outCount = THREAD_STATE_MAX;
275 r = thread_get_state(thread, GC_MACH_THREAD_STATE_FLAVOR,
276 (natural_t *)&info, &outCount);
/* Thread may have exited since task_threads(); just skip it. */
277 if(r != KERN_SUCCESS) continue;
/* Newer SDKs prefix thread-state register fields with "__". */
279 #if defined(_STRUCT_PPC_EXCEPTION_STATE)
/* Scan from below r1 to include the red zone; walk frames for hi. */
280 lo = (void*)(info.__r1 - PPC_RED_ZONE_SIZE);
281 hi = (ptr_t)FindTopOfStack(info.__r1);
/* Push all PPC GPRs except r1 (the SP, covered by the stack scan). */
283 GC_push_one(info.__r0);
284 GC_push_one(info.__r2);
285 GC_push_one(info.__r3);
286 GC_push_one(info.__r4);
287 GC_push_one(info.__r5);
288 GC_push_one(info.__r6);
289 GC_push_one(info.__r7);
290 GC_push_one(info.__r8);
291 GC_push_one(info.__r9);
292 GC_push_one(info.__r10);
293 GC_push_one(info.__r11);
294 GC_push_one(info.__r12);
295 GC_push_one(info.__r13);
296 GC_push_one(info.__r14);
297 GC_push_one(info.__r15);
298 GC_push_one(info.__r16);
299 GC_push_one(info.__r17);
300 GC_push_one(info.__r18);
301 GC_push_one(info.__r19);
302 GC_push_one(info.__r20);
303 GC_push_one(info.__r21);
304 GC_push_one(info.__r22);
305 GC_push_one(info.__r23);
306 GC_push_one(info.__r24);
307 GC_push_one(info.__r25);
308 GC_push_one(info.__r26);
309 GC_push_one(info.__r27);
310 GC_push_one(info.__r28);
311 GC_push_one(info.__r29);
312 GC_push_one(info.__r30);
313 GC_push_one(info.__r31);
/* Pre-__ field naming variant of the same PPC register pushes. */
315 lo = (void*)(info.r1 - PPC_RED_ZONE_SIZE);
316 hi = (ptr_t)FindTopOfStack(info.r1);
318 GC_push_one(info.r0);
319 GC_push_one(info.r2);
320 GC_push_one(info.r3);
321 GC_push_one(info.r4);
322 GC_push_one(info.r5);
323 GC_push_one(info.r6);
324 GC_push_one(info.r7);
325 GC_push_one(info.r8);
326 GC_push_one(info.r9);
327 GC_push_one(info.r10);
328 GC_push_one(info.r11);
329 GC_push_one(info.r12);
330 GC_push_one(info.r13);
331 GC_push_one(info.r14);
332 GC_push_one(info.r15);
333 GC_push_one(info.r16);
334 GC_push_one(info.r17);
335 GC_push_one(info.r18);
336 GC_push_one(info.r19);
337 GC_push_one(info.r20);
338 GC_push_one(info.r21);
339 GC_push_one(info.r22);
340 GC_push_one(info.r23);
341 GC_push_one(info.r24);
342 GC_push_one(info.r25);
343 GC_push_one(info.r26);
344 GC_push_one(info.r27);
345 GC_push_one(info.r28);
346 GC_push_one(info.r29);
347 GC_push_one(info.r30);
348 GC_push_one(info.r31);
350 # elif defined(I386) /* !POWERPC */
351 /* FIXME: Remove after testing: */
352 WARN("This is completely untested and likely will not work\n", 0);
353 i386_thread_state_t info;
354 mach_msg_type_number_t outCount = THREAD_STATE_MAX;
355 r = thread_get_state(thread, GC_MACH_THREAD_STATE_FLAVOR,
356 (natural_t *)&info, &outCount);
357 if(r != KERN_SUCCESS) continue;
359 #if defined(_STRUCT_X86_EXCEPTION_STATE32)
/* esp bounds the stack scan; push the remaining registers as roots. */
360 lo = (void*)info.__esp;
361 hi = (ptr_t)FindTopOfStack(info.__esp);
363 GC_push_one(info.__eax);
364 GC_push_one(info.__ebx);
365 GC_push_one(info.__ecx);
366 GC_push_one(info.__edx);
367 GC_push_one(info.__edi);
368 GC_push_one(info.__esi);
369 GC_push_one(info.__ebp);
370 /* GC_push_one(info.__esp); */
371 GC_push_one(info.__ss);
372 GC_push_one(info.__eip);
373 GC_push_one(info.__cs);
374 GC_push_one(info.__ds);
375 GC_push_one(info.__es);
376 GC_push_one(info.__fs);
377 GC_push_one(info.__gs);
/* Pre-__ field naming variant of the same x86 register pushes. */
379 lo = (void*)info.esp;
380 hi = (ptr_t)FindTopOfStack(info.esp);
382 GC_push_one(info.eax);
383 GC_push_one(info.ebx);
384 GC_push_one(info.ecx);
385 GC_push_one(info.edx);
386 GC_push_one(info.edi);
387 GC_push_one(info.esi);
388 GC_push_one(info.ebp);
389 /* GC_push_one(info.esp); */
390 GC_push_one(info.ss);
391 GC_push_one(info.eip);
392 GC_push_one(info.cs);
393 GC_push_one(info.ds);
394 GC_push_one(info.es);
395 GC_push_one(info.fs);
396 GC_push_one(info.gs);
398 # elif defined(ARM) /* !I386 */
399 arm_thread_state_t info;
400 mach_msg_type_number_t outCount = THREAD_STATE_MAX;
401 r = thread_get_state(thread, GC_MACH_THREAD_STATE_FLAVOR,
402 (natural_t *)&info, &outCount);
403 if(r != KERN_SUCCESS) continue;
/* ARM: sp bounds the stack scan; push r0-r12, lr, pc, cpsr as roots. */
405 lo = (void*)info.__sp;
406 hi = (ptr_t)FindTopOfStack(info.__sp);
408 GC_push_one(info.__r[0]);
409 GC_push_one(info.__r[1]);
410 GC_push_one(info.__r[2]);
411 GC_push_one(info.__r[3]);
412 GC_push_one(info.__r[4]);
413 GC_push_one(info.__r[5]);
414 GC_push_one(info.__r[6]);
415 GC_push_one(info.__r[7]);
416 GC_push_one(info.__r[8]);
417 GC_push_one(info.__r[9]);
418 GC_push_one(info.__r[10]);
419 GC_push_one(info.__r[11]);
420 GC_push_one(info.__r[12]);
421 /* GC_push_one(info.__sp); */
422 GC_push_one(info.__lr);
423 GC_push_one(info.__pc);
424 GC_push_one(info.__cpsr);
428 GC_printf3("Darwin: Stack for thread 0x%lx = [%lx,%lx)\n",
429 (unsigned long) thread,
/* Conservatively scan the whole stack span for pointers. */
434 GC_push_all_stack(lo, hi);
/* Drop the extra port reference task_threads() handed us. */
435 mach_port_deallocate(my_task, thread);
436 } /* for(p=GC_threads[i]...) */
/* Free the thread list buffer allocated by task_threads(). */
437 vm_deallocate(my_task, (vm_address_t)act_list, sizeof(thread_t) * listcount);
438 mach_port_deallocate(my_task, me);
440 #endif /* !DARWIN_DONT_PARSE_STACK */
/* Port of the thread running the mach exception handler, if one was
 * registered via GC_darwin_register_mach_handler_thread(); that thread
 * must never be suspended by the collector. */
442 static mach_port_t GC_mach_handler_thread;
443 static int GC_use_mach_handler_thread = 0;
/* Fixed-size table recording the threads seen while stopping the
 * world, plus whether each was already suspended before we stopped it. */
445 #define SUSPEND_THREADS_SIZE 2048
446 static struct GC_mach_thread GC_mach_threads[SUSPEND_THREADS_SIZE];
447 static int GC_mach_threads_count; /* number of valid entries above */
/* Reset the GC_mach_threads table in preparation for a new
 * stop-the-world cycle. */
449 void GC_stop_init() {
452 for (i = 0; i < SUSPEND_THREADS_SIZE; i++) {
453 GC_mach_threads[i].thread = 0;
454 GC_mach_threads[i].already_suspended = 0;
456 GC_mach_threads_count = 0;
459 /* returns true if there's a thread in act_list that wasn't in old_list */
/* Suspend every thread in act_list that is not already present in
 * old_list, recording each newly seen thread in GC_mach_threads.
 * Threads that were already suspended (by someone else, or because
 * they exited between enumeration and inspection) are marked
 * already_suspended so GC_start_world() will not resume them. */
460 int GC_suspend_thread_list(thread_act_array_t act_list, int count,
461 thread_act_array_t old_list, int old_count) {
462 mach_port_t my_thread = mach_thread_self();
467 for(i = 0; i < count; i++) {
468 thread_act_t thread = act_list[i];
470 GC_printf1("Attempting to suspend thread %p\n", thread);
472 /* find the current thread in the old list */
474 for(j = 0; j < old_count; j++) {
475 thread_act_t old_thread = old_list[j];
476 if (old_thread == thread) {
482 /* add it to the GC_mach_threads list */
483 GC_mach_threads[GC_mach_threads_count].thread = thread;
484 /* default is not suspended */
485 GC_mach_threads[GC_mach_threads_count].already_suspended = 0;
/* Never suspend ourselves or the registered mach exception-handler
 * thread -- either would deadlock the stop. */
489 if (thread != my_thread &&
490 (!GC_use_mach_handler_thread
491 || (GC_use_mach_handler_thread
492 && GC_mach_handler_thread != thread))) {
493 struct thread_basic_info info;
494 mach_msg_type_number_t outCount = THREAD_INFO_MAX;
495 kern_result = thread_info(thread, THREAD_BASIC_INFO,
496 (thread_info_t)&info, &outCount);
497 if(kern_result != KERN_SUCCESS) {
498 /* the thread may have quit since the thread_threads () call
499 * we mark already_suspended so it's not dealt with anymore later
502 GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
503 GC_mach_threads_count++;
508 GC_printf2("Thread state for 0x%lx = %d\n", thread, info.run_state);
/* Remember any pre-existing suspension so we won't resume it later. */
511 GC_mach_threads[GC_mach_threads_count].already_suspended = info.suspend_count;
513 if (info.suspend_count) continue;
516 GC_printf1("Suspending 0x%lx\n", thread);
518 /* Suspend the thread */
519 kern_result = thread_suspend(thread);
520 if(kern_result != KERN_SUCCESS) {
521 /* the thread may have quit since the thread_threads () call
522 * we mark already_suspended so it's not dealt with anymore later
525 GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
526 GC_mach_threads_count++;
/* Only count a table entry for threads not found in old_list. */
531 if (!found) GC_mach_threads_count++;
534 mach_port_deallocate(current_task(), my_thread);
539 /* Caller holds allocation lock. */
/* Stop the world: repeatedly enumerate the task's threads and suspend
 * new ones via GC_suspend_thread_list() until a full pass finds no
 * thread that was not already seen. */
544 task_t my_task = current_task();
545 mach_port_t my_thread = mach_thread_self();
546 kern_return_t kern_result;
547 thread_act_array_t act_list, prev_list;
548 mach_msg_type_number_t listcount, prevcount;
551 GC_printf1("Stopping the world from 0x%lx\n", mach_thread_self());
554 /* clear out the mach threads list table */
557 /* Make sure all free list construction has stopped before we start. */
558 /* No new construction can start, since free list construction is */
559 /* required to acquire and release the GC lock before it starts, */
560 /* and we have the lock. */
561 # ifdef PARALLEL_MARK
562 GC_acquire_mark_lock();
563 GC_ASSERT(GC_fl_builder_count == 0);
564 /* We should have previously waited for it to become zero. */
565 # endif /* PARALLEL_MARK */
567 /* Loop stopping threads until you have gone over the whole list
568 twice without a new one appearing. thread_create() won't
569 return (and thus the thread stop) until the new thread
570 exists, so there is no window whereby you could stop a
571 thread, recognise it is stopped, but then have a new thread
572 it created before stopping show up later.
580 kern_result = task_threads(my_task, &act_list, &listcount);
582 if(kern_result == KERN_SUCCESS) {
583 result = GC_suspend_thread_list(act_list, listcount,
584 prev_list, prevcount);
/* Release the ports and list buffer from the previous iteration. */
587 if(prev_list != NULL) {
588 for(i = 0; i < prevcount; i++)
589 mach_port_deallocate(my_task, prev_list[i]);
591 vm_deallocate(my_task, (vm_address_t)prev_list, sizeof(thread_t) * prevcount);
594 prev_list = act_list;
595 prevcount = listcount;
/* After convergence, free the final thread list too. */
599 for(i = 0; i < listcount; i++)
600 mach_port_deallocate(my_task, act_list[i]);
602 vm_deallocate(my_task, (vm_address_t)act_list, sizeof(thread_t) * listcount);
/* Notify the mprotect-based dirty-bit machinery that the world is
 * stopped (call site elided in this excerpt). */
607 extern void GC_mprotect_stop();
612 # ifdef PARALLEL_MARK
613 GC_release_mark_lock();
616 GC_printf1("World stopped from 0x%lx\n", my_thread);
619 mach_port_deallocate(my_task, my_thread);
622 /* Caller holds allocation lock, and has held it continuously since */
623 /* the world stopped. */
/* Restart the world: resume every thread that GC_stop_world()
 * suspended, skipping threads that were already suspended before the
 * stop (per the already_suspended flag in GC_mach_threads). */
624 void GC_start_world()
626 task_t my_task = current_task();
627 mach_port_t my_thread = mach_thread_self();
630 kern_return_t kern_result;
631 thread_act_array_t act_list;
632 mach_msg_type_number_t listcount;
633 struct thread_basic_info info;
634 mach_msg_type_number_t outCount = THREAD_INFO_MAX;
637 GC_printf0("World starting\n");
/* Let the mprotect-based dirty-bit handler run again before threads
 * restart, so faults are serviced. */
642 extern void GC_mprotect_resume();
643 GC_mprotect_resume();
647 kern_result = task_threads(my_task, &act_list, &listcount);
648 for(i = 0; i < listcount; i++) {
649 thread_act_t thread = act_list[i];
/* Skip ourselves and the registered mach exception-handler thread. */
650 if (thread != my_thread &&
651 (!GC_use_mach_handler_thread ||
652 (GC_use_mach_handler_thread && GC_mach_handler_thread != thread))) {
/* Find this thread in the table built while stopping the world. */
653 for(j = 0; j < GC_mach_threads_count; j++) {
654 if (thread == GC_mach_threads[j].thread) {
655 if (GC_mach_threads[j].already_suspended) {
657 GC_printf1("Not resuming already suspended thread %p\n", thread);
661 kern_result = thread_info(thread, THREAD_BASIC_INFO,
662 (thread_info_t)&info, &outCount);
663 if(kern_result != KERN_SUCCESS) continue;
665 GC_printf2("Thread state for 0x%lx = %d\n", thread,
667 GC_printf1("Resuming 0x%lx\n", thread);
669 /* Resume the thread */
670 kern_result = thread_resume(thread);
671 if(kern_result != KERN_SUCCESS) continue;
/* Drop the extra port reference task_threads() handed us. */
676 mach_port_deallocate(my_task, thread);
/* Free the thread list buffer allocated by task_threads(). */
678 vm_deallocate(my_task, (vm_address_t)act_list, sizeof(thread_t) * listcount);
680 mach_port_deallocate(my_task, my_thread);
682 GC_printf0("World started\n");
/* Record the mach exception-handler thread's port so the stop-world
 * code knows never to suspend it (see GC_suspend_thread_list). */
686 void GC_darwin_register_mach_handler_thread(mach_port_t thread) {
687 GC_mach_handler_thread = thread;
688 GC_use_mach_handler_thread = 1;
691 #ifdef MONO_DEBUGGER_SUPPORTED
/* Vtable of thread callbacks the Mono debugger may install; NULL when
 * no debugger is attached. */
692 GCThreadFunctions *gc_thread_vtable = NULL;
/* Return the address of the calling thread's saved stack-pointer slot,
 * for use by the Mono debugger.  NOTE(review): return type and guards
 * are elided in this excerpt; GC_lookup_thread(pthread_self()) is
 * presumed non-NULL here -- verify the callers guarantee that. */
695 GC_mono_debugger_get_stack_ptr (void)
699 me = GC_lookup_thread (pthread_self ());
700 return &me->stop_info.stack_ptr;