2 * exceptions-x86.c: exception support for x86
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
15 #include <mono/arch/x86/x86-codegen.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/tabledefs.h>
18 #include <mono/metadata/threads.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/metadata/exception.h>
21 #include <mono/metadata/gc-internal.h>
22 #include <mono/metadata/mono-debug.h>
23 #include <mono/utils/mono-mmap.h>
28 #include "debug-mini.h"
/* Generated trampoline (see mono_win32_get_handle_stackoverflow ()) that
 * switches to a usable stack before dispatching a stack-overflow exception. */
static void (*restore_stack) (void *);

/* Per-signal Win32 SEH handlers, registered through win32_seh_set_handler (). */
static MonoW32ExceptionHandler fpe_handler;
static MonoW32ExceptionHandler ill_handler;
static MonoW32ExceptionHandler segv_handler;

/* The top-level exception filter that was active before win32_seh_init ();
 * restored in win32_seh_cleanup (). */
static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;

/* Invoke the registered handler for exception kind _ex (fpe/ill/segv), if any.
 * NOTE(review): casting the sctx pointer to int assumes 32-bit pointers,
 * which holds for this x86-only file. */
#define W32_SEH_HANDLE_EX(_ex) \
	if (_ex##_handler) _ex##_handler((int)sctx)
/*
 * mono_win32_get_handle_stackoverflow (void):
 *
 * Returns a pointer to a method which restores the current context stack
 * and calls handle_exceptions; when done it restores the original stack.
 * The generated code is created once and cached in 'start'.
 */
mono_win32_get_handle_stackoverflow (void)
	static guint8 *start = NULL;

	/* generated signature: restore_stack (void *sigctx) */
	start = code = mono_global_codeman_reserve (128);

	/* load the sigcontext argument into ebx */
	x86_mov_reg_membase (code, X86_EBX, X86_ESP, 4, 4);

	/* move current (overflowed) stack pointer into edi for later restore */
	x86_mov_reg_reg (code, X86_EDI, X86_ESP, 4);

	/* switch to the freed stack recorded in the sigcontext */
	x86_mov_reg_membase (code, X86_ESP, X86_EBX, G_STRUCT_OFFSET (struct sigcontext, esp), 4);

	/* get the current domain (result in eax) */
	x86_call_code (code, mono_domain_get);

	/* load the preallocated stack overflow exception object from the domain */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoDomain, stack_overflow_ex), 4);

	/* call mono_arch_handle_exception (sctx, stack_overflow_exception_obj, FALSE) */
	x86_push_imm (code, 0);
	x86_push_reg (code, X86_EAX);
	x86_push_reg (code, X86_EBX);
	x86_call_code (code, mono_arch_handle_exception);

	/* restore the SEH handler stack saved in edi above */
	x86_mov_reg_reg (code, X86_ESP, X86_EDI, 4);
/* Special hack to work around the fact that when the SEH handler is called
 * the stack is too small to recover.
 *
 * The stack walking part of this method is taken from mono_handle_exception.
 *
 * The idea is simple:
 *  - walk the stack to free some space (64k)
 *  - set esp to the new stack location
 *  - call mono_arch_handle_exception with the stack overflow exception
 *  - set esp back to the SEH handler's stack
 */
win32_handle_stack_overflow (EXCEPTION_POINTERS* ep, struct sigcontext *sctx)
	MonoDomain *domain = mono_domain_get ();
	MonoJitInfo *ji, rji;
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	MonoLMF *lmf = jit_tls->lmf;
	MonoContext initial_ctx;
	guint32 free_stack = 0;

	/* convert sigcontext to MonoContext (due to reuse of stack walking helpers) */
	mono_arch_sigctx_to_monoctx (sctx, &ctx);

	/* get our os page size */
	page_size = si.dwPageSize;

	/* Let's walk the stack to recover
	 * the needed stack space (if possible)
	 */
	memset (&rji, 0, sizeof (rji));

	/* distance between the current frame and the initial (faulting) frame */
	free_stack = (guint8*)(MONO_CONTEXT_GET_BP (&ctx)) - (guint8*)(MONO_CONTEXT_GET_BP (&initial_ctx));

	/* try to free 64kb from our stack */
	ji = mono_arch_find_jit_info (domain, jit_tls, &rji, &rji, &ctx, &new_ctx, &lmf, NULL);
	g_warning ("Exception inside function without unwind info");
	g_assert_not_reached ();

	if (ji != (gpointer)-1) {
		free_stack = (guint8*)(MONO_CONTEXT_GET_BP (&ctx)) - (guint8*)(MONO_CONTEXT_GET_BP (&initial_ctx));

	/* todo: we should call abort if ji is -1 */
	} while (free_stack < 64 * 1024 && ji != (gpointer) -1);

	/* convert back into a sigcontext to be used in mono_arch_handle_exception */
	mono_arch_monoctx_to_sigctx (&ctx, sctx);

	/* todo: install new stack-guard page */

	/* use the new stack and call mono_arch_handle_exception () */
	restore_stack (sctx);
/*
 * Unhandled Exception Filter
 * Top-level per-process exception handler: translates the Win32 exception
 * into a Unix-style sigcontext and dispatches to the registered Mono handler.
 */
LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
	EXCEPTION_RECORD* er;
	struct sigcontext* sctx;

	res = EXCEPTION_CONTINUE_EXECUTION;

	er = ep->ExceptionRecord;
	ctx = ep->ContextRecord;
	sctx = g_malloc(sizeof(struct sigcontext));

	/* Copy Win32 context to UNIX style context */
	sctx->eax = ctx->Eax;
	sctx->ebx = ctx->Ebx;
	sctx->ecx = ctx->Ecx;
	sctx->edx = ctx->Edx;
	sctx->ebp = ctx->Ebp;
	sctx->esp = ctx->Esp;
	sctx->esi = ctx->Esi;
	sctx->edi = ctx->Edi;
	sctx->eip = ctx->Eip;

	/* dispatch on the Win32 exception code to the matching Mono handler */
	switch (er->ExceptionCode) {
	case EXCEPTION_STACK_OVERFLOW:
		win32_handle_stack_overflow (ep, sctx);
	case EXCEPTION_ACCESS_VIOLATION:
		W32_SEH_HANDLE_EX(segv);
	case EXCEPTION_ILLEGAL_INSTRUCTION:
		W32_SEH_HANDLE_EX(ill);
	case EXCEPTION_INT_DIVIDE_BY_ZERO:
	case EXCEPTION_INT_OVERFLOW:
	case EXCEPTION_FLT_DIVIDE_BY_ZERO:
	case EXCEPTION_FLT_OVERFLOW:
	case EXCEPTION_FLT_UNDERFLOW:
	case EXCEPTION_FLT_INEXACT_RESULT:
		W32_SEH_HANDLE_EX(fpe);

	/* Copy context back so execution resumes with the (possibly modified) state */
	ctx->Eax = sctx->eax;
	ctx->Ebx = sctx->ebx;
	ctx->Ecx = sctx->ecx;
	ctx->Edx = sctx->edx;
	ctx->Ebp = sctx->ebp;
	ctx->Esp = sctx->esp;
	ctx->Esi = sctx->esi;
	ctx->Edi = sctx->edi;
	ctx->Eip = sctx->eip;
/* Install the process-wide SEH machinery: build the stack-overflow restore
 * helper and hook seh_handler as the top-level exception filter. */
void win32_seh_init()
	/* install restore stack helper */
	restore_stack = mono_win32_get_handle_stackoverflow ();

	/* remember the previous filter so win32_seh_cleanup () can restore it */
	old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Undo win32_seh_init (): reinstall the previously active top-level filter. */
void win32_seh_cleanup()
	if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register 'handler' for the given signal 'type'; each assignment below sits
 * under its own case label selecting fpe, ill or segv. */
void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
	fpe_handler = handler;
	ill_handler = handler;
	segv_handler = handler;

#endif /* PLATFORM_WIN32 */
/*
 * mono_arch_get_restore_context:
 *
 * Returns a pointer to a method which restores a previously saved sigcontext.
 * The generated code is created once and cached in 'start'.
 */
mono_arch_get_restore_context (void)
	static guint8 *start = NULL;

	/* generated signature: restore_context (MonoContext *ctx) */
	/* we do not restore X86_EAX, X86_EDX */

	start = code = mono_global_codeman_reserve (128);

	/* load the MonoContext argument into eax */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);

	/* get return address, stored in EDX */
	x86_mov_reg_membase (code, X86_EDX, X86_EAX, G_STRUCT_OFFSET (MonoContext, eip), 4);

	/* restore callee-saved registers, the stack pointer and the frame pointer */
	x86_mov_reg_membase (code, X86_EBX, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebx), 4);
	x86_mov_reg_membase (code, X86_EDI, X86_EAX, G_STRUCT_OFFSET (MonoContext, edi), 4);
	x86_mov_reg_membase (code, X86_ESI, X86_EAX, G_STRUCT_OFFSET (MonoContext, esi), 4);
	x86_mov_reg_membase (code, X86_ESP, X86_EAX, G_STRUCT_OFFSET (MonoContext, esp), 4);
	x86_mov_reg_membase (code, X86_EBP, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebp), 4);

	/* jump to the saved IP */
	x86_jump_reg (code, X86_EDX);
/*
 * mono_arch_get_call_filter:
 *
 * Returns a pointer to a method which calls an exception filter. We
 * also use this function to call finally handlers (we pass NULL as
 * @exc object in this case).  The generated code is cached.
 */
mono_arch_get_call_filter (void)
	static guint8* start;
	static int inited = 0;

	/* generated signature: call_filter (MonoContext *ctx, unsigned long eip) */
	start = code = mono_global_codeman_reserve (64);

	/* standard prologue; save the callee-saved registers we will clobber */
	x86_push_reg (code, X86_EBP);
	x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
	x86_push_reg (code, X86_EBX);
	x86_push_reg (code, X86_EDI);
	x86_push_reg (code, X86_ESI);

	/* eax <- ctx (first argument) */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 8, 4);
	/* ecx <- eip, the address of the filter/finally block to run */
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, 12, 4);
	/* save our ebp so it can be restored after the handler returns */
	x86_push_reg (code, X86_EBP);

	/* run the handler in the frame described by ctx */
	x86_mov_reg_membase (code, X86_EBP, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebp), 4);
	/* restore registers used by global register allocation (EBX & ESI) */
	x86_mov_reg_membase (code, X86_EBX, X86_EAX, G_STRUCT_OFFSET (MonoContext, ebx), 4);
	x86_mov_reg_membase (code, X86_ESI, X86_EAX, G_STRUCT_OFFSET (MonoContext, esi), 4);
	x86_mov_reg_membase (code, X86_EDI, X86_EAX, G_STRUCT_OFFSET (MonoContext, edi), 4);

	/* align stack and save ESP */
	x86_mov_reg_reg (code, X86_EDX, X86_ESP, 4);
	x86_alu_reg_imm (code, X86_AND, X86_ESP, -MONO_ARCH_FRAME_ALIGNMENT);
	g_assert (MONO_ARCH_FRAME_ALIGNMENT >= 8);
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 8);
	x86_push_reg (code, X86_EDX);

	/* call the handler */
	x86_call_reg (code, X86_ECX);

	/* pop the unaligned esp saved above */
	x86_pop_reg (code, X86_ESP);

	/* restore our ebp saved before the handler call */
	x86_pop_reg (code, X86_EBP);

	/* restore saved regs */
	x86_pop_reg (code, X86_ESI);
	x86_pop_reg (code, X86_EDI);
	x86_pop_reg (code, X86_EBX);

	g_assert ((code - start) < 64);
/*
 * throw_exception:
 *
 * C target of the trampoline built in get_throw_exception (): receives the
 * caller's full register state as arguments, rebuilds a MonoContext from it,
 * hands @exc to the EH machinery and never returns (execution resumes at the
 * catch/finally handler through restore_context).
 */
throw_exception (unsigned long eax, unsigned long ecx, unsigned long edx, unsigned long ebx,
		 unsigned long esi, unsigned long edi, unsigned long ebp, MonoObject *exc,
		 unsigned long eip, unsigned long esp, gboolean rethrow)
	static void (*restore_context) (MonoContext *);

	/* lazily create and cache the restore-context trampoline */
	if (!restore_context)
		restore_context = mono_arch_get_restore_context ();

	/* Pop alignment added in get_throw_exception (), the return address, plus the argument and the alignment added at the call site */
	ctx.esp = esp + 8 + MONO_ARCH_FRAME_ALIGNMENT;

	/* The OSX ABI specifies 16 byte alignment at call sites */
	g_assert ((ctx.esp % MONO_ARCH_FRAME_ALIGNMENT) == 0);

	if (mono_object_isinst (exc, mono_defaults.exception_class)) {
		MonoException *mono_ex = (MonoException*)exc;
		/* clear any stale stack trace from a previous throw */
		mono_ex->stack_trace = NULL;

	if (mono_debug_using_mono_debugger ()) {
		guint8 buf [16], *code;

		mono_breakpoint_clean_code (NULL, (gpointer)eip, 8, buf, sizeof (buf));

		/* 0xe8 is the opcode of a 5-byte CALL rel32 instruction */
		if (buf [3] == 0xe8) {
			MonoContext ctx_cp = ctx;
			ctx_cp.eip = eip - 5;

			/* give the debugger a first shot at the exception */
			if (mono_debugger_handle_exception (&ctx_cp, exc)) {
				restore_context (&ctx_cp);
				g_assert_not_reached ();

	/* adjust eip so that it points into the call instruction */

	mono_handle_exception (&ctx, exc, (gpointer)eip, FALSE);

	restore_context (&ctx);

	/* restore_context () transfers control away and never returns here */
	g_assert_not_reached ();
/*
 * get_throw_exception:
 *
 * Emits the throw trampoline: saves every register plus the exception object
 * and the return IP on the stack, then calls throw_exception (), which does
 * not return.  @rethrow selects the rethrow variant (see the callers in
 * mono_arch_get_throw_exception / mono_arch_get_rethrow_exception).
 */
get_throw_exception (gboolean rethrow)
	guint8 *start, *code;

	start = code = mono_global_codeman_reserve (64);

	/*
	 * Align the stack on apple, since we push 10 args, and the call pushed 4 bytes.
	 */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
	x86_push_reg (code, X86_ESP);
	x86_push_membase (code, X86_ESP, 8); /* IP */
	x86_push_membase (code, X86_ESP, 16); /* exception */
	x86_push_reg (code, X86_EBP);
	x86_push_reg (code, X86_EDI);
	x86_push_reg (code, X86_ESI);
	x86_push_reg (code, X86_EBX);
	x86_push_reg (code, X86_EDX);
	x86_push_reg (code, X86_ECX);
	x86_push_reg (code, X86_EAX);
	x86_call_code (code, throw_exception);
	/* we should never reach this breakpoint */
	x86_breakpoint (code);

	g_assert ((code - start) < 64);
/**
 * mono_arch_get_throw_exception:
 *
 * Returns a function pointer which can be used to raise
 * exceptions. The returned function has the following
 * signature: void (*func) (MonoException *exc);
 * For example to raise an arithmetic exception you can use:
 *
 * x86_push_imm (code, mono_get_exception_arithmetic ());
 * x86_call_code (code, arch_get_throw_exception ());
 *
 * The trampoline is generated once and cached.
 */
mono_arch_get_throw_exception (void)
	static guint8 *start;
	static int inited = 0;

	/* rethrow == FALSE: this is a first throw */
	start = get_throw_exception (FALSE);
/* Like mono_arch_get_throw_exception () but for rethrowing an already
 * caught exception; the trampoline is generated once and cached. */
mono_arch_get_rethrow_exception (void)
	static guint8 *start;
	static int inited = 0;

	/* rethrow == TRUE distinguishes this from a first throw */
	start = get_throw_exception (TRUE);
/**
 * mono_arch_get_throw_exception_by_name:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (gpointer ip, char *exc_name);
 * For example to raise an arithmetic exception you can use:
 *
 * x86_push_imm (code, "ArithmeticException");
 * x86_push_imm (code, <IP>)
 * x86_jump_code (code, arch_get_throw_exception_by_name ());
 */
mono_arch_get_throw_exception_by_name (void)
	start = code = mono_global_codeman_reserve (32);

	/* only a breakpoint is emitted: this entry point is not expected to be
	 * reached (callers use the corlib-exception trampoline instead) */
	x86_breakpoint (code);

	mono_arch_flush_icache (start, code - start);
/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
mono_arch_get_throw_corlib_exception (void)
	static guint8* start;
	static int inited = 0;

	code = start = mono_global_codeman_reserve (64);

	/*
	 * Align the stack on apple, the caller doesn't do this to save space,
	 * two arguments + the return addr are already on the stack.
	 */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4 + 4, 4); /* token */
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, MONO_TOKEN_TYPE_DEF);
	/* Align the stack on apple */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
	x86_push_reg (code, X86_EAX);
	x86_push_imm (code, mono_defaults.exception_class->image);
	/* eax <- exception object built from the metadata token */
	x86_call_code (code, mono_exception_from_token);
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 16);
	/* Compute caller ip */
	x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);
	/* edx <- the offset argument passed by the caller */
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, 4 + 4 + 4, 4);
	/* pop the alignment, the return addr and the two arguments */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4 + 4 + 4 + 4);
	/* throw IP = return address - offset */
	x86_alu_reg_reg (code, X86_SUB, X86_ECX, X86_EDX);
	/* Align the stack on apple, mirrors the sub in OP_THROW. */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
	/* Push exception object */
	x86_push_reg (code, X86_EAX);
	/* push the fake return address (the computed throw IP) */
	x86_push_reg (code, X86_ECX);
	x86_jump_code (code, mono_arch_get_throw_exception ());

	g_assert ((code - start) < 64);
/* mono_arch_find_jit_info:
 *
 * This function is used to gather information from @ctx. It returns the
 * MonoJitInfo of the corresponding function, unwinds one stack frame and
 * stores the resulting context into @new_ctx. It also stores a string
 * describing the stack location into @trace (if not NULL), and modifies
 * the @lmf if necessary. @native_offset returns the IP offset from the
 * start of the function or -1 if that info is not available.
 */
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
			 MonoContext *new_ctx, MonoLMF **lmf, gboolean *managed)
	gpointer ip = MONO_CONTEXT_GET_IP (ctx);

	/* Avoid costly table lookup during stack overflow */
	if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))

	ji = mini_jit_info_table_find (domain, ip);

	gssize regs [MONO_MAX_IREGS + 1];
	guint32 unwind_info_len;

	/* wrapper frames are not considered managed code */
	if (!ji->method->wrapper_type)

	/* fetch unwind info: AOT-compiled methods store it separately */
	unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
	unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);

	/* marshal the current register state into the unwinder's layout;
	 * slot X86_NREG carries the instruction pointer */
	regs [X86_EAX] = new_ctx->eax;
	regs [X86_EBX] = new_ctx->ebx;
	regs [X86_ECX] = new_ctx->ecx;
	regs [X86_EDX] = new_ctx->edx;
	regs [X86_ESP] = new_ctx->esp;
	regs [X86_EBP] = new_ctx->ebp;
	regs [X86_ESI] = new_ctx->esi;
	regs [X86_EDI] = new_ctx->edi;
	regs [X86_NREG] = new_ctx->eip;

	mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
			   (guint8*)ji->code_start + ji->code_size,
			   ip, regs, MONO_MAX_IREGS + 1, &cfa);

	/* copy the unwound register state back into the context */
	new_ctx->eax = regs [X86_EAX];
	new_ctx->ebx = regs [X86_EBX];
	new_ctx->ecx = regs [X86_ECX];
	new_ctx->edx = regs [X86_EDX];
	new_ctx->esp = regs [X86_ESP];
	new_ctx->ebp = regs [X86_EBP];
	new_ctx->esi = regs [X86_ESI];
	new_ctx->edi = regs [X86_EDI];
	new_ctx->eip = regs [X86_NREG];

	/* The CFA becomes the new SP value */
	new_ctx->esp = (gssize)cfa;

	if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
		/* remove any unused lmf */
		*lmf = (gpointer)(((guint32)(*lmf)->previous_lmf) & ~1);

	/* Pop arguments off the stack */
	MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (ji->method)->param_count + 1);

	guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (ji->method), mono_method_signature (ji->method)->param_count, arg_info);
	new_ctx->esp += stack_to_pop;

	/* no JIT info for the current IP: try unwinding through the LMF chain */
	if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip))) {

	/* bit 0 of previous_lmf marks a trampoline LMF frame */
	if (!((guint32)((*lmf)->previous_lmf) & 1))

	/* Trampoline lmf frame */
	memset (res, 0, MONO_SIZEOF_JIT_INFO);
	res->method = (*lmf)->method;

	/* restore the callee-saved registers recorded in the LMF */
	new_ctx->esi = (*lmf)->esi;
	new_ctx->edi = (*lmf)->edi;
	new_ctx->ebx = (*lmf)->ebx;
	new_ctx->ebp = (*lmf)->ebp;
	new_ctx->eip = (*lmf)->eip;

	/* Check if we are in a trampoline LMF frame */
	if ((guint32)((*lmf)->previous_lmf) & 1) {
		/* lmf->esp is set by the trampoline code */
		new_ctx->esp = (*lmf)->esp;

		/* Pop arguments off the stack */
		/* FIXME: Handle the delegate case too ((*lmf)->method == NULL) */
		/* FIXME: Handle the IMT/vtable case too */
		if ((*lmf)->method && (*lmf)->method != MONO_FAKE_IMT_METHOD && (*lmf)->method != MONO_FAKE_VTABLE_METHOD) {
			MonoMethod *method = (*lmf)->method;
			MonoJitArgumentInfo *arg_info = g_newa (MonoJitArgumentInfo, mono_method_signature (method)->param_count + 1);

			guint32 stack_to_pop = mono_arch_get_argument_info (mono_method_signature (method), mono_method_signature (method)->param_count, arg_info);
			new_ctx->esp += stack_to_pop;

	/* the lmf is always stored on the stack, so the following
	 * expression points to a stack location which can be used as ESP */
	new_ctx->esp = (unsigned long)&((*lmf)->eip);

	/* unlink this LMF entry, clearing the trampoline tag bit */
	*lmf = (gpointer)(((guint32)(*lmf)->previous_lmf) & ~1);

	return ji ? ji : res;
/* Copy the register state out of a platform signal context (@sigctx) into a
 * MonoContext (@mctx).  Two variants: ucontext_t when sigaction is used,
 * otherwise a raw struct sigcontext accessed via the SC_* macros. */
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	mctx->eax = UCONTEXT_REG_EAX (ctx);
	mctx->ebx = UCONTEXT_REG_EBX (ctx);
	mctx->ecx = UCONTEXT_REG_ECX (ctx);
	mctx->edx = UCONTEXT_REG_EDX (ctx);
	mctx->ebp = UCONTEXT_REG_EBP (ctx);
	mctx->esp = UCONTEXT_REG_ESP (ctx);
	mctx->esi = UCONTEXT_REG_ESI (ctx);
	mctx->edi = UCONTEXT_REG_EDI (ctx);
	mctx->eip = UCONTEXT_REG_EIP (ctx);

	struct sigcontext *ctx = (struct sigcontext *)sigctx;

	mctx->eax = ctx->SC_EAX;
	mctx->ebx = ctx->SC_EBX;
	mctx->ecx = ctx->SC_ECX;
	mctx->edx = ctx->SC_EDX;
	mctx->ebp = ctx->SC_EBP;
	mctx->esp = ctx->SC_ESP;
	mctx->esi = ctx->SC_ESI;
	mctx->edi = ctx->SC_EDI;
	mctx->eip = ctx->SC_EIP;
/* Inverse of mono_arch_sigctx_to_monoctx (): write the register state from a
 * MonoContext (@mctx) back into the platform signal context (@sigctx). */
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;

	UCONTEXT_REG_EAX (ctx) = mctx->eax;
	UCONTEXT_REG_EBX (ctx) = mctx->ebx;
	UCONTEXT_REG_ECX (ctx) = mctx->ecx;
	UCONTEXT_REG_EDX (ctx) = mctx->edx;
	UCONTEXT_REG_EBP (ctx) = mctx->ebp;
	UCONTEXT_REG_ESP (ctx) = mctx->esp;
	UCONTEXT_REG_ESI (ctx) = mctx->esi;
	UCONTEXT_REG_EDI (ctx) = mctx->edi;
	UCONTEXT_REG_EIP (ctx) = mctx->eip;

	struct sigcontext *ctx = (struct sigcontext *)sigctx;

	ctx->SC_EAX = mctx->eax;
	ctx->SC_EBX = mctx->ebx;
	ctx->SC_ECX = mctx->ecx;
	ctx->SC_EDX = mctx->edx;
	ctx->SC_EBP = mctx->ebp;
	ctx->SC_ESP = mctx->esp;
	ctx->SC_ESI = mctx->esi;
	ctx->SC_EDI = mctx->edi;
	ctx->SC_EIP = mctx->eip;
/* Extract the faulting instruction pointer from a platform signal context. */
mono_arch_ip_from_context (void *sigctx)
#ifdef MONO_ARCH_USE_SIGACTION
	ucontext_t *ctx = (ucontext_t*)sigctx;
	return (gpointer)UCONTEXT_REG_EIP (ctx);

	struct sigcontext *ctx = sigctx;
	return (gpointer)ctx->SC_EIP;
/* Signal-level entry point of the EH machinery: convert the signal context
 * to a MonoContext, give the debugger a first shot, dispatch @obj through
 * mono_handle_exception (), then write the (possibly modified) context back
 * so the signal handler resumes at the right place. */
mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj))

	mono_handle_exception (&mctx, obj, (gpointer)mctx.eip, test_only);

	mono_arch_monoctx_to_sigctx (&mctx, sigctx);
/* Re-protect the soft stack-guard pages (made accessible while handling a
 * stack overflow); runs on the thread's normal stack via
 * prepare_for_guard_pages (). */
restore_soft_guard_pages (void)
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	if (jit_tls->stack_ovf_guard_base)
		mono_mprotect (jit_tls->stack_ovf_guard_base, jit_tls->stack_ovf_guard_size, MONO_MMAP_NONE);
/*
 * This function modifies mctx so that when it is restored, it
 * won't execute starting at mctx.eip, but in a function that
 * will restore the protection on the soft-guard pages and return back to
 * continue at mctx.eip.
 */
prepare_for_guard_pages (MonoContext *mctx)
	sp = (gpointer)(mctx->esp);

	/* push the original eip as the return address of the detour */
	sp [0] = (gpointer)(mctx->eip);
	mctx->eip = (unsigned long)restore_soft_guard_pages;
	mctx->esp = (unsigned long)sp;
/* Runs on the normal stack after mono_arch_handle_altstack_exception () has
 * redirected control out of the signal altstack: dispatch @obj, and when the
 * fault was a stack overflow (@stack_ovf) arrange for the soft guard pages
 * to be re-protected before resuming. */
altstack_handle_and_restore (void *sigctx, gpointer obj, gboolean stack_ovf)
	void (*restore_context) (MonoContext *);

	restore_context = mono_arch_get_restore_context ();
	mono_arch_sigctx_to_monoctx (sigctx, &mctx);

	/* let the debugger consume the exception first */
	if (mono_debugger_handle_exception (&mctx, (MonoObject *)obj)) {
		prepare_for_guard_pages (&mctx);
		restore_context (&mctx);

	mono_handle_exception (&mctx, obj, (gpointer)mctx.eip, FALSE);

	prepare_for_guard_pages (&mctx);
	restore_context (&mctx);
/* Handle a SIGSEGV delivered on the signal altstack: decide whether it is a
 * stack overflow or a plain segfault, then build a call frame on the real
 * (thread) stack so that on return from the signal handler execution
 * continues in altstack_handle_and_restore () with a copy of the context. */
mono_arch_handle_altstack_exception (void *sigctx, gpointer fault_addr, gboolean stack_ovf)
#ifdef MONO_ARCH_USE_SIGACTION
	MonoException *exc = NULL;
	ucontext_t *ctx = (ucontext_t*)sigctx;
	MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)UCONTEXT_REG_EIP (ctx));

	/* if we didn't find a managed method for the ip address and it matches the fault
	 * address, we assume we followed a broken pointer during an indirect call, so
	 * we try the lookup again with the return address pushed on the stack
	 */
	if (!ji && fault_addr == (gpointer)UCONTEXT_REG_EIP (ctx)) {
		glong *sp = (gpointer)UCONTEXT_REG_ESP (ctx);
		ji = mini_jit_info_table_find (mono_domain_get (), (gpointer)sp [0]);
		UCONTEXT_REG_EIP (ctx) = sp [0];

	/* stack overflow: use the preallocated exception object */
	exc = mono_domain_get ()->stack_overflow_ex;
	/* unmanaged fault: report a native crash */
	mono_handle_native_sigsegv (SIGSEGV, sigctx);
	/* setup a call frame on the real stack so that control is returned there
	 * and exception handling can continue.
	 * If this was a stack overflow the caller already ensured the stack pages
	 * needed have been unprotected.
	 * The frame looks like:
	 */
	frame_size = sizeof (ucontext_t) + sizeof (gpointer) * 4;

	/* 16-byte align the frame below the current (real) stack pointer */
	sp = (gpointer)(UCONTEXT_REG_ESP (ctx) & ~15);
	sp = (gpointer)((char*)sp - frame_size);
	/* the incoming arguments are aligned to 16 bytes boundaries, so the return address IP
	 */
	sp [-1] = (gpointer)UCONTEXT_REG_EIP (ctx);
	sp [2] = (gpointer)stack_ovf;
	/* may need to adjust pointers in the new struct copy, depending on the OS */
	memcpy (sp + 4, ctx, sizeof (ucontext_t));
	/* at the return from the signal handler execution starts in altstack_handle_and_restore() */
	UCONTEXT_REG_EIP (ctx) = (unsigned long)altstack_handle_and_restore;
	UCONTEXT_REG_ESP (ctx) = (unsigned long)(sp - 1);
#if MONO_SUPPORT_TASKLETS
/* Emit (once, then cache in 'saved') the continuation-restore trampoline:
 * it copies the saved stack back in place, restores the callee-saved
 * registers from the continuation's LMF, puts the state argument in eax
 * and jumps to the continuation's saved return address. */
MonoContinuationRestore
mono_tasklets_arch_restore (void)
	static guint8* saved = NULL;
	guint8 *code, *start;

	return (MonoContinuationRestore)saved;
	code = start = mono_global_codeman_reserve (48);
	/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
	/* put cont in edx */
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, 4, 4);
	/* setup the copy of the stack */
	x86_mov_reg_membase (code, X86_ECX, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, stack_used_size), 4);
	/* ecx: stack size in 4-byte words (shift right by 2) for the rep copy */
	x86_shift_reg_imm (code, X86_SHR, X86_ECX, 2);
	/* esi = source (saved stack), edi = destination (original stack position) */
	x86_mov_reg_membase (code, X86_ESI, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, saved_stack), 4);
	x86_mov_reg_membase (code, X86_EDI, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, return_sp), 4);
	x86_prefix (code, X86_REP_PREFIX);

	/* now restore the registers from the LMF */
	x86_mov_reg_membase (code, X86_ECX, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, lmf), 4);
	x86_mov_reg_membase (code, X86_EBX, X86_ECX, G_STRUCT_OFFSET (MonoLMF, ebx), 4);
	x86_mov_reg_membase (code, X86_EBP, X86_ECX, G_STRUCT_OFFSET (MonoLMF, ebp), 4);
	x86_mov_reg_membase (code, X86_ESI, X86_ECX, G_STRUCT_OFFSET (MonoLMF, esi), 4);
	x86_mov_reg_membase (code, X86_EDI, X86_ECX, G_STRUCT_OFFSET (MonoLMF, edi), 4);

	/* restore the lmf chain */
	/*x86_mov_reg_membase (code, X86_ECX, X86_ESP, 12, 4);
	x86_mov_membase_reg (code, X86_ECX, 0, X86_EDX, 4);*/

	/* state in eax, so it's setup as the return value */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 8, 4);
	x86_jump_membase (code, X86_EDX, G_STRUCT_OFFSET (MonoContinuation, return_ip));
	g_assert ((code - start) <= 48);

	return (MonoContinuationRestore)saved;