2 * exceptions-amd64.c: exception support for AMD64
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
14 #include <sys/ucontext.h>
16 #include <mono/arch/amd64/amd64-codegen.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/tabledefs.h>
19 #include <mono/metadata/threads.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/metadata/exception.h>
22 #include <mono/metadata/gc-internal.h>
23 #include <mono/metadata/mono-debug.h>
26 #include "mini-amd64.h"
28 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Per-signal Win32 SEH handlers registered via win32_seh_set_handler (). */
31 static MonoW32ExceptionHandler fpe_handler;
32 static MonoW32ExceptionHandler ill_handler;
33 static MonoW32ExceptionHandler segv_handler;
/* Previous top-level filter, saved so win32_seh_cleanup () can restore it. */
35 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
/* Invoke the matching handler if one is registered; `sctx` must be in scope
 * at the expansion site (it is the MonoContext built inside seh_handler). */
37 #define W32_SEH_HANDLE_EX(_ex) \
38 if (_ex##_handler) _ex##_handler((int)sctx)
41 * Unhandled Exception Filter
42 * Top-level per-process exception handler.
/*
 * Translates a Win32 structured exception into a call to the Mono handler
 * registered for the corresponding signal (SIGSEGV/SIGILL/SIGFPE).
 * NOTE(review): several lines of this function are elided in this view
 * (local declarations, break statements, return).
 */
44 LONG CALLBACK seh_handler(EXCEPTION_POINTERS* ep)
51 res = EXCEPTION_CONTINUE_EXECUTION;
53 er = ep->ExceptionRecord;
54 ctx = ep->ContextRecord;
/* NOTE(review): sctx is heap-allocated here; no matching g_free is visible
 * in this view — confirm ownership/lifetime in the full file. */
55 sctx = g_malloc(sizeof(MonoContext));
57 /* Copy Win32 context to UNIX style context */
/* Dispatch on the Win32 exception code to the registered Mono handler. */
68 switch (er->ExceptionCode) {
69 case EXCEPTION_ACCESS_VIOLATION:
70 W32_SEH_HANDLE_EX(segv);
72 case EXCEPTION_ILLEGAL_INSTRUCTION:
73 W32_SEH_HANDLE_EX(ill);
/* All integer and floating-point arithmetic faults map to the FPE handler. */
75 case EXCEPTION_INT_DIVIDE_BY_ZERO:
76 case EXCEPTION_INT_OVERFLOW:
77 case EXCEPTION_FLT_DIVIDE_BY_ZERO:
78 case EXCEPTION_FLT_OVERFLOW:
79 case EXCEPTION_FLT_UNDERFLOW:
80 case EXCEPTION_FLT_INEXACT_RESULT:
81 W32_SEH_HANDLE_EX(fpe);
87 /* Copy context back */
/* Install seh_handler as the process-wide top-level exception filter,
 * remembering the previous filter for later restoration. */
101 void win32_seh_init()
103 old_handler = SetUnhandledExceptionFilter(seh_handler);
/* Restore the exception filter that was active before win32_seh_init (). */
106 void win32_seh_cleanup()
108 if (old_handler) SetUnhandledExceptionFilter(old_handler);
/* Register @handler for the given signal @type (the switch on @type is
 * elided in this view; the assignments below are its case bodies). */
111 void win32_seh_set_handler(int type, MonoW32ExceptionHandler handler)
115 fpe_handler = handler;
118 ill_handler = handler;
121 segv_handler = handler;
128 #endif /* PLATFORM_WIN32 */
131 * Can't allocate the helper methods in static arrays as on other platforms.
/* Code manager used to allocate the dynamically-generated exception helper
 * trampolines below; guarded by code_manager_mutex. */
133 static MonoCodeManager *code_manager = NULL;
134 static CRITICAL_SECTION code_manager_mutex;
/* One-time initialization of the code manager and its lock. */
137 mono_amd64_exceptions_init ()
139 InitializeCriticalSection (&code_manager_mutex);
140 code_manager = mono_code_manager_new ();
144 * mono_arch_get_restore_context:
146 * Returns a pointer to a method which restores a previously saved sigcontext.
/* Emits amd64 code that reloads the callee-saved registers and RSP from the
 * MonoContext passed in RDI, then jumps to the context's saved RIP.
 * The generated stub is cached in `start` after the first call. */
149 mono_arch_get_restore_context (void)
151 static guint8 *start = NULL;
152 static gboolean inited = FALSE;
158 /* restore_context (MonoContext *ctx) */
/* Reserve executable memory under the code-manager lock. */
160 EnterCriticalSection (&code_manager_mutex);
161 start = code = mono_code_manager_reserve (code_manager, 1024);
162 LeaveCriticalSection (&code_manager_mutex);
164 /* get return address */
165 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rip), 8);
167 /* Restore registers */
168 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8);
169 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8);
170 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8);
171 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8);
172 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8);
173 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8);
/* Switch to the saved stack pointer last, after all loads from the context. */
175 amd64_mov_reg_membase (code, AMD64_RSP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rsp), 8);
177 /* jump to the saved IP */
178 amd64_jump_reg (code, AMD64_RAX);
186 * mono_arch_get_call_filter:
188 * Returns a pointer to a method which calls an exception filter. We
189 * also use this function to call finally handlers (we pass NULL as
190 * @exc object in this case).
/* The emitted stub saves the callee-saved registers, loads the callee-saved
 * register state out of the MonoContext in RDI, calls the filter/finally
 * address passed in RSI, then restores the saved registers and returns. */
193 mono_arch_get_call_filter (void)
195 static guint8 *start;
196 static gboolean inited = FALSE;
204 EnterCriticalSection (&code_manager_mutex);
205 start = code = mono_code_manager_reserve (code_manager, 64);
206 LeaveCriticalSection (&code_manager_mutex);
208 /* call_filter (MonoContext *ctx, unsigned long eip) */
211 /* Alloc new frame */
212 amd64_push_reg (code, AMD64_RBP);
213 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
215 /* Save callee saved regs */
217 for (i = 0; i < AMD64_NREG; ++i)
218 if (AMD64_IS_CALLEE_SAVED_REG (i)) {
219 amd64_push_reg (code, i);
/* RBP is pushed separately so it can be restored after the handler call. */
225 amd64_push_reg (code, AMD64_RBP);
227 /* Make stack misaligned, the call will make it aligned again */
229 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
/* Load the handler's frame pointer and callee-saved regs from the context. */
232 amd64_mov_reg_membase (code, AMD64_RBP, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbp), 8);
233 /* load callee saved regs */
234 amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, rbx), 8);
235 amd64_mov_reg_membase (code, AMD64_R12, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r12), 8);
236 amd64_mov_reg_membase (code, AMD64_R13, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r13), 8);
237 amd64_mov_reg_membase (code, AMD64_R14, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r14), 8);
238 amd64_mov_reg_membase (code, AMD64_R15, AMD64_RDI, G_STRUCT_OFFSET (MonoContext, r15), 8);
240 /* call the handler */
241 amd64_call_reg (code, AMD64_RSI);
/* Undo the alignment adjustment made before the call. */
244 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
247 amd64_pop_reg (code, AMD64_RBP);
249 /* Restore callee saved regs */
/* NOTE(review): the loop bound `i >= 0` includes index 0; mirror-image of
 * the save loop above, relying on AMD64_IS_CALLEE_SAVED_REG to filter. */
250 for (i = AMD64_NREG; i >= 0; --i)
251 if (AMD64_IS_CALLEE_SAVED_REG (i))
252 amd64_pop_reg (code, i);
257 g_assert ((code - start) < 64);
/*
 * Actually raises @exc: builds a MonoContext from the register values the
 * trampoline captured, lets mono_handle_exception () find the handler, then
 * restores the (possibly modified) context. Never returns.
 */
265 throw_exception (MonoObject *exc, guint64 rip, guint64 rsp,
266 guint64 rbx, guint64 rbp, guint64 r12, guint64 r13,
267 guint64 r14, guint64 r15)
269 static void (*restore_context) (MonoContext *);
/* Lazily fetch the restore-context trampoline on first use. */
272 if (!restore_context)
273 restore_context = mono_arch_get_restore_context ();
275 /* adjust eip so that it points into the call instruction */
/* Clear any stale stack trace so a fresh one is captured for this throw. */
287 if (mono_object_isinst (exc, mono_defaults.exception_class)) {
288 MonoException *mono_ex = (MonoException*)exc;
289 mono_ex->stack_trace = NULL;
291 mono_handle_exception (&ctx, exc, (gpointer)(rip + 1), FALSE);
292 restore_context (&ctx);
/* restore_context jumps away; reaching this point is a bug. */
294 g_assert_not_reached ();
298 * mono_arch_get_throw_exception:
300 * Returns a function pointer which can be used to raise
301 * exceptions. The returned function has the following
302 * signature: void (*func) (MonoException *exc);
/* The emitted stub marshals the caller's return address, stack pointer and
 * callee-saved registers into the argument registers / stack slots expected
 * by throw_exception () above, then calls it. */
306 mono_arch_get_throw_exception (void)
308 static guint8* start;
309 static gboolean inited = FALSE;
315 EnterCriticalSection (&code_manager_mutex);
316 start = code = mono_code_manager_reserve (code_manager, 64);
317 LeaveCriticalSection (&code_manager_mutex);
/* exc is already in RDI (first SysV argument register). */
322 amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RDI, 8);
/* rip = return address at the top of the caller's stack. */
324 amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, 0, 8);
/* rsp = caller's stack pointer (just above the return address). */
326 amd64_lea_membase (code, AMD64_RDX, AMD64_RSP, 8);
327 /* Callee saved regs */
328 amd64_mov_reg_reg (code, AMD64_RCX, AMD64_RBX, 8);
329 amd64_mov_reg_reg (code, AMD64_R8, AMD64_RBP, 8);
330 amd64_mov_reg_reg (code, AMD64_R9, AMD64_R12, 8);
/* r13/r14/r15 don't fit in argument registers; pass them on the stack. */
332 amd64_push_reg (code, AMD64_R15);
333 amd64_push_reg (code, AMD64_R14);
334 amd64_push_reg (code, AMD64_R13);
336 amd64_mov_reg_imm (code, AMD64_R11, throw_exception);
337 amd64_call_reg (code, AMD64_R11);
/* throw_exception never returns; trap if it somehow does. */
338 amd64_breakpoint (code);
340 g_assert ((code - start) < 64);
348 * mono_arch_get_throw_exception_by_name:
350 * Returns a function pointer which can be used to raise
351 * corlib exceptions. The returned function has the following
352 * signature: void (*func) (char *exc_name);
/* The emitted stub calls mono_exception_from_name (corlib, "System", name)
 * to build the exception object, then tail-jumps into the generic throw
 * trampoline with the caller's original return address back on the stack. */
355 mono_arch_get_throw_exception_by_name (void)
357 static guint8* start;
358 static gboolean inited = FALSE;
365 EnterCriticalSection (&code_manager_mutex);
366 start = code = mono_code_manager_reserve (code_manager, 64);
367 LeaveCriticalSection (&code_manager_mutex);
371 /* Push return address */
372 amd64_push_reg (code, AMD64_RSI);
374 /* Call exception_from_name */
/* Argument shuffle: name -> RDX, namespace "System" -> RSI, image -> RDI. */
375 amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RDI, 8);
376 amd64_mov_reg_imm (code, AMD64_RSI, "System");
377 amd64_mov_reg_imm (code, AMD64_RDI, mono_defaults.exception_class->image);
379 amd64_mov_reg_imm (code, AMD64_R11, mono_exception_from_name);
380 amd64_call_reg (code, AMD64_R11);
382 /* Put the original return address at the top of the misaligned stack */
383 amd64_pop_reg (code, AMD64_RSI);
384 amd64_push_reg (code, AMD64_R11);
385 amd64_push_reg (code, AMD64_RSI);
/* Resolve the generic throw trampoline at emit time and jump to it. */
387 throw_ex = (guint64)mono_arch_get_throw_exception ();
389 /* Call throw_exception */
/* The new exception object returned in RAX becomes the first argument. */
390 amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RAX, 8);
391 amd64_mov_reg_imm (code, AMD64_R11, throw_ex);
392 /* The original IP is on the stack */
393 amd64_jump_reg (code, AMD64_R11);
395 g_assert ((code - start) < 64);
402 /* mono_arch_find_jit_info:
404 * This function is used to gather information from @ctx. It return the
405 * MonoJitInfo of the corresponding function, unwinds one stack frame and
406 * stores the resulting context into @new_ctx. It also stores a string
407 * describing the stack location into @trace (if not NULL), and modifies
408 * the @lmf if necessary. @native_offset return the IP offset from the
409 * start of the function or -1 if that info is not available.
412 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls, MonoJitInfo *res, MonoJitInfo *prev_ji, MonoContext *ctx,
413 MonoContext *new_ctx, char **trace, MonoLMF **lmf, int *native_offset,
418 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
420 /* Avoid costly table lookup during stack overflow */
421 if (prev_ji && (ip > prev_ji->code_start && ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
424 ji = mono_jit_info_table_find (domain, ip);
435 if (!ji->method->wrapper_type)
439 * Some managed methods like pinvoke wrappers might have save_lmf set.
440 * In this case, register save/restore code is not generated by the
441 * JIT, so we have to restore callee saved registers from the lmf.
443 if (ji->method->save_lmf) {
445 * We only need to do this if the exception was raised in managed
446 * code, since otherwise the lmf was already popped of the stack.
448 if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
449 new_ctx->rbx = (*lmf)->rbx;
450 new_ctx->r12 = (*lmf)->r12;
451 new_ctx->r13 = (*lmf)->r13;
452 new_ctx->r14 = (*lmf)->r14;
453 new_ctx->r15 = (*lmf)->r15;
458 /* restore callee saved registers */
/* NOTE(review): `offset` is declared/updated in lines elided from this view
 * — it walks the frame's register save area below RBP. */
459 for (i = 0; i < AMD64_NREG; i ++)
460 if (AMD64_IS_CALLEE_SAVED_REG (i) && (ji->used_regs & (1 << i))) {
461 guint64 reg = *((guint64 *)ctx->SC_EBP + offset);
480 g_assert_not_reached ();
/* Frame raised in managed code: discard any LMF entries that belong to
 * frames below the one being unwound. */
485 if (*lmf && (MONO_CONTEXT_GET_BP (ctx) >= (gpointer)(*lmf)->ebp)) {
486 /* remove any unused lmf */
487 *lmf = (*lmf)->previous_lmf;
490 /* Pop EBP and the return address */
491 new_ctx->SC_ESP = ctx->SC_EBP + (2 * sizeof (gpointer));
492 /* we subtract 1, so that the IP points into the call instruction */
493 new_ctx->SC_EIP = *((guint64 *)ctx->SC_EBP + 1) - 1;
494 new_ctx->SC_EBP = *((guint64 *)ctx->SC_EBP);
496 /* Pop arguments off the stack */
498 MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (ji->method->signature->param_count + 1));
500 guint32 stack_to_pop = mono_arch_get_argument_info (ji->method->signature, ji->method->signature->param_count, arg_info);
501 new_ctx->SC_ESP += stack_to_pop;
/* No JIT info for ip: try to unwind through the topmost LMF (a transition
 * from managed to unmanaged code). */
513 if ((ji = mono_jit_info_table_find (domain, (gpointer)(*lmf)->rip))) {
/* Synthesize a minimal MonoJitInfo for the unmanaged frame. */
516 memset (res, 0, sizeof (MonoJitInfo));
517 res->method = (*lmf)->method;
520 new_ctx->SC_RIP = (*lmf)->rip;
521 new_ctx->SC_RBP = (*lmf)->ebp;
523 new_ctx->SC_RBX = (*lmf)->rbx;
524 new_ctx->SC_R12 = (*lmf)->r12;
525 new_ctx->SC_R13 = (*lmf)->r13;
526 new_ctx->SC_R14 = (*lmf)->r14;
527 new_ctx->SC_R15 = (*lmf)->r15;
529 /* the lmf is always stored on the stack, so the following
530 * expression points to a stack location which can be used as ESP */
531 new_ctx->SC_ESP = ALIGN_TO ((guint64)&((*lmf)->rip), 16);
533 *lmf = (*lmf)->previous_lmf;
543 * mono_arch_handle_exception:
545 * @ctx: saved processor state
546 * @obj: the exception object
/* Signal-handler entry point: converts the platform ucontext_t into a
 * MonoContext, runs the generic exception machinery, then copies the
 * (possibly modified) register state back so the kernel resumes there. */
549 mono_arch_handle_exception (void *sigctx, gpointer obj, gboolean test_only)
551 ucontext_t *ctx = (ucontext_t*)sigctx;
/* ucontext -> MonoContext */
554 mctx.rax = ctx->uc_mcontext.gregs [REG_RAX];
555 mctx.rbx = ctx->uc_mcontext.gregs [REG_RBX];
556 mctx.rcx = ctx->uc_mcontext.gregs [REG_RCX];
557 mctx.rdx = ctx->uc_mcontext.gregs [REG_RDX];
558 mctx.rbp = ctx->uc_mcontext.gregs [REG_RBP];
559 mctx.rsp = ctx->uc_mcontext.gregs [REG_RSP];
560 mctx.rsi = ctx->uc_mcontext.gregs [REG_RSI];
561 mctx.rdi = ctx->uc_mcontext.gregs [REG_RDI];
562 mctx.rip = ctx->uc_mcontext.gregs [REG_RIP];
563 mctx.r12 = ctx->uc_mcontext.gregs [REG_R12];
564 mctx.r13 = ctx->uc_mcontext.gregs [REG_R13];
565 mctx.r14 = ctx->uc_mcontext.gregs [REG_R14];
566 mctx.r15 = ctx->uc_mcontext.gregs [REG_R15];
568 mono_handle_exception (&mctx, obj, mctx.rip, test_only);
/* MonoContext -> ucontext: propagate any changes the handler made. */
570 ctx->uc_mcontext.gregs [REG_RAX] = mctx.rax;
571 ctx->uc_mcontext.gregs [REG_RBX] = mctx.rbx;
572 ctx->uc_mcontext.gregs [REG_RCX] = mctx.rcx;
573 ctx->uc_mcontext.gregs [REG_RDX] = mctx.rdx;
574 ctx->uc_mcontext.gregs [REG_RBP] = mctx.rbp;
575 ctx->uc_mcontext.gregs [REG_RSP] = mctx.rsp;
576 ctx->uc_mcontext.gregs [REG_RSI] = mctx.rsi;
577 ctx->uc_mcontext.gregs [REG_RDI] = mctx.rdi;
578 ctx->uc_mcontext.gregs [REG_RIP] = mctx.rip;
579 ctx->uc_mcontext.gregs [REG_R12] = mctx.r12;
580 ctx->uc_mcontext.gregs [REG_R13] = mctx.r13;
581 ctx->uc_mcontext.gregs [REG_R14] = mctx.r14;
582 ctx->uc_mcontext.gregs [REG_R15] = mctx.r15;
588 mono_arch_ip_from_context (void *sigctx)
590 ucontext_t *ctx = (ucontext_t*)sigctx;
591 return (gpointer)ctx->uc_mcontext.gregs [REG_RIP];