tailcall: len:120 clob:c
br: len:6
label: len:0
-seq_point: len:31 clob:c
+seq_point: len:46 clob:c
il_seq_point: len:0
long_add: dest:i src1:i src2:i len:3 clob:1 nacl:6
}
static void
-process_breakpoint_inner (DebuggerTlsData *tls)
+process_breakpoint_inner (DebuggerTlsData *tls, gboolean from_signal)
{
MonoJitInfo *ji;
guint8 *ip;
/*
* Skip the instruction causing the breakpoint signal.
*/
- mono_arch_skip_breakpoint (ctx, ji);
+ if (from_signal)
+ mono_arch_skip_breakpoint (ctx, ji);
if (method->wrapper_type || tls->disable_breakpoints)
return;
/* Process a breakpoint/single step event after resuming from a signal handler */
static void
-process_signal_event (void (*func) (DebuggerTlsData*))
+process_signal_event (void (*func) (DebuggerTlsData*, gboolean))
{
DebuggerTlsData *tls;
MonoThreadUnwindState orig_restore_state;
memcpy (&orig_restore_state, &tls->restore_state, sizeof (MonoThreadUnwindState));
mono_thread_state_init_from_monoctx (&tls->restore_state, &tls->handler_ctx);
- func (tls);
+ func (tls, TRUE);
/* This is called when resuming from a signal handler, so it shouldn't return */
memcpy (&ctx, &tls->restore_state.ctx, sizeof (MonoContext));
}
static void
-process_single_step_inner (DebuggerTlsData *tls)
+process_single_step_inner (DebuggerTlsData *tls, gboolean from_signal)
{
MonoJitInfo *ji;
guint8 *ip;
ip = MONO_CONTEXT_GET_IP (ctx);
/* Skip the instruction causing the single step */
- mono_arch_skip_single_step (ctx);
+ if (from_signal)
+ mono_arch_skip_single_step (ctx);
if (suspend_count > 0) {
process_suspend (tls, ctx);
memcpy (&orig_restore_state, &tls->restore_state, sizeof (MonoThreadUnwindState));
mono_thread_state_init_from_monoctx (&tls->restore_state, ctx);
- process_single_step_inner (tls);
+ process_single_step_inner (tls, FALSE);
memcpy (ctx, &tls->restore_state.ctx, sizeof (MonoContext));
memcpy (&tls->restore_state, &orig_restore_state, sizeof (MonoThreadUnwindState));
/* NOTE(review): orig_restore_state is a MonoThreadUnwindState (the single-step path
 * above saves sizeof (MonoThreadUnwindState)); saving only sizeof (MonoContext) here
 * looks like a truncated copy of the unwind state — confirm against debugger-agent.c. */
memcpy (&orig_restore_state, &tls->restore_state, sizeof (MonoContext));
mono_thread_state_init_from_monoctx (&tls->restore_state, ctx);
- process_breakpoint_inner (tls);
+ process_breakpoint_inner (tls, FALSE);
memcpy (ctx, &tls->restore_state.ctx, sizeof (MonoContext));
memcpy (&tls->restore_state, &orig_restore_state, sizeof (MonoThreadUnwindState));
/* The size of the single step instruction causing the actual fault */
static int single_step_fault_size;
+/* The single step trampoline */
+static gpointer ss_trampoline;
+
/* Offset between fp and the first argument in the callee */
#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11
MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_tramp_var = ins;
}
ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
case OP_SEQ_POINT: {
int i;
- /*
- * Read from the single stepping trigger page. This will cause a
- * SIGSEGV when single stepping is enabled.
- * We do this _before_ the breakpoint, so single stepping after
- * a breakpoint is hit will step to the next IL offset.
- */
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
- MonoInst *var = cfg->arch.ss_trigger_page_var;
+ if (cfg->compile_aot) {
+ MonoInst *var = cfg->arch.ss_tramp_var;
+ guint8 *label;
+
+ /* Load ss_tramp_var */
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ /* Load the trampoline address */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
+ /* Call it if it is non-null */
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label = code;
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
+ amd64_call_reg (code, AMD64_R11);
+ amd64_patch (label, code);
+ } else {
+ /*
+ * Read from the single stepping trigger page. This will cause a
+ * SIGSEGV when single stepping is enabled.
+ * We do this _before_ the breakpoint, so single stepping after
+ * a breakpoint is hit will step to the next IL offset.
+ */
+ MonoInst *var = cfg->arch.ss_trigger_page_var;
- amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
- amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
+ }
}
/*
guint32 offset = code - cfg->native_code;
guint32 val;
MonoInst *info_var = cfg->arch.seq_point_info_var;
+ guint8 *label;
/* Load info var */
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
val = ((offset) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
- /* Load the info->bp_addrs [offset], which is either a valid address or the address of a trigger page */
+ /* Load the info->bp_addrs [offset], which is either NULL or the address of the breakpoint trampoline */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label = code;
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
+ /* Call the trampoline */
+ amd64_call_reg (code, AMD64_R11);
+ amd64_patch (label, code);
} else {
/*
* A placeholder for a possible breakpoint inserted by
amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8);
}
- /* Initialize ss_trigger_page_var */
- ins = cfg->arch.ss_trigger_page_var;
-
- g_assert (ins->opcode == OP_REGOFFSET);
-
if (cfg->compile_aot) {
+ /* Initialize ss_tramp_var */
+ ins = cfg->arch.ss_tramp_var;
+ g_assert (ins->opcode == OP_REGOFFSET);
+
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr), 8);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
} else {
+ /* Initialize ss_trigger_page_var */
+ ins = cfg->arch.ss_trigger_page_var;
+
+ g_assert (ins->opcode == OP_REGOFFSET);
+
amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
cfg->code_len = code - cfg->native_code;
SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
g_assert (info->bp_addrs [native_offset] == 0);
- info->bp_addrs [native_offset] = bp_trigger_page;
+ info->bp_addrs [native_offset] = mini_get_breakpoint_trampoline ();
} else {
/*
* In production, we will use int3 (has to fix the size in the md
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
- g_assert (info->bp_addrs [native_offset] == 0);
- info->bp_addrs [native_offset] = info;
+ info->bp_addrs [native_offset] = NULL;
} else {
for (i = 0; i < breakpoint_size; ++i)
x86_nop (code);
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
if (ji->from_aot) {
- /* amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8) */
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 3);
+ /* The breakpoint instruction is a call */
} else {
MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
}
mono_arch_start_single_stepping (void)
{
mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+ ss_trampoline = mini_get_single_step_trampoline ();
}
/*
mono_arch_stop_single_stepping (void)
{
mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+ ss_trampoline = NULL;
}
/*
{
SeqPointInfo *info;
MonoJitInfo *ji;
- int i;
// FIXME: Add a free function
// FIXME: Optimize the size
info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));
- info->ss_trigger_page = ss_trigger_page;
- info->bp_trigger_page = bp_trigger_page;
- /* Initialize to a valid address */
- for (i = 0; i < ji->code_size; ++i)
- info->bp_addrs [i] = info;
+ info->ss_tramp_addr = &ss_trampoline;
mono_domain_lock (domain);
g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
#endif
gpointer seq_point_info_var;
gpointer ss_trigger_page_var;
+ gpointer ss_tramp_var;
gpointer lmf_var;
} MonoCompileArch;
/* Structure used by the sequence points in AOTed code */
typedef struct {
- gpointer ss_trigger_page;
- gpointer bp_trigger_page;
+ gpointer ss_tramp_addr;
gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
} SeqPointInfo;
#define MONO_ARCH_HAVE_OP_TAIL_CALL 1
#define MONO_ARCH_HAVE_TRANSLATE_TLS_OFFSET 1
#define MONO_ARCH_HAVE_DUMMY_INIT 1
+#define MONO_ARCH_HAVE_SDB_TRAMPOLINES 1
#if defined(TARGET_OSX) || defined(__linux__)
#define MONO_ARCH_HAVE_TLS_GET_REG 1
return nullified_class_init_trampoline;
}
+
+/*
+ * mini_get_single_step_trampoline:
+ *
+ * Return a trampoline which calls debugger_agent_single_step_from_context ().
+ * The trampoline is created lazily on first use and cached for the lifetime
+ * of the process; it is never freed.
+ */
+gpointer
+mini_get_single_step_trampoline (void)
+{
+	static gpointer trampoline;
+
+	if (!trampoline) {
+		gpointer tramp;
+		MonoTrampInfo *info;
+
+		if (mono_aot_only) {
+			/* In full-AOT mode the trampoline was precompiled into the AOT image. */
+			tramp = mono_aot_get_trampoline ("sdb_single_step_trampoline");
+		} else {
+#ifdef MONO_ARCH_HAVE_SDB_TRAMPOLINES
+			tramp = mono_arch_create_sdb_trampoline (TRUE, &info, FALSE);
+			mono_tramp_info_register (info);
+#else
+			/* Backends without SDB trampolines cannot reach this path. */
+			tramp = NULL;
+			g_assert_not_reached ();
+#endif
+		}
+		/* Make the generated code visible to other threads before publishing the pointer. */
+		mono_memory_barrier ();
+		trampoline = tramp;
+		/* NOTE(review): concurrent first callers may each create a trampoline; the
+		 * last write wins and the extra one is leaked — presumably benign, confirm. */
+	}
+
+	return trampoline;
+}
+
+/*
+ * mini_get_breakpoint_trampoline:
+ *
+ * Return a trampoline which calls debugger_agent_breakpoint_from_context ().
+ * The trampoline is created lazily on first use and cached for the lifetime
+ * of the process; it is never freed.
+ */
+gpointer
+mini_get_breakpoint_trampoline (void)
+{
+	static gpointer trampoline;
+
+	if (!trampoline) {
+		gpointer tramp;
+		MonoTrampInfo *info;
+
+		if (mono_aot_only) {
+			/* In full-AOT mode the trampoline was precompiled into the AOT image. */
+			tramp = mono_aot_get_trampoline ("sdb_breakpoint_trampoline");
+		} else {
+#ifdef MONO_ARCH_HAVE_SDB_TRAMPOLINES
+			tramp = mono_arch_create_sdb_trampoline (FALSE, &info, FALSE);
+			mono_tramp_info_register (info);
+#else
+			/* Backends without SDB trampolines cannot reach this path. */
+			tramp = NULL;
+			g_assert_not_reached ();
+#endif
+		}
+		/* Make the generated code visible to other threads before publishing the pointer. */
+		mono_memory_barrier ();
+		trampoline = tramp;
+		/* NOTE(review): concurrent first callers may each create a trampoline; the
+		 * last write wins and the extra one is leaked — presumably benign, confirm. */
+	}
+
+	return trampoline;
+}
char* mono_get_generic_trampoline_name (MonoTrampolineType tramp_type);
char* mono_get_rgctx_fetch_trampoline_name (int slot);
gpointer mini_get_nullified_class_init_trampoline (void);
+gpointer mini_get_single_step_trampoline (void);
+gpointer mini_get_breakpoint_trampoline (void);
gpointer mini_add_method_trampoline (MonoMethod *orig_method, MonoMethod *m, gpointer compiled_method, gboolean add_static_rgctx_tramp, gboolean add_unbox_tramp);
gboolean mini_jit_info_is_gsharedvt (MonoJitInfo *ji);
gpointer mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info);
+guint8* mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot);
gpointer mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean is_v4, gboolean aot);
gpointer mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot);
guint8 *mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8* addr) MONO_LLVM_INTERNAL;
#include "mini.h"
#include "mini-amd64.h"
+#include "debugger-agent.h"
#if defined(__native_client_codegen__) && defined(__native_client__)
#include <malloc.h>
#include <nacl/nacl_dyncode.h>
#endif
+#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
+
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
/*
return *(guint32*)(plt_entry + 6);
#endif
}
+
+/*
+ * mono_arch_create_sdb_trampoline:
+ *
+ *   Return a trampoline which captures the current context, passes it to
+ * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
+ * then restores the (potentially changed) context.
+ * SINGLE_STEP selects which of the two debugger callbacks is invoked.
+ * INFO, if non-NULL, receives a MonoTrampInfo describing the generated code.
+ * AOT generation is not supported (asserted below).
+ */
+guint8*
+mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
+{
+	int tramp_size = 256;
+	int framesize, ctx_offset;
+	guint8 *code, *buf;
+	GSList *unwind_ops = NULL;
+	MonoJumpInfo *ji = NULL;
+
+	g_assert (!aot);
+
+	code = buf = mono_global_codeman_reserve (tramp_size);
+
+	/* The frame only needs to hold a MonoContext, aligned for the call below. */
+	framesize = sizeof (MonoContext);
+	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
+
+	// FIXME: Unwind info
+	amd64_push_reg (code, AMD64_RBP);
+	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
+	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
+
+	ctx_offset = 0;
+
+	/* Initialize a MonoContext structure on the stack */
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rax), AMD64_RAX, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rbx), AMD64_RBX, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rcx), AMD64_RCX, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rdx), AMD64_RDX, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rsi), AMD64_RSI, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rdi), AMD64_RDI, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r8), AMD64_R8, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r9), AMD64_R9, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r10), AMD64_R10, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r11), AMD64_R11, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r12), AMD64_R12, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r13), AMD64_R13, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r14), AMD64_R14, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r15), AMD64_R15, sizeof (mgreg_t));
+
+	/* The caller's rbp was pushed in the prologue, so it is at [rbp + 0] */
+	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rbp), AMD64_R11, sizeof (mgreg_t));
+	/* The caller's rsp is above the saved rbp and the return address */
+	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rsp), AMD64_R11, sizeof (mgreg_t));
+	/* The return address at [rbp + 8] is the ip at which the bp/ss was hit */
+	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rip), AMD64_R11, sizeof (mgreg_t));
+
+#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
+	/* Call the single step/breakpoint function in sdb, passing the ctx as arg 1 */
+	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);
+	if (single_step)
+		amd64_call_code (code, debugger_agent_single_step_from_context);
+	else
+		amd64_call_code (code, debugger_agent_breakpoint_from_context);
+#else
+	g_assert_not_reached ();
+#endif
+
+	/* Restore registers from ctx, which the debugger agent may have modified */
+	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rax), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rbx), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rcx), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rdx), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rsi), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rdi), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R8, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r8), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R9, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r9), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R10, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r10), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r11), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r12), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r13), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r14), sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, r15), sizeof (mgreg_t));
+
+	/* Write back (possibly changed) rbp and rip into the frame slots consumed by leave/ret */
+	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rbp), sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
+	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + G_STRUCT_OFFSET (MonoContext, rip), sizeof (mgreg_t));
+	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));
+
+	amd64_leave (code);
+	amd64_ret (code);
+
+	/* Check for overflow before touching the buffer further */
+	g_assert (code - buf <= tramp_size);
+
+	/* Flush the generated code, starting at the BUFFER start; the previous
+	 * version flushed starting at 'code' (the end of the generated bytes),
+	 * which covers the wrong region. Harmless on amd64 (the flush is a no-op
+	 * there) but wrong if this code is used as a template for other ports. */
+	mono_arch_flush_icache (buf, code - buf);
+
+	if (info) {
+		const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
+		*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
+	}
+
+	return buf;
+}