#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-math.h>
+#include <mono/utils/mono-mmap.h>
#include "trace.h"
#include "ir-emit.h"
#include "mini-amd64.h"
#include "cpu-amd64.h"
+#include "debugger-agent.h"
/*
* Can't define this in mini-amd64.h cause that would turn on the generic code in
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;
-static gint thread_tls_offset = -1;
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
/* Under windows, the calling convention is never stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (FALSE)
#else
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif
+/* amd64_mov_reg_imm () */
+#define BREAKPOINT_SIZE 8
+
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-#ifdef PLATFORM_WIN32
+/*
+ * The code generated for sequence points reads from this location, which is
+ * made read-only when single stepping is enabled.
+ */
+static gpointer ss_trigger_page;
+
+/* Enabled breakpoints read from this trigger page */
+static gpointer bp_trigger_page;
+
+#ifdef HOST_WIN32
/* On Win64 always reserve first 32 bytes for first four arguments */
#define ARGS_OFFSET 48
#else
#define DEBUG(a) if (cfg->verbose_level > 1) a
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
#define PARAM_REGS 4
static AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };
}
}
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
#define FLOAT_PARAM_REGS 4
#else
#define FLOAT_PARAM_REGS 8
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
class2 = ARG_CLASS_INTEGER;
#else
class2 = ARG_CLASS_SSE;
klass = mono_class_from_mono_type (type);
size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
-#ifndef PLATFORM_WIN32
+#ifndef HOST_WIN32
if (!sig->pinvoke && !disable_vtypes_in_regs && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
/* We pass and return vtypes of size 8 in a register */
} else if (!sig->pinvoke || (size == 0) || (size > 16)) {
info = mono_marshal_load_type_info (klass);
g_assert (info);
-#ifndef PLATFORM_WIN32
+#ifndef HOST_WIN32
if (info->native_size > 16) {
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (info->native_size, 8);
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
/* The float param registers and other param registers must be the same index on Windows x64.*/
if (gr > fr)
fr = gr;
add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_TYPEDBYREF:
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
#else
stack_size += sizeof (MonoTypedRef);
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
// There always is 32 bytes reserved on the stack when calling on Winx64
stack_size += 0x20;
#endif
mono_arch_init (void)
{
	InitializeCriticalSection (&mini_arch_mutex);
+
+	/*
+	 * Allocate the soft-debugger trigger pages. MONO_MMAP_32BIT keeps them in
+	 * the low 4GB so their addresses fit in the 32 bit immediates emitted by
+	 * OP_SEQ_POINT and mono_arch_set_breakpoint () (both g_assert this).
+	 */
+	ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+	bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+	/* Reads from bp_trigger_page must always fault: breakpoints work by loading from it */
+	mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
}
/*
*
* Returns a bitmask corresponding to all supported versions.
*
- * TODO detect other versions like SSE4a.
*/
guint32
mono_arch_cpu_enumerate_simd_versions (void)
if (ecx & (1 << 20))
sse_opts |= 1 << SIMD_VERSION_SSE42;
}
+
+ /* Yes, all this needs to be done to check for sse4a.
+ See: "Amd: CPUID Specification"
+ */
+ if (cpuid (0x80000000, &eax, &ebx, &ecx, &edx)) {
+	/* eax greater or equal than 0x80000001, ebx = 'htuA', ecx = 'DMAc', edx = 'itne'*/
+ if ((((unsigned int) eax) >= 0x80000001) && (ebx == 0x68747541) && (ecx == 0x444D4163) && (edx == 0x69746E65)) {
+ cpuid (0x80000001, &eax, &ebx, &ecx, &edx);
+ if (ecx & (1 << 6))
+ sse_opts |= 1 << SIMD_VERSION_SSE4a;
+ }
+ }
+
return sse_opts;
}
regs = g_list_prepend (regs, (gpointer)AMD64_R13);
regs = g_list_prepend (regs, (gpointer)AMD64_R14);
regs = g_list_prepend (regs, (gpointer)AMD64_R15);
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
#endif
/* Allocate locals */
if (!cfg->globalra) {
offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
+ if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
+ char *mname = mono_method_full_name (cfg->method, TRUE);
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
+ g_free (mname);
+ return;
+ }
+
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
if (cfg->arch.omit_fp) {
ins->inst_offset = offset;
offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+			// Arguments are not yet supported by the stack map creation code
+ //cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
} else {
offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
ins->inst_offset = - offset;
+ //cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
}
}
}
}
}
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
if (call->inst.opcode != OP_JMP && OP_TAILCALL != call->inst.opcode) {
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 0x20);
}
{
int i;
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
return FALSE;
#endif
case ArgNone:
case ArgInIReg:
break;
+ case ArgValuetypeInReg: {
+ ArgInfo *ainfo = &cinfo->ret;
+
+ if (ainfo->pair_storage [0] != ArgNone && ainfo->pair_storage [0] != ArgInIReg)
+ return FALSE;
+ if (ainfo->pair_storage [1] != ArgNone && ainfo->pair_storage [1] != ArgInIReg)
+ return FALSE;
+ break;
+ }
default:
return FALSE;
}
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
- ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
- MonoMethodSignature *sig = ainfo->sig;
+ ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
+ MonoMethodSignature *sig = dinfo->sig;
guint8 *ret = ((DynCallArgs*)buf)->ret;
+ mgreg_t res = ((DynCallArgs*)buf)->res;
switch (mono_type_get_underlying_type (sig->ret)->type) {
case MONO_TYPE_VOID:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_PTR:
- *(gpointer*)ret = (gpointer)((DynCallArgs*)buf)->res;
+ *(gpointer*)ret = (gpointer)res;
break;
case MONO_TYPE_I1:
- *(gint8*)ret = ((DynCallArgs*)buf)->res;
+ *(gint8*)ret = res;
break;
case MONO_TYPE_U1:
case MONO_TYPE_BOOLEAN:
- *(guint8*)ret = ((DynCallArgs*)buf)->res;
+ *(guint8*)ret = res;
break;
case MONO_TYPE_I2:
- *(gint16*)ret = ((DynCallArgs*)buf)->res;
+ *(gint16*)ret = res;
break;
case MONO_TYPE_U2:
case MONO_TYPE_CHAR:
- *(guint16*)ret = ((DynCallArgs*)buf)->res;
+ *(guint16*)ret = res;
break;
case MONO_TYPE_I4:
- *(gint32*)ret = ((DynCallArgs*)buf)->res;
+ *(gint32*)ret = res;
break;
case MONO_TYPE_U4:
- *(guint32*)ret = ((DynCallArgs*)buf)->res;
+ *(guint32*)ret = res;
break;
case MONO_TYPE_I8:
- *(gint64*)ret = ((DynCallArgs*)buf)->res;
+ *(gint64*)ret = res;
break;
case MONO_TYPE_U8:
- *(guint64*)ret = ((DynCallArgs*)buf)->res;
+ *(guint64*)ret = res;
break;
case MONO_TYPE_GENERICINST:
if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
- *(gpointer*)ret = (gpointer)((DynCallArgs*)buf)->res;
+ *(gpointer*)ret = (gpointer)res;
break;
} else {
/* Fall through */
}
case MONO_TYPE_VALUETYPE:
- g_assert (ainfo->cinfo->vtype_retaddr);
- /* Nothing to do */
+ if (dinfo->cinfo->vtype_retaddr) {
+ /* Nothing to do */
+ } else {
+ ArgInfo *ainfo = &dinfo->cinfo->ret;
+
+ g_assert (ainfo->storage == ArgValuetypeInReg);
+
+ if (ainfo->pair_storage [0] != ArgNone) {
+ g_assert (ainfo->pair_storage [0] == ArgInIReg);
+ ((mgreg_t*)ret)[0] = res;
+ }
+
+ g_assert (ainfo->pair_storage [1] == ArgNone);
+ }
break;
default:
g_assert_not_reached ();
near_call = FALSE;
#endif
+ /* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */
+ if (optimize_for_xen)
+ near_call = FALSE;
+
if (near_call) {
/*
* Align the call displacement to an address divisible by 4 so it does
static inline guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data, gboolean win64_adjust_stack)
{
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
if (win64_adjust_stack)
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
#endif
code = emit_call_body (cfg, code, patch_type, data);
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
if (win64_adjust_stack)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
#endif
int sreg = tree->sreg1;
int need_touch = FALSE;
-#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(HOST_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
if (!tree->flags & MONO_INST_INIT)
need_touch = TRUE;
#endif
guint8*
mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
g_assert (tls_offset < 64);
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
+ case OP_SEQ_POINT: {
+ int i;
+
+ if (cfg->compile_aot)
+ NOT_IMPLEMENTED;
+
+ /*
+ * Read from the single stepping trigger page. This will cause a
+ * SIGSEGV when single stepping is enabled.
+ * We do this _before_ the breakpoint, so single stepping after
+ * a breakpoint is hit will step to the next IL offset.
+ */
+ g_assert (((guint64)ss_trigger_page >> 32) == 0);
+
+ if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
+ amd64_mov_reg_mem (code, AMD64_R11, (guint64)ss_trigger_page, 4);
+
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+
+ /*
+ * A placeholder for a possible breakpoint inserted by
+ * mono_arch_set_breakpoint ().
+ */
+ for (i = 0; i < BREAKPOINT_SIZE; ++i)
+ x86_nop (code);
+ break;
+ }
case OP_ADDCC:
case OP_LADD:
amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - cfa_offset);
async_exc_point (code);
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
mono_arch_unwindinfo_add_push_nonvol (&cfg->arch.unwindinfo, cfg->native_code, code, AMD64_RBP);
#endif
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (gpointer));
mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
async_exc_point (code);
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
mono_arch_unwindinfo_add_set_fpreg (&cfg->arch.unwindinfo, cfg->native_code, code, AMD64_RBP);
#endif
}
/* Allocate stack frame */
if (alloc_size) {
/* See mono_emit_stack_alloc */
-#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(HOST_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
+		/*FIXME handle unbounded code expansion, we should use a loop in case of more than X iterations*/
+ guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 10; /*10 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg*/
+ guint32 offset = code - cfg->native_code;
+ if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
+ while (required_code_size >= (cfg->code_size - offset))
+ cfg->code_size *= 2;
+ cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ code = cfg->native_code + offset;
+ mono_jit_stats.code_reallocs++;
+ }
+
while (remaining_size >= 0x1000) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
if (cfg->arch.omit_fp) {
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
}
async_exc_point (code);
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
if (cfg->arch.omit_fp)
mono_arch_unwindinfo_add_alloc_stack (&cfg->arch.unwindinfo, cfg->native_code, code, 0x1000);
#endif
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
if (cfg->arch.omit_fp)
mono_arch_unwindinfo_add_alloc_stack (&cfg->arch.unwindinfo, cfg->native_code, code, remaining_size);
#endif
case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
#endif
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_jit_thread_attach", TRUE);
amd64_patch (buf, code);
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
/* FIXME: Add a separate key for LMF to avoid this */
amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
if (lmf_addr_tls_offset != -1) {
/* Load lmf quicky using the FS register */
code = mono_amd64_emit_tls_get (code, AMD64_RAX, lmf_addr_tls_offset);
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
/* FIXME: Add a separate key for LMF to avoid this */
amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
if (cfg->used_int_regs & (1 << AMD64_R15)) {
amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
}
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
if (cfg->used_int_regs & (1 << AMD64_RDI)) {
amd64_mov_reg_membase (code, AMD64_RDI, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), 8);
}
/* We have to shift the arguments left */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
for (i = 0; i < param_count; ++i) {
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
if (i < 3)
amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
else
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
if (!tls_offset_inited) {
-#ifdef PLATFORM_WIN32
+#ifdef HOST_WIN32
/*
* We need to init this multiple times, since when we are first called, the key might not
* be initialized yet.
*/
appdomain_tls_offset = mono_domain_get_tls_key ();
lmf_tls_offset = mono_get_jit_tls_key ();
- thread_tls_offset = mono_thread_get_tls_key ();
lmf_addr_tls_offset = mono_get_jit_tls_key ();
/* Only 64 tls entries can be accessed using inline code */
appdomain_tls_offset = -1;
if (lmf_tls_offset >= 64)
lmf_tls_offset = -1;
- if (thread_tls_offset >= 64)
- thread_tls_offset = -1;
#else
tls_offset_inited = TRUE;
#ifdef MONO_XEN_OPT
appdomain_tls_offset = mono_domain_get_tls_offset ();
lmf_tls_offset = mono_get_lmf_tls_offset ();
lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
- thread_tls_offset = mono_thread_get_tls_offset ();
#endif
}
}
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
-
-MonoObject*
-mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
-{
- return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
-}
#endif
MonoVTable*
return ins;
}
-MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
-{
- MonoInst* ins;
-
- if (thread_tls_offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->inst_offset = thread_tls_offset;
- return ins;
-}
-
#define _CTX_REG(ctx,fld,i) ((gpointer)((&ctx->fld)[i]))
gpointer
g_assert_not_reached ();
}
}
+
+/* Soft Debug support */
+#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
+
+/*
+ * mono_arch_set_breakpoint:
+ *
+ * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
+ * The location should contain code emitted by OP_SEQ_POINT.
+ */
+void
+mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+	guint8 *code = ip;
+	guint8 *orig_code = code;
+
+	/*
+	 * In production, we will use int3 (has to fix the size in the md
+	 * file). But that could confuse gdb, so during development, we emit a SIGSEGV
+	 * instead.
+	 */
+	/* The location must hold the nop placeholder emitted by OP_SEQ_POINT */
+	g_assert (code [0] == 0x90);
+
+	/* The page was mapped MONO_MMAP_32BIT so its address fits the 32 bit immediate below */
+	g_assert (((guint64)bp_trigger_page >> 32) == 0);
+
+	/* bp_trigger_page has no access rights, so executing this load raises SIGSEGV */
+	amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
+	g_assert (code - orig_code == BREAKPOINT_SIZE);
+}
+
+/*
+ * mono_arch_clear_breakpoint:
+ *
+ * Clear the breakpoint at IP.
+ */
+void
+mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+	guint8 *code = ip;
+	int remaining = BREAKPOINT_SIZE;
+
+	/* Overwrite the faulting load emitted by mono_arch_set_breakpoint () with nops */
+	while (remaining-- > 0)
+		x86_nop (code);
+}
+
+/*
+ * mono_arch_start_single_stepping:
+ *
+ * Start single stepping.
+ */
+void
+mono_arch_start_single_stepping (void)
+{
+	gpointer page = ss_trigger_page;
+
+	/* Revoke all access so the per sequence point load from ss_trigger_page faults */
+	mono_mprotect (page, mono_pagesize (), 0);
+}
+
+/*
+ * mono_arch_stop_single_stepping:
+ *
+ * Stop single stepping.
+ */
+void
+mono_arch_stop_single_stepping (void)
+{
+	gpointer page = ss_trigger_page;
+
+	/* Restore read access so OP_SEQ_POINT loads no longer fault */
+	mono_mprotect (page, mono_pagesize (), MONO_MMAP_READ);
+}
+
+/*
+ * mono_arch_is_single_step_event:
+ *
+ * Return whether the machine state in SIGCTX corresponds to a single
+ * step event.
+ */
+gboolean
+mono_arch_is_single_step_event (void *info, void *sigctx)
+{
+#ifdef HOST_WIN32
+	/* Not implemented: the SEH EXCEPTION_RECORD in INFO is not examined yet */
+	return FALSE;
+#else
+	siginfo_t* sinfo = (siginfo_t*) info;
+
+	/* Sometimes the address is off by 4 */
+	return sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128;
+#endif
+}
+
+/*
+ * mono_arch_is_breakpoint_event:
+ *
+ * Return whether the fault described by INFO/SIGCTX was caused by a read
+ * from the (inaccessible) breakpoint trigger page.
+ */
+gboolean
+mono_arch_is_breakpoint_event (void *info, void *sigctx)
+{
+#ifdef HOST_WIN32
+	/* Not implemented: the SEH EXCEPTION_RECORD in INFO is not examined yet */
+	return FALSE;
+#else
+	siginfo_t* sinfo = (siginfo_t*) info;
+
+	/* Sometimes the address is off by 4 */
+	return sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128;
+#endif
+}
+
+/*
+ * mono_arch_get_ip_for_breakpoint:
+ *
+ * Convert the ip in CTX to the address where a breakpoint was placed.
+ */
+guint8*
+mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
+{
+	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+	/*
+	 * The faulting instruction is the mov emitted by mono_arch_set_breakpoint ()
+	 * itself, so the fault ip already points at the start of the breakpoint
+	 * sequence and no adjustment is needed.
+	 * NOTE(review): the original "size of xor r11, r11" comment looks stale —
+	 * no xor is emitted here; confirm against OP_SEQ_POINT.
+	 */
+	ip -= 0;
+
+	return ip;
+}
+
+/*
+ * mono_arch_get_ip_for_single_step:
+ *
+ * Convert the ip in CTX to the address stored in seq_points.
+ */
+guint8*
+mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
+{
+	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+	/*
+	 * Size of amd64_mov_reg_mem (r11): skip the faulting load of
+	 * ss_trigger_page so ip matches the address recorded by
+	 * mono_add_seq_point (), which is called right after that mov
+	 * in OP_SEQ_POINT.
+	 */
+	ip += 8;
+
+	return ip;
+}
+
+/*
+ * mono_arch_skip_breakpoint:
+ *
+ * Modify CTX so the ip is placed after the breakpoint instruction, so when
+ * we resume, the instruction is not executed again.
+ */
+void
+mono_arch_skip_breakpoint (MonoContext *ctx)
+{
+	guint8 *resume_ip = (guint8*)MONO_CONTEXT_GET_IP (ctx) + BREAKPOINT_SIZE;
+
+	/* Resume after the whole breakpoint sequence so the faulting load is not re-executed */
+	MONO_CONTEXT_SET_IP (ctx, resume_ip);
+}
+
+/*
+ * mono_arch_skip_single_step:
+ *
+ * Modify CTX so the ip is placed after the single step trigger instruction,
+ * so that when we resume, the instruction is not executed again.
+ */
+void
+mono_arch_skip_single_step (MonoContext *ctx)
+{
+	/* 8 == size of the amd64_mov_reg_mem (r11) reading ss_trigger_page in OP_SEQ_POINT */
+	guint8 *resume_ip = (guint8*)MONO_CONTEXT_GET_IP (ctx) + 8;
+
+	MONO_CONTEXT_SET_IP (ctx, resume_ip);
+}
+
+/*
+ * mono_arch_get_seq_point_info:
+ *
+ * Return a pointer to a data structure which is used by the sequence
+ * point implementation in AOTed code.
+ */
+gpointer
+mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
+{
+	/* AOT is not supported by the soft debugger on amd64 yet (OP_SEQ_POINT also asserts this) */
+	NOT_IMPLEMENTED;
+	return NULL;
+}
+
+#endif