#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-counters.h>
+#include <mono/utils/mono-mmap.h>
#include "trace.h"
#include "mini-x86.h"
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;
-static gint thread_tls_offset = -1;
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#define optimize_for_xen 0
#endif
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
static gboolean is_win32 = TRUE;
#else
static gboolean is_win32 = FALSE;
#define ARGS_OFFSET 8
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/* Under windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
#else
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
+/*
+ * The code generated for sequence points reads from this location, which is
+ * made read-only when single stepping is enabled.
+ */
+static gpointer ss_trigger_page;
+
+/* Enabled breakpoints read from this trigger page */
+static gpointer bp_trigger_page;
+
const char*
mono_arch_regname (int reg)
{
static X86_Reg_No param_regs [] = { 0 };
-#if defined(PLATFORM_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
+#if defined(TARGET_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
#define SMALL_STRUCTS_IN_REGS
static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
#endif
* For x86 win32, see ???.
*/
static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info_internal (MonoGenericSharingContext *gsctx, CallInfo *cinfo, MonoMethodSignature *sig, gboolean is_pinvoke)
{
guint32 i, gr, fr;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
- CallInfo *cinfo;
-
- if (mp)
- cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
- else
- cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
gr = 0;
fr = 0;
cinfo->ret.storage = ArgOnDoubleFpStack;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = X86_EAX;
break;
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (&gr, &stack_size, ainfo);
break;
}
return cinfo;
}
+/*
+ * get_call_info:
+ *
+ *   Allocate a CallInfo large enough for SIG's arguments — from MP when one is
+ * given, otherwise with g_malloc0 (), in which case the caller owns the result
+ * and must g_free () it — then fill it in via get_call_info_internal ().
+ */
+static CallInfo*
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+{
+	int n = sig->hasthis + sig->param_count;
+	CallInfo *cinfo;
+
+	if (mp)
+		cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+	else
+		cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+
+	return get_call_info_internal (gsctx, cinfo, sig, is_pinvoke);
+}
+
/*
* mono_arch_get_argument_info:
* @csig: a method signature
* padding. arg_info should be large enought to hold param_count + 1 entries.
*
* Returns the size of the argument area on the stack.
+ * This should be signal safe, since it is called from
+ * mono_arch_find_jit_info_ext ().
+ * FIXME: The metadata calls might not be signal safe.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
int offset = 8;
CallInfo *cinfo;
- cinfo = get_call_info (NULL, NULL, csig, FALSE);
+ /* Avoid g_malloc as it is not signal safe */
+ cinfo = (CallInfo*)g_newa (guint8*, sizeof (CallInfo) + (sizeof (ArgInfo) * (csig->param_count + 1)));
+
+ cinfo = get_call_info_internal (NULL, cinfo, csig, FALSE);
if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
args_size += sizeof (gpointer);
args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
- g_free (cinfo);
-
return args_size;
}
mono_arch_init (void)
{
InitializeCriticalSection (&mini_arch_mutex);
+
+ ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ);
+ bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+ mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
}
/*
*
* Returns a bitmask corresponding to all supported versions.
*
- * TODO detect other versions like SSE4a.
*/
guint32
mono_arch_cpu_enumerate_simd_versions (void)
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 25))
- sse_opts |= 1 << SIMD_VERSION_SSE1;
+ sse_opts |= SIMD_VERSION_SSE1;
if (edx & (1 << 26))
- sse_opts |= 1 << SIMD_VERSION_SSE2;
+ sse_opts |= SIMD_VERSION_SSE2;
if (ecx & (1 << 0))
- sse_opts |= 1 << SIMD_VERSION_SSE3;
+ sse_opts |= SIMD_VERSION_SSE3;
if (ecx & (1 << 9))
- sse_opts |= 1 << SIMD_VERSION_SSSE3;
+ sse_opts |= SIMD_VERSION_SSSE3;
if (ecx & (1 << 19))
- sse_opts |= 1 << SIMD_VERSION_SSE41;
+ sse_opts |= SIMD_VERSION_SSE41;
if (ecx & (1 << 20))
- sse_opts |= 1 << SIMD_VERSION_SSE42;
+ sse_opts |= SIMD_VERSION_SSE42;
}
+
+	/* Yes, all this needs to be done to check for sse4a.
+	   See: "AMD CPUID Specification"
+	 */
+ if (cpuid (0x80000000, &eax, &ebx, &ecx, &edx)) {
+	/* eax greater than or equal to 0x80000001, ebx = 'htuA', ecx = 'DMAc', edx = 'itne' */
+ if ((((unsigned int) eax) >= 0x80000001) && (ebx == 0x68747541) && (ecx == 0x444D4163) && (edx == 0x69746E65)) {
+ cpuid (0x80000001, &eax, &ebx, &ecx, &edx);
+ if (ecx & (1 << 6))
+ sse_opts |= SIMD_VERSION_SSE4a;
+ }
+ }
+
+
return sse_opts;
}
if (cfg->arch.need_stack_frame_inited)
return cfg->arch.need_stack_frame;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
if (cfg->disable_omit_fp)
gint32 *offsets;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
/* Allocate locals */
offsets = mono_allocate_stack_slots (cfg, &locals_stack_size, &locals_stack_align);
+ if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
+ char *mname = mono_method_full_name (cfg->method, TRUE);
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
+ g_free (mname);
+ return;
+ }
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
- int j;
LLVMCallInfo *linfo;
+ MonoType *t;
n = sig->param_count + sig->hasthis;
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+
linfo->args [i].storage = LLVMArgNone;
switch (ainfo->storage) {
linfo->args [i].storage = LLVMArgInFPReg;
break;
case ArgOnStack:
- if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
- linfo->args [i].storage = LLVMArgVtypeByVal;
+ if (MONO_TYPE_ISSTRUCT (t)) {
+ if (mono_class_value_size (mono_class_from_mono_type (t), NULL) == 0)
+ /* LLVM seems to allocate argument space for empty structures too */
+ linfo->args [i].storage = LLVMArgNone;
+ else
+ linfo->args [i].storage = LLVMArgVtypeByVal;
} else {
linfo->args [i].storage = LLVMArgInIReg;
- if (!sig->params [i - sig->hasthis]->byref) {
- if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+ if (t->byref) {
+ if (t->type == MONO_TYPE_R4)
linfo->args [i].storage = LLVMArgInFPReg;
- } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+ else if (t->type == MONO_TYPE_R8)
linfo->args [i].storage = LLVMArgInFPReg;
- }
}
}
break;
MONO_ADD_INS (cfg->cbb, vtarg);
}
- /* if the function returns a struct, the called method already does a ret $0x4 */
- cinfo->stack_usage -= 4;
+ /* if the function returns a struct on stack, the called method already does a ret $0x4 */
+ if (cinfo->ret.storage != ArgValuetypeInReg)
+ cinfo->stack_usage -= 4;
}
call->stack_usage = cinfo->stack_usage;
guchar *code = p;
int arg_size = 0, stack_usage = 0, save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
-
- switch (mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type) {
+ MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+
+ switch (ret_type->type) {
case MONO_TYPE_VOID:
/* special case string .ctor icall */
if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class) {
stack_usage = enable_arguments ? 16 : 8;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
save_mode = SAVE_EAX;
stack_usage = enable_arguments ? 8 : 4;
break;
int sreg = tree->sreg1;
int need_touch = FALSE;
-#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
need_touch = TRUE;
#endif
return code;
}
+/*
+ * mono_x86_have_tls_get:
+ *
+ *   Return whether the inline TLS load emitted by mono_x86_emit_tls_get () can
+ * be used. On OSX this verifies that pthread_getspecific () still starts with
+ * the known three-word instruction sequence (%gs relative load at 0x48) which
+ * the inline version mirrors; on other platforms it is assumed available.
+ */
+gboolean
+mono_x86_have_tls_get (void)
+{
+#ifdef __APPLE__
+	guint32 *ins = (guint32*)pthread_getspecific;
+	/*
+	 * We're looking for these two instructions:
+	 *
+	 * mov 0x4(%esp),%eax
+	 * mov %gs:0x48(,%eax,4),%eax
+	 */
+	return ins [0] == 0x0424448b && ins [1] == 0x85048b65 && ins [2] == 0x00000048;
+#else
+	return TRUE;
+#endif
+}
+
/*
* mono_x86_emit_tls_get:
* @code: buffer to store code to
guint8*
mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
-#ifdef PLATFORM_WIN32
+#if defined(__APPLE__)
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_reg_mem (code, dreg, 0x48 + tls_offset * 4, 4);
+#elif defined(TARGET_WIN32)
/*
* See the Under the Hood article in the May 1996 issue of Microsoft Systems
* Journal and/or a disassembly of the TlsGet () function.
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
+ case OP_SEQ_POINT: {
+ int i;
+
+ if (cfg->compile_aot)
+ NOT_IMPLEMENTED;
+
+ /*
+ * Read from the single stepping trigger page. This will cause a
+ * SIGSEGV when single stepping is enabled.
+ * We do this _before_ the breakpoint, so single stepping after
+ * a breakpoint is hit will step to the next IL offset.
+ */
+ if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
+ x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)ss_trigger_page);
+
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+
+ /*
+ * A placeholder for a possible breakpoint inserted by
+ * mono_arch_set_breakpoint ().
+ */
+ for (i = 0; i < 6; ++i)
+ x86_nop (code);
+ break;
+ }
case OP_ADDCC:
case OP_IADDCC:
case OP_IADD:
*/
x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
+ } else if (power == 0) {
+ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
} else {
/* Based on gcc code */
x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
x86_call_imm (code, 0);
+ mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
break;
case OP_START_HANDLER: {
x86_fld_membase (code, X86_ESP, 0, FALSE);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
break;
- case OP_LCONV_TO_R_UN:
case OP_LCONV_TO_R_UN_2: {
static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
guint8 *br;
guint8 *code;
gboolean need_stack_frame;
- cfg->code_size = MAX (mono_method_get_header (method)->code_size * 4, 10240);
+ cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
x86_patch (buf, code);
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
/* FIXME: Add a separate key for LMF to avoid this */
x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
if (method->save_lmf) {
pos += sizeof (MonoLMF);
+ if (cfg->compile_aot)
+ cfg->disable_aot = TRUE;
+
/* save the current IP */
mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
x86_push_imm_template (code);
if (lmf_addr_tls_offset != -1) {
/* Load lmf quicky using the GS register */
code = mono_x86_emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
/* FIXME: Add a separate key for LMF to avoid this */
x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
if (alloc_size) {
/* See mono_emit_stack_alloc */
-#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
+ /*FIXME handle unbounded code expansion, we should use a loop in case of more than X interactions*/
+ guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 8; /*8 is the max size of x86_alu_reg_imm + x86_test_membase_reg*/
+ guint32 offset = code - cfg->native_code;
+ if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
+ while (required_code_size >= (cfg->code_size - offset))
+ cfg->code_size *= 2;
+ cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ code = cfg->native_code + offset;
+ mono_jit_stats.code_reallocs++;
+ }
while (remaining_size >= 0x1000) {
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
{
if (!tls_offset_inited) {
if (!getenv ("MONO_NO_TLS")) {
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/*
* We need to init this multiple times, since when we are first called, the key might not
* be initialized yet.
*/
appdomain_tls_offset = mono_domain_get_tls_key ();
lmf_tls_offset = mono_get_jit_tls_key ();
- thread_tls_offset = mono_thread_get_tls_key ();
/* Only 64 tls entries can be accessed using inline code */
if (appdomain_tls_offset >= 64)
appdomain_tls_offset = -1;
if (lmf_tls_offset >= 64)
lmf_tls_offset = -1;
- if (thread_tls_offset >= 64)
- thread_tls_offset = -1;
#else
#if MONO_XEN_OPT
optimize_for_xen = access ("/proc/xen", F_OK) == 0;
appdomain_tls_offset = mono_domain_get_tls_offset ();
lmf_tls_offset = mono_get_lmf_tls_offset ();
lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
- thread_tls_offset = mono_thread_get_tls_offset ();
#endif
}
}
#define BR_LARGE_SIZE 5
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
+#define DEBUG_IMT 0
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
if (!fail_tramp)
mono_stats.imt_thunks_size += code - start;
g_assert (code - start <= size);
+
+#if DEBUG_IMT
+ {
+ char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
+ mono_disassemble_code (NULL, (guint8*)start, code - start, buff);
+ g_free (buff);
+ }
+#endif
+
return start;
}
{
return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}
-
-MonoObject*
-mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
-{
- MonoMethodSignature *sig = mono_method_signature (method);
- CallInfo *cinfo = get_call_info (gsctx, NULL, sig, FALSE);
- int this_argument_offset;
- MonoObject *this_argument;
-
- /*
- * this is the offset of the this arg from esp as saved at the start of
- * mono_arch_create_trampoline_code () in tramp-x86.c.
- */
- this_argument_offset = 5;
- if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
- this_argument_offset++;
-
- this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));
-
- g_free (cinfo);
- return this_argument;
-}
#endif
MonoVTable*
return ins;
}
-MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
-{
- MonoInst* ins;
-
- if (thread_tls_offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->inst_offset = thread_tls_offset;
- return ins;
-}
-
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return (gpointer)regs [reg];
}
+/*
+ * mono_x86_get_this_arg_offset:
+ *
+ * Return the offset of the stack location where this is passed during a virtual
+ * call.
+ */
+guint32
+mono_x86_get_this_arg_offset (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig)
+{
+	CallInfo *cinfo = NULL;
+	int offset;
+
+	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
+		cinfo = get_call_info (gsctx, NULL, sig, FALSE);
+
+		offset = cinfo->args [0].offset;
+		/* get_call_info () was called with mp == NULL, so cinfo was
+		 * g_malloc0 ()ed and must be freed here to avoid leaking it. */
+		g_free (cinfo);
+	} else {
+		offset = 0;
+	}
+
+	return offset;
+}
+
gpointer
mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig,
mgreg_t *regs, guint8 *code)
#endif /* MONO_ARCH_SIMD_INTRINSICS */
}
+/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
+/*
+ * mono_arch_install_handler_block_guard:
+ *
+ *   Patch the saved stack slot pointed to by CLAUSE's spvar so the handler
+ * block resumes at NEW_VALUE instead of its original continuation. Returns the
+ * previous value in the slot, or NULL if the handler prologue could not be
+ * decoded. The slot is only patched if it still holds an address inside JI's
+ * code range.
+ */
+gpointer
+mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
+{
+	int offset;
+	gpointer *sp, old_value;
+	char *bp;
+	const unsigned char *handler;
+
+	/*Decode the first instruction to figure out where did we store the spvar*/
+	/*Our jit MUST generate the following:
+	mov %esp, -?(%ebp)
+	Which is encoded as: 0x89 mod_rm.
+	mod_rm (esp, ebp, imm) which can be: (imm will never be zero)
+	mod (reg + imm8): 01 reg(esp): 100 rm(ebp): 101 -> 01100101 (0x65)
+	mod (reg + imm32): 10 reg(esp): 100 rm(ebp): 101 -> 10100101 (0xA5)
+	*/
+	handler = clause->handler_start;
+
+	/* 0x89 is the `mov r/m32, r32` opcode the decode table above describes */
+	if (*handler != 0x89)
+		return NULL;
+
+	++handler;
+
+	if (*handler == 0x65)
+		offset = *(signed char*)(handler + 1);	/* disp8 form */
+	else if (*handler == 0xA5)
+		offset = *(int*)(handler + 1);	/* disp32 form */
+	else
+		return NULL;
+
+	/*Load the spvar*/
+	bp = MONO_CONTEXT_GET_BP (ctx);
+	sp = *(gpointer*)(bp + offset);
+
+	old_value = *sp;
+	/* Don't patch a slot that no longer points into this method's code */
+	if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
+		return old_value;
+
+	*sp = new_value;
+
+	return old_value;
+}
+
+#if __APPLE__
+#define DBG_SIGNAL SIGBUS
+#else
+#define DBG_SIGNAL SIGSEGV
+#endif
+
+/* Soft Debug support */
+#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
+
+/*
+ * mono_arch_set_breakpoint:
+ *
+ * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
+ * The location should contain code emitted by OP_SEQ_POINT.
+ */
+void
+mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+	guint8 *code = ip;
+
+	/*
+	 * In production, we will use int3 (has to fix the size in the md
+	 * file). But that could confuse gdb, so during development, we emit a SIGSEGV
+	 * instead.
+	 */
+	/* OP_SEQ_POINT leaves a run of nops here as the breakpoint placeholder */
+	g_assert (code [0] == 0x90);
+	/* Overwrite the placeholder with a cmp that reads bp_trigger_page, which
+	 * is mapped with no access (see mono_arch_init ()), so executing it faults. */
+	x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)bp_trigger_page);
+}
+
+/*
+ * mono_arch_clear_breakpoint:
+ *
+ * Clear the breakpoint at IP.
+ */
+void
+mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+	guint8 *code = ip;
+	int i;
+
+	/* Restore the 6 byte nop placeholder that OP_SEQ_POINT originally emitted
+	 * (6 is the size of the cmp written by mono_arch_set_breakpoint ()). */
+	for (i = 0; i < 6; ++i)
+		x86_nop (code);
+}
+
+/*
+ * mono_arch_start_single_stepping:
+ *
+ * Start single stepping.
+ */
+void
+mono_arch_start_single_stepping (void)
+{
+	/* Revoke all access so the read of ss_trigger_page emitted for each
+	 * OP_SEQ_POINT faults, signalling the debugger at every sequence point. */
+	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+}
+
+/*
+ * mono_arch_stop_single_stepping:
+ *
+ * Stop single stepping.
+ */
+void
+mono_arch_stop_single_stepping (void)
+{
+	/* Make the trigger page readable again so sequence point reads no longer fault */
+	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+}
+
+/*
+ * mono_arch_is_single_step_event:
+ *
+ * Return whenever the machine state in SIGCTX corresponds to a single
+ * step event.
+ */
+gboolean
+mono_arch_is_single_step_event (void *info, void *sigctx)
+{
+	/* A single step event is a fault whose address falls on (or slightly past,
+	 * see below) ss_trigger_page. */
+#ifdef TARGET_WIN32
+	EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;	/* Sometimes the address is off by 4 */
+	if ((einfo->ExceptionInformation[1] >= ss_trigger_page && (guint8*)einfo->ExceptionInformation[1] <= (guint8*)ss_trigger_page + 128))
+		return TRUE;
+	else
+		return FALSE;
+#else
+	siginfo_t* sinfo = (siginfo_t*) info;
+	/* Sometimes the address is off by 4 */
+	if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128))
+		return TRUE;
+	else
+		return FALSE;
+#endif
+}
+
+/*
+ * mono_arch_is_breakpoint_event:
+ *
+ * Return whenever the machine state in SIGCTX corresponds to a breakpoint,
+ * i.e. a fault whose address falls on (or slightly past) bp_trigger_page.
+ */
+gboolean
+mono_arch_is_breakpoint_event (void *info, void *sigctx)
+{
+#ifdef TARGET_WIN32
+	EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;	/* Sometimes the address is off by 4 */
+	if ((einfo->ExceptionInformation[1] >= bp_trigger_page && (guint8*)einfo->ExceptionInformation[1] <= (guint8*)bp_trigger_page + 128))
+		return TRUE;
+	else
+		return FALSE;
+#else
+	siginfo_t* sinfo = (siginfo_t*)info;
+	/* Sometimes the address is off by 4 */
+	if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128))
+		return TRUE;
+	else
+		return FALSE;
+#endif
+}
+
+/*
+ * mono_arch_get_ip_for_breakpoint:
+ *
+ * See mini-amd64.c for docs.
+ */
+guint8*
+mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
+{
+	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+	/* The faulting cmp written by mono_arch_set_breakpoint () is the first
+	 * instruction of the breakpoint site, so no adjustment is needed. */
+	return ip;
+}
+
+/* Size of the cmp emitted by mono_arch_set_breakpoint () */
+#define BREAKPOINT_SIZE 6
+
+/*
+ * mono_arch_get_ip_for_single_step:
+ *
+ * See mini-amd64.c for docs.
+ */
+guint8*
+mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
+{
+	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+	/* Size of the x86_alu_reg_mem () cmp emitted for OP_SEQ_POINT */
+	ip += 6;
+
+	return ip;
+}
+
+/*
+ * mono_arch_skip_breakpoint:
+ *
+ * See mini-amd64.c for docs.
+ */
+void
+mono_arch_skip_breakpoint (MonoContext *ctx)
+{
+	/* Resume execution after the faulting cmp written by mono_arch_set_breakpoint () */
+	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + BREAKPOINT_SIZE);
+}
+
+/*
+ * mono_arch_skip_single_step:
+ *
+ * See mini-amd64.c for docs.
+ */
+void
+mono_arch_skip_single_step (MonoContext *ctx)
+{
+	/* 6 is the size of the x86_alu_reg_mem () cmp emitted for OP_SEQ_POINT */
+	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 6);
+}
+
+/*
+ * mono_arch_get_seq_point_info:
+ *
+ * See mini-amd64.c for docs.
+ */
+gpointer
+mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
+{
+	/* NOTE(review): OP_SEQ_POINT currently asserts on compile_aot, so this
+	 * (presumably AOT-only) helper should be unreachable for now — confirm. */
+	NOT_IMPLEMENTED;
+	return NULL;
+}
+
+#endif
+