#include <mono/metadata/mono-debug.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-counters.h>
+#include <mono/utils/mono-mmap.h>
#include "trace.h"
#include "mini-x86.h"
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;
-static gint thread_tls_offset = -1;
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#else
#define optimize_for_xen 0
#endif
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
static gboolean is_win32 = TRUE;
#else
static gboolean is_win32 = FALSE;
#define ARGS_OFFSET 8
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/* Under windows, the default pinvoke calling convention is stdcall */
#define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT))
#else
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
+/*
+ * The code generated for sequence points reads from this location, which is
+ * made read-only when single stepping is enabled.
+ */
+static gpointer ss_trigger_page;
+
+/* Enabled breakpoints read from this trigger page */
+static gpointer bp_trigger_page;
+
const char*
mono_arch_regname (int reg)
{
}
}
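+/*
+ * mono_x86_patch:
+ *
+ *   Callable wrapper around the x86_patch () macro.
+ */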
+void
+mono_x86_patch (unsigned char* code, gpointer target)
+{
+ x86_patch (code, (unsigned char*)target);
+}
typedef enum {
ArgInIReg,
guint32 freg_usage;
gboolean need_stack_align;
guint32 stack_align_amount;
+ gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
static X86_Reg_No param_regs [] = { 0 };
-#if defined(PLATFORM_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
+#if defined(TARGET_WIN32) || defined(__APPLE__) || defined(__FreeBSD__)
#define SMALL_STRUCTS_IN_REGS
static X86_Reg_No return_regs [] = { X86_EAX, X86_EDX };
#endif
* For x86 win32, see ???.
*/
static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info_internal (MonoGenericSharingContext *gsctx, CallInfo *cinfo, MonoMethodSignature *sig, gboolean is_pinvoke)
{
- guint32 i, gr, fr;
+ guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
- CallInfo *cinfo;
-
- if (mp)
- cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
- else
- cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
gr = 0;
fr = 0;
cinfo->ret.storage = ArgOnDoubleFpStack;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = X86_EAX;
break;
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack)
+ if (cinfo->ret.storage == ArgOnStack) {
+ cinfo->vtype_retaddr = TRUE;
/* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
+ }
break;
}
case MONO_TYPE_TYPEDBYREF:
- /* Same as a valuetype with size 24 */
- add_general (&gr, &stack_size, &cinfo->ret);
- ;
+ /* Same as a valuetype with size 12 */
+ cinfo->vtype_retaddr = TRUE;
break;
case MONO_TYPE_VOID:
cinfo->ret.storage = ArgNone;
}
}
- /* this */
- if (sig->hasthis)
- add_general (&gr, &stack_size, cinfo->args + 0);
+ pstart = 0;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, allowing 'this' to be always passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers.
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (sig->params [0])))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
+ pstart = 1;
+ }
+ add_general (&gr, &stack_size, &cinfo->ret);
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis)
+ add_general (&gr, &stack_size, cinfo->args + 0);
+
+ if (cinfo->vtype_retaddr)
+ add_general (&gr, &stack_size, &cinfo->ret);
+ }
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (&gr, &stack_size, ainfo);
break;
}
return cinfo;
}
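+/*
+ * get_call_info:
+ *
+ *   Allocate a CallInfo structure from MP (or from the heap if MP is NULL) and
+ * fill it out using get_call_info_internal ().
+ */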
+static CallInfo*
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+{
+ int n = sig->hasthis + sig->param_count;
+ CallInfo *cinfo;
+
+ if (mp)
+ cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+ else
+ cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
+
+ return get_call_info_internal (gsctx, cinfo, sig, is_pinvoke);
+}
+
/*
* mono_arch_get_argument_info:
* @csig: a method signature
 * padding. arg_info should be large enough to hold param_count + 1 entries.
*
* Returns the size of the argument area on the stack.
+ * This should be signal safe, since it is called from
+ * mono_arch_find_jit_info_ext ().
+ * FIXME: The metadata calls might not be signal safe.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
int offset = 8;
CallInfo *cinfo;
- cinfo = get_call_info (NULL, NULL, csig, FALSE);
+ /* Avoid g_malloc as it is not signal safe */
+ cinfo = (CallInfo*)g_newa (guint8*, sizeof (CallInfo) + (sizeof (ArgInfo) * (csig->param_count + 1)));
- if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
- args_size += sizeof (gpointer);
- offset += 4;
- }
+ cinfo = get_call_info_internal (NULL, cinfo, csig, FALSE);
arg_info [0].offset = offset;
offset += 4;
}
+ if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
+ args_size += sizeof (gpointer);
+ offset += 4;
+ }
+
arg_info [0].size = args_size;
for (k = 0; k < param_count; k++) {
args_size += pad = (align - (args_size & (align - 1))) & (align - 1);
arg_info [k].pad = pad;
- g_free (cinfo);
-
return args_size;
}
mono_arch_init (void)
{
InitializeCriticalSection (&mini_arch_mutex);
+
+ ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ);
+ bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+ mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+
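+	/* Register the exception throw helpers so they can be referenced by name from AOT compiled code */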
+ mono_aot_register_jit_icall ("mono_x86_throw_exception", mono_x86_throw_exception);
+ mono_aot_register_jit_icall ("mono_x86_throw_corlib_exception", mono_x86_throw_corlib_exception);
}
/*
guint32 opts = 0;
*exclude_mask = 0;
+
+ if (mono_aot_only)
+ /* The cpuid function allocates from the global codeman */
+ return opts;
+
/* Feature Flags function, flags returned in EDX. */
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 15)) {
*
* Returns a bitmask corresponding to all supported versions.
*
- * TODO detect other versions like SSE4a.
*/
guint32
mono_arch_cpu_enumerate_simd_versions (void)
int eax, ebx, ecx, edx;
guint32 sse_opts = 0;
+ if (mono_aot_only)
+ /* The cpuid function allocates from the global codeman */
+ return sse_opts;
+
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 25))
- sse_opts |= 1 << SIMD_VERSION_SSE1;
+ sse_opts |= SIMD_VERSION_SSE1;
if (edx & (1 << 26))
- sse_opts |= 1 << SIMD_VERSION_SSE2;
+ sse_opts |= SIMD_VERSION_SSE2;
if (ecx & (1 << 0))
- sse_opts |= 1 << SIMD_VERSION_SSE3;
+ sse_opts |= SIMD_VERSION_SSE3;
if (ecx & (1 << 9))
- sse_opts |= 1 << SIMD_VERSION_SSSE3;
+ sse_opts |= SIMD_VERSION_SSSE3;
if (ecx & (1 << 19))
- sse_opts |= 1 << SIMD_VERSION_SSE41;
+ sse_opts |= SIMD_VERSION_SSE41;
if (ecx & (1 << 20))
- sse_opts |= 1 << SIMD_VERSION_SSE42;
+ sse_opts |= SIMD_VERSION_SSE42;
}
+
+	/* Yes, all this needs to be done to check for sse4a.
+	   See: "AMD CPUID Specification"
+	 */
+ if (cpuid (0x80000000, &eax, &ebx, &ecx, &edx)) {
+		/* eax greater than or equal to 0x80000001, ebx = 'htuA', ecx = 'DMAc', edx = 'itne' */
+ if ((((unsigned int) eax) >= 0x80000001) && (ebx == 0x68747541) && (ecx == 0x444D4163) && (edx == 0x69746E65)) {
+ cpuid (0x80000001, &eax, &ebx, &ecx, &edx);
+ if (ecx & (1 << 6))
+ sse_opts |= SIMD_VERSION_SSE4a;
+ }
+ }
+
return sse_opts;
}
MonoMethodHeader *header;
gboolean result = FALSE;
+#if defined(__APPLE__)
+ /*OSX requires stack frame code to have the correct alignment. */
+ return TRUE;
+#endif
+
if (cfg->arch.need_stack_frame_inited)
return cfg->arch.need_stack_frame;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
if (cfg->disable_omit_fp)
gint32 *offsets;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
/* Allocate locals */
offsets = mono_allocate_stack_slots (cfg, &locals_stack_size, &locals_stack_align);
+ if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
+ char *mname = mono_method_full_name (cfg->method, TRUE);
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
+ g_free (mname);
+ return;
+ }
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
- int j;
LLVMCallInfo *linfo;
+ MonoType *t;
n = sig->param_count + sig->hasthis;
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage == ArgInIReg) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
+ linfo->vret_arg_index = cinfo->vret_arg_index;
}
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != ArgInIReg) {
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+
linfo->args [i].storage = LLVMArgNone;
switch (ainfo->storage) {
linfo->args [i].storage = LLVMArgInFPReg;
break;
case ArgOnStack:
- if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
- linfo->args [i].storage = LLVMArgVtypeByVal;
+ if (MONO_TYPE_ISSTRUCT (t)) {
+ if (mono_class_value_size (mono_class_from_mono_type (t), NULL) == 0)
+ /* LLVM seems to allocate argument space for empty structures too */
+ linfo->args [i].storage = LLVMArgNone;
+ else
+ linfo->args [i].storage = LLVMArgVtypeByVal;
} else {
linfo->args [i].storage = LLVMArgInIReg;
- if (!sig->params [i - sig->hasthis]->byref) {
- if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+ if (t->byref) {
+ if (t->type == MONO_TYPE_R4)
linfo->args [i].storage = LLVMArgInFPReg;
- } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+ else if (t->type == MONO_TYPE_R8)
linfo->args [i].storage = LLVMArgInFPReg;
- }
}
}
break;
ArgInfo *ainfo = cinfo->args + i;
MonoType *t;
+ if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && i == 0) {
+ /* Push the vret arg before the first argument */
+ MonoInst *vtarg;
+ MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
+ vtarg->type = STACK_MP;
+ vtarg->sreg1 = call->vret_var->dreg;
+ MONO_ADD_INS (cfg->cbb, vtarg);
+ }
+
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
- } else {
+ } else if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 0) {
MonoInst *vtarg;
MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
vtarg->type = STACK_MP;
MONO_ADD_INS (cfg->cbb, vtarg);
}
- /* if the function returns a struct, the called method already does a ret $0x4 */
- cinfo->stack_usage -= 4;
+ /* if the function returns a struct on stack, the called method already does a ret $0x4 */
+ if (cinfo->ret.storage != ArgValuetypeInReg)
+ cinfo->stack_usage -= 4;
}
call->stack_usage = cinfo->stack_usage;
mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
{
guchar *code = p;
- int arg_size = 0, save_mode = SAVE_NONE;
+ int arg_size = 0, stack_usage = 0, save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
-
- switch (mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type) {
+ MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+
+ switch (ret_type->type) {
case MONO_TYPE_VOID:
/* special case string .ctor icall */
- if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
+ if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class) {
save_mode = SAVE_EAX;
- else
+ stack_usage = enable_arguments ? 8 : 4;
+ } else
save_mode = SAVE_NONE;
break;
case MONO_TYPE_I8:
case MONO_TYPE_U8:
save_mode = SAVE_EAX_EDX;
+ stack_usage = enable_arguments ? 16 : 8;
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
save_mode = SAVE_FP;
+ stack_usage = enable_arguments ? 16 : 8;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
save_mode = SAVE_EAX;
+ stack_usage = enable_arguments ? 8 : 4;
break;
}
/* Fall through */
case MONO_TYPE_VALUETYPE:
+		// FIXME: Handle SMALL_STRUCTS_IN_REGS here for proper alignment on darwin-x86
save_mode = SAVE_STRUCT;
+ stack_usage = enable_arguments ? 4 : 0;
break;
default:
save_mode = SAVE_EAX;
+ stack_usage = enable_arguments ? 8 : 4;
break;
}
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - stack_usage - 4);
+
switch (save_mode) {
case SAVE_EAX_EDX:
x86_push_reg (code, X86_EDX);
mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
x86_call_code (code, 0);
}
+
x86_alu_reg_imm (code, X86_ADD, X86_ESP, arg_size + 4);
switch (save_mode) {
default:
break;
}
+
+ x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - stack_usage);
return code;
}
int sreg = tree->sreg1;
int need_touch = FALSE;
-#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
need_touch = TRUE;
#endif
return code;
}
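+/*
+ * mono_x86_have_tls_get:
+ *
+ *   Return whether the fast inline TLS access sequence can be used. On OSX this
+ * checks that pthread_getspecific () has the exact implementation which the
+ * inline sequence emitted by mono_x86_emit_tls_get () mimics.
+ */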
+gboolean
+mono_x86_have_tls_get (void)
+{
+#ifdef __APPLE__
+ guint32 *ins = (guint32*)pthread_getspecific;
+ /*
+ * We're looking for these two instructions:
+ *
+ * mov 0x4(%esp),%eax
+ * mov %gs:0x48(,%eax,4),%eax
+ */
+ return ins [0] == 0x0424448b && ins [1] == 0x85048b65 && ins [2] == 0x00000048;
+#else
+ return TRUE;
+#endif
+}
+
/*
* mono_x86_emit_tls_get:
* @code: buffer to store code to
guint8*
mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
-#ifdef PLATFORM_WIN32
+#if defined(__APPLE__)
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_reg_mem (code, dreg, 0x48 + tls_offset * 4, 4);
+#elif defined(TARGET_WIN32)
/*
* See the Under the Hood article in the May 1996 issue of Microsoft Systems
* Journal and/or a disassembly of the TlsGet () function.
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
+ case OP_SEQ_POINT: {
+ int i;
+
+ if (cfg->compile_aot)
+ NOT_IMPLEMENTED;
+
+ /*
+ * Read from the single stepping trigger page. This will cause a
+ * SIGSEGV when single stepping is enabled.
+ * We do this _before_ the breakpoint, so single stepping after
+ * a breakpoint is hit will step to the next IL offset.
+ */
+ if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
+ x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)ss_trigger_page);
+
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+
+ /*
+ * A placeholder for a possible breakpoint inserted by
+ * mono_arch_set_breakpoint ().
+ */
+ for (i = 0; i < 6; ++i)
+ x86_nop (code);
+ break;
+ }
case OP_ADDCC:
case OP_IADDCC:
case OP_IADD:
*/
x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
+ } else if (power == 0) {
+ x86_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
} else {
/* Based on gcc code */
x86_mov_reg_imm (code, ins->dreg, 0);
break;
case OP_LOAD_GOTADDR:
- x86_call_imm (code, 0);
- /*
- * The patch needs to point to the pop, since the GOT offset needs
- * to be added to that address.
- */
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
- x86_pop_reg (code, ins->dreg);
- x86_alu_reg_imm (code, X86_ADD, ins->dreg, 0xf0f0f0f0);
+ g_assert (ins->dreg == MONO_ARCH_GOT_REG);
+ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
break;
case OP_GOT_ENTRY:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
break;
}
case OP_THROW: {
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
break;
}
case OP_RETHROW: {
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
x86_call_imm (code, 0);
+ mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
break;
case OP_START_HANDLER: {
case OP_STORER8_MEMBASE_REG:
x86_fst_membase (code, ins->inst_destbasereg, ins->inst_offset, TRUE, TRUE);
break;
- case OP_LOADR8_SPILL_MEMBASE:
- x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
- x86_fxch (code, 1);
- break;
case OP_LOADR8_MEMBASE:
x86_fld_membase (code, ins->inst_basereg, ins->inst_offset, TRUE);
break;
x86_fld_membase (code, X86_ESP, 0, FALSE);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
break;
- case OP_LCONV_TO_R_UN:
case OP_LCONV_TO_R_UN_2: {
static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
guint8 *br;
case OP_XORPD:
x86_sse_alu_pd_reg_reg (code, X86_SSE_XOR, ins->sreg1, ins->sreg2);
break;
+ case OP_SQRTPD:
+ x86_sse_alu_pd_reg_reg (code, X86_SSE_SQRT, ins->dreg, ins->sreg1);
+ break;
case OP_ADDSUBPD:
x86_sse_alu_pd_reg_reg (code, X86_SSE_ADDSUB, ins->sreg1, ins->sreg2);
break;
guint8 *code;
gboolean need_stack_frame;
- cfg->code_size = MAX (mono_method_get_header (method)->code_size * 4, 10240);
+ cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
x86_patch (buf, code);
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
/* FIXME: Add a separate key for LMF to avoid this */
x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
}
else {
- g_assert (!cfg->compile_aot);
- x86_push_imm (code, cfg->domain);
+ if (cfg->compile_aot) {
+ /*
+ * This goes before the saving of callee saved regs, so save the got reg
+ * ourselves.
+ */
+ x86_push_reg (code, MONO_ARCH_GOT_REG);
+ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
+ x86_push_imm (code, 0);
+ } else {
+ x86_push_imm (code, cfg->domain);
+ }
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
+ if (cfg->compile_aot)
+ x86_pop_reg (code, MONO_ARCH_GOT_REG);
}
}
pos += sizeof (MonoLMF);
/* save the current IP */
- mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
- x86_push_imm_template (code);
+ if (cfg->compile_aot) {
+ /* This pushes the current ip */
+ x86_call_imm (code, 0);
+ } else {
+ mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
+ x86_push_imm_template (code);
+ }
cfa_offset += sizeof (gpointer);
/* save all caller saved regs */
if (lmf_addr_tls_offset != -1) {
/* Load lmf quicky using the GS register */
code = mono_x86_emit_tls_get (code, X86_EAX, lmf_addr_tls_offset);
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/* The TLS key actually contains a pointer to the MonoJitTlsData structure */
/* FIXME: Add a separate key for LMF to avoid this */
x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
} else {
+ if (cfg->compile_aot)
+ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
}
if (alloc_size) {
/* See mono_emit_stack_alloc */
-#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
+		/* FIXME: handle unbounded code expansion, we should use a loop in case of more than X iterations */
+ guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 8; /*8 is the max size of x86_alu_reg_imm + x86_test_membase_reg*/
+ guint32 offset = code - cfg->native_code;
+ if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
+ while (required_code_size >= (cfg->code_size - offset))
+ cfg->code_size *= 2;
+ cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ code = cfg->native_code + offset;
+ mono_jit_stats.code_reallocs++;
+ }
while (remaining_size >= 0x1000) {
x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x1000);
x86_test_membase_reg (code, X86_ESP, 0, X86_ESP);
/* Compute size of code following the push <OFFSET> */
size = 5 + 5;
+ /*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
+
if ((code - cfg->native_code) - throw_ip < 126 - size) {
/* Use the shorter form */
buf = buf2 = code;
{
if (!tls_offset_inited) {
if (!getenv ("MONO_NO_TLS")) {
-#ifdef PLATFORM_WIN32
+#ifdef TARGET_WIN32
/*
* We need to init this multiple times, since when we are first called, the key might not
* be initialized yet.
*/
appdomain_tls_offset = mono_domain_get_tls_key ();
lmf_tls_offset = mono_get_jit_tls_key ();
- thread_tls_offset = mono_thread_get_tls_key ();
/* Only 64 tls entries can be accessed using inline code */
if (appdomain_tls_offset >= 64)
appdomain_tls_offset = -1;
if (lmf_tls_offset >= 64)
lmf_tls_offset = -1;
- if (thread_tls_offset >= 64)
- thread_tls_offset = -1;
#else
#if MONO_XEN_OPT
optimize_for_xen = access ("/proc/xen", F_OK) == 0;
appdomain_tls_offset = mono_domain_get_tls_offset ();
lmf_tls_offset = mono_get_lmf_tls_offset ();
lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
- thread_tls_offset = mono_thread_get_tls_offset ();
#endif
}
}
#define BR_LARGE_SIZE 5
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
+#define DEBUG_IMT 0
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
if (!fail_tramp)
mono_stats.imt_thunks_size += code - start;
g_assert (code - start <= size);
+
+#if DEBUG_IMT
+ {
+ char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
+ mono_disassemble_code (NULL, (guint8*)start, code - start, buff);
+ g_free (buff);
+ }
+#endif
+
return start;
}
{
return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}
-
-MonoObject*
-mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
-{
- MonoMethodSignature *sig = mono_method_signature (method);
- CallInfo *cinfo = get_call_info (gsctx, NULL, sig, FALSE);
- int this_argument_offset;
- MonoObject *this_argument;
-
- /*
- * this is the offset of the this arg from esp as saved at the start of
- * mono_arch_create_trampoline_code () in tramp-x86.c.
- */
- this_argument_offset = 5;
- if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
- this_argument_offset++;
-
- this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));
-
- g_free (cinfo);
- return this_argument;
-}
#endif
MonoVTable*
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
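+/*
+ * mono_arch_get_cie_program:
+ *
+ *   Return the unwind ops describing the state at method entry: the CFA is at
+ * esp+4 and the return address is stored at CFA-4.
+ */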
+GSList*
+mono_arch_get_cie_program (void)
+{
+ GSList *l = NULL;
+
+ mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
+ mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);
+
+ return l;
+}
+
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return ins;
}
-MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
-{
- MonoInst* ins;
-
- if (thread_tls_offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->inst_offset = thread_tls_offset;
- return ins;
-}
-
guint32
mono_arch_get_patch_offset (guint8 *code)
{
return (gpointer)regs [reg];
}
+/*
+ * mono_x86_get_this_arg_offset:
+ *
+ * Return the offset of the stack location where this is passed during a virtual
+ * call.
+ */
+guint32
+mono_x86_get_this_arg_offset (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig)
+{
+ return 0;
+}
+
gpointer
mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig,
mgreg_t *regs, guint8 *code)
gpointer res;
int offset;
- /*
- * Avoid expensive calls to get_generic_context_from_code () + get_call_info
- * if possible.
- */
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- if (!gsctx && code)
- gsctx = mono_get_generic_context_from_code (code);
- cinfo = get_call_info (gsctx, NULL, sig, FALSE);
-
- offset = cinfo->args [0].offset;
- } else {
- offset = 0;
- }
+ offset = 0;
/*
* The stack looks like:
* <other args>
* <this=delegate>
- * <possible vtype return address>
* <return addr>
* <4 pointers pushed by mono_arch_create_trampoline_code ()>
*/
#define MAX_ARCH_DELEGATE_PARAMS 10
-gpointer
-mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
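+/*
+ * get_delegate_invoke_impl:
+ *
+ *   Generate native code for a delegate invoke trampoline with PARAM_COUNT
+ * parameters. If CODE_LEN is not NULL, the length of the generated code is
+ * returned in it.
+ */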
+static gpointer
+get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
{
guint8 *code, *start;
- if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
- return NULL;
-
- /* FIXME: Support more cases */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
- return NULL;
-
/*
* The stack contains:
* <delegate>
*/
if (has_target) {
- static guint8* cached = NULL;
- if (cached)
- return cached;
-
start = code = mono_global_codeman_reserve (64);
/* Replace the this argument with the target */
x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
g_assert ((code - start) < 64);
-
- mono_debug_add_delegate_trampoline (start, code - start);
-
- mono_memory_barrier ();
-
- cached = start;
} else {
- static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
- int code_reserve = 8 + (sig->param_count * 8);
-
- for (i = 0; i < sig->param_count; ++i)
- if (!mono_is_regsize_var (sig->params [i]))
- return NULL;
-
- code = cache [sig->param_count];
- if (code)
- return code;
+ int code_reserve = 8 + (param_count * 8);
/*
* The stack contains:
x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);
/* move args up */
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = 0; i < param_count; ++i) {
x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
}
x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
g_assert ((code - start) < code_reserve);
+ }
+
+ mono_debug_add_delegate_trampoline (start, code - start);
- mono_debug_add_delegate_trampoline (start, code - start);
+ if (code_len)
+ *code_len = code - start;
+
+ return start;
+}
+
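+/*
+ * mono_arch_get_delegate_invoke_impls:
+ *
+ *   Return a list of MonoTrampInfo structures describing the delegate invoke
+ * impls; the names match the ones looked up by mono_arch_get_delegate_invoke_impl ()
+ * in full-aot mode.
+ */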
+GSList*
+mono_arch_get_delegate_invoke_impls (void)
+{
+ GSList *res = NULL;
+ guint8 *code;
+ guint32 code_len;
+ int i;
+
+ code = get_delegate_invoke_impl (TRUE, 0, &code_len);
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
+
+ for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ code = get_delegate_invoke_impl (FALSE, i, &code_len);
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
+ }
+
+ return res;
+}
+
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+ guint8 *code, *start;
+
+ if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
+ return NULL;
+
+ /* FIXME: Support more cases */
+ if (MONO_TYPE_ISSTRUCT (sig->ret))
+ return NULL;
+
+ /*
+ * The stack contains:
+ * <delegate>
+ * <return addr>
+ */
+
+ if (has_target) {
+ static guint8* cached = NULL;
+ if (cached)
+ return cached;
+
+ if (mono_aot_only)
+ start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
+ else
+ start = get_delegate_invoke_impl (TRUE, 0, NULL);
+
+ mono_memory_barrier ();
+
+ cached = start;
+ } else {
+ static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
+ int i = 0;
+
+ for (i = 0; i < sig->param_count; ++i)
+ if (!mono_is_regsize_var (sig->params [i]))
+ return NULL;
+
+ code = cache [sig->param_count];
+ if (code)
+ return code;
+
+ if (mono_aot_only) {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
+ start = mono_aot_get_trampoline (name);
+ g_free (name);
+ } else {
+ start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
+ }
mono_memory_barrier ();
#endif /* MONO_ARCH_SIMD_INTRINSICS */
}
+/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
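+/*
+ * mono_arch_install_handler_block_guard:
+ *
+ *   Decode the prologue of the handler block given by CLAUSE to locate the spvar
+ * slot, then replace the value it points to with NEW_VALUE if the current value
+ * lies inside JI's code. Returns the previous value, or NULL if the prologue
+ * could not be decoded.
+ */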
+gpointer
+mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
+{
+ int offset;
+ gpointer *sp, old_value;
+ char *bp;
+ const unsigned char *handler;
+
+	/* Decode the first instruction to figure out where we stored the spvar */
+	/* Our JIT MUST generate the following:
+ mov %esp, -?(%ebp)
+ Which is encoded as: 0x89 mod_rm.
+ mod_rm (esp, ebp, imm) which can be: (imm will never be zero)
+ mod (reg + imm8): 01 reg(esp): 100 rm(ebp): 101 -> 01100101 (0x65)
+ mod (reg + imm32): 10 reg(esp): 100 rm(ebp): 101 -> 10100101 (0xA5)
+ */
+ handler = clause->handler_start;
+
+ if (*handler != 0x89)
+ return NULL;
+
+ ++handler;
+
+ if (*handler == 0x65)
+ offset = *(signed char*)(handler + 1);
+ else if (*handler == 0xA5)
+ offset = *(int*)(handler + 1);
+ else
+ return NULL;
+
+ /*Load the spvar*/
+ bp = MONO_CONTEXT_GET_BP (ctx);
+ sp = *(gpointer*)(bp + offset);
+
+ old_value = *sp;
+ if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
+ return old_value;
+
+ *sp = new_value;
+
+ return old_value;
+}
+
+/*
+ * mono_arch_emit_load_got_addr:
+ *
+ * Emit code to load the got address.
+ * On x86, the result is placed into EBX.
+ */
+guint8*
+mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
+{
+ x86_call_imm (code, 0);
+ /*
+ * The patch needs to point to the pop, since the GOT offset needs
+ * to be added to that address.
+ */
+ if (cfg)
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
+ else
+ *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
+ x86_pop_reg (code, MONO_ARCH_GOT_REG);
+ x86_alu_reg_imm (code, X86_ADD, MONO_ARCH_GOT_REG, 0xf0f0f0f0);
+
+ return code;
+}
+
+/*
+ * mono_arch_emit_load_aotconst:
+ *
+ * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
+ * TARGET from the mscorlib GOT in full-aot code.
+ * On x86, the GOT address is assumed to be in EBX, and the result is placed into
+ * EAX.
+ */
+guint8*
+mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
+{
+ /* Load the mscorlib got address */
+ x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_GOT_REG, sizeof (gpointer), 4);
+ *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
+ /* arch_emit_got_access () patches this */
+ x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0xf0f0f0f0, 4);
+
+ return code;
+}
+
+/* Can't put this into mini-x86.h */
+gpointer
+mono_x86_get_signal_exception_trampoline (MonoTrampInfo **info, gboolean aot);
+
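+/*
+ * mono_arch_get_trampolines:
+ *
+ *   Return the list of arch specific trampolines (currently only the signal
+ * exception trampoline) as MonoTrampInfo structures.
+ */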
+GSList *
+mono_arch_get_trampolines (gboolean aot)
+{
+ MonoTrampInfo *info;
+ GSList *tramps = NULL;
+
+ mono_x86_get_signal_exception_trampoline (&info, aot);
+
+ tramps = g_slist_append (tramps, info);
+
+ return tramps;
+}
+
+
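+/* The single step/breakpoint trigger page faults are reported as SIGBUS on Apple */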
+#if __APPLE__
+#define DBG_SIGNAL SIGBUS
+#else
+#define DBG_SIGNAL SIGSEGV
+#endif
+
+/* Soft Debug support */
+#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
+
+/*
+ * mono_arch_set_breakpoint:
+ *
+ * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
+ * The location should contain code emitted by OP_SEQ_POINT.
+ */
+void
+mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+ guint8 *code = ip;
+
+ /*
+	 * In production, we will use int3 (the size in the md file has to be
+	 * fixed accordingly). But that could confuse gdb, so during development
+	 * we emit a SIGSEGV instead.
+ */
+ g_assert (code [0] == 0x90);
+ x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)bp_trigger_page);
+}
+
+/*
+ * mono_arch_clear_breakpoint:
+ *
+ * Clear the breakpoint at IP.
+ */
+void
+mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
+{
+ guint8 *code = ip;
+ int i;
+
+ for (i = 0; i < 6; ++i)
+ x86_nop (code);
+}
+
+/*
+ * mono_arch_start_single_stepping:
+ *
+ * Start single stepping.
+ */
+void
+mono_arch_start_single_stepping (void)
+{
+ mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+}
+
+/*
+ * mono_arch_stop_single_stepping:
+ *
+ * Stop single stepping.
+ */
+void
+mono_arch_stop_single_stepping (void)
+{
+ mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+}
+
+/*
+ * mono_arch_is_single_step_event:
+ *
+ * Return whether the machine state in SIGCTX corresponds to a single
+ * step event.
+ */
+gboolean
+mono_arch_is_single_step_event (void *info, void *sigctx)
+{
+#ifdef TARGET_WIN32
+ EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info; /* Sometimes the address is off by 4 */
+ if ((einfo->ExceptionInformation[1] >= ss_trigger_page && (guint8*)einfo->ExceptionInformation[1] <= (guint8*)ss_trigger_page + 128))
+ return TRUE;
+ else
+ return FALSE;
+#else
+ siginfo_t* sinfo = (siginfo_t*) info;
+ /* Sometimes the address is off by 4 */
+ if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128))
+ return TRUE;
+ else
+ return FALSE;
+#endif
+}
+
+gboolean
+mono_arch_is_breakpoint_event (void *info, void *sigctx)
+{
+#ifdef TARGET_WIN32
+ EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info; /* Sometimes the address is off by 4 */
+ if ((einfo->ExceptionInformation[1] >= bp_trigger_page && (guint8*)einfo->ExceptionInformation[1] <= (guint8*)bp_trigger_page + 128))
+ return TRUE;
+ else
+ return FALSE;
+#else
+ siginfo_t* sinfo = (siginfo_t*)info;
+ /* Sometimes the address is off by 4 */
+ if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128))
+ return TRUE;
+ else
+ return FALSE;
+#endif
+}
+
+/*
+ * mono_arch_get_ip_for_breakpoint:
+ *
+ * See mini-amd64.c for docs.
+ */
+guint8*
+mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
+{
+ guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+ return ip;
+}
+
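+/* The breakpoint sequence emitted by mono_arch_set_breakpoint () is a 6 byte x86_alu_reg_mem */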
+#define BREAKPOINT_SIZE 6
+
+/*
+ * mono_arch_get_ip_for_single_step:
+ *
+ * See mini-amd64.c for docs.
+ */
+guint8*
+mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
+{
+ guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+	/* Size of the x86_alu_reg_mem emitted by OP_SEQ_POINT for the single step check */
+ ip += 6;
+
+ return ip;
+}
+
+/*
+ * mono_arch_skip_breakpoint:
+ *
+ * See mini-amd64.c for docs.
+ */
+void
+mono_arch_skip_breakpoint (MonoContext *ctx)
+{
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + BREAKPOINT_SIZE);
+}
+
+/*
+ * mono_arch_skip_single_step:
+ *
+ * See mini-amd64.c for docs.
+ */
+void
+mono_arch_skip_single_step (MonoContext *ctx)
+{
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 6);
+}
+
+/*
+ * mono_arch_get_seq_point_info:
+ *
+ * See mini-amd64.c for docs.
+ */
+gpointer
+mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
+{
+ NOT_IMPLEMENTED;
+ return NULL;
+}
+
+#endif
+