#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif
-/* amd64_mov_reg_imm () */
-#define BREAKPOINT_SIZE 8
-
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;
+/* The size of the breakpoint sequence */
+static int breakpoint_size;
+
+/* The size of the instruction in the breakpoint sequence which causes the actual fault */
+static int breakpoint_fault_size;
+
+/* The size of the instruction in the single step sequence which causes the actual fault */
+static int single_step_fault_size;
+
#ifdef HOST_WIN32
/* On Win64 always reserve first 32 bytes for first four arguments */
#define ARGS_OFFSET 48
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- if (sig->params [i]->byref) {
- add_general (&gr, &stack_size, ainfo);
- continue;
- }
ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
void
mono_arch_init (void)
{
+ int flags;
+
InitializeCriticalSection (&mini_arch_mutex);
- ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
- bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+#ifdef MONO_ARCH_NOMAP32BIT
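+ /*
+ * Without MAP_32BIT the trigger pages can be mapped above 4GB, so the
+ * breakpoint and single step sequences can't encode the page address as
+ * a 32 bit immediate, and have to be longer.
+ */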
+ flags = MONO_MMAP_READ;
+ /* amd64_mov_reg_imm () + amd64_mov_reg_membase () */
+ breakpoint_size = 13;
+ breakpoint_fault_size = 3;
+ /* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */
+ single_step_fault_size = 5;
+#else
+ flags = MONO_MMAP_READ|MONO_MMAP_32BIT;
+ /* amd64_mov_reg_mem () */
+ breakpoint_size = 8;
+ breakpoint_fault_size = 8;
+ single_step_fault_size = 8;
+#endif
+
+ ss_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
+ bp_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
}
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 25))
- sse_opts |= 1 << SIMD_VERSION_SSE1;
+ sse_opts |= SIMD_VERSION_SSE1;
if (edx & (1 << 26))
- sse_opts |= 1 << SIMD_VERSION_SSE2;
+ sse_opts |= SIMD_VERSION_SSE2;
if (ecx & (1 << 0))
- sse_opts |= 1 << SIMD_VERSION_SSE3;
+ sse_opts |= SIMD_VERSION_SSE3;
if (ecx & (1 << 9))
- sse_opts |= 1 << SIMD_VERSION_SSSE3;
+ sse_opts |= SIMD_VERSION_SSSE3;
if (ecx & (1 << 19))
- sse_opts |= 1 << SIMD_VERSION_SSE41;
+ sse_opts |= SIMD_VERSION_SSE41;
if (ecx & (1 << 20))
- sse_opts |= 1 << SIMD_VERSION_SSE42;
+ sse_opts |= SIMD_VERSION_SSE42;
}
/* Yes, all this needs to be done to check for sse4a.
if ((((unsigned int) eax) >= 0x80000001) && (ebx == 0x68747541) && (ecx == 0x444D4163) && (edx == 0x69746E65)) {
cpuid (0x80000001, &eax, &ebx, &ecx, &edx);
if (ecx & (1 << 6))
- sse_opts |= 1 << SIMD_VERSION_SSE4a;
+ sse_opts |= SIMD_VERSION_SSE4a;
}
}
return sse_opts;
}
+#ifndef DISABLE_JIT
+
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
if (cfg->arch.omit_fp_computed)
return;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
int i;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
gint32 *offsets;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
}
}
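+ /*
+ * Allocate a local which will hold the address of ss_trigger_page, for
+ * use when the page address doesn't fit into 32 bits.
+ */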
+ if (cfg->gen_seq_points) {
+ MonoInst *ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_trigger_page_var = ins;
+ }
+
#ifdef MONO_AMD64_NO_PUSHES
/*
* When this is set, we pass arguments on the stack by moves, and by allocating
ArgInfo *ainfo;
int j;
LLVMCallInfo *linfo;
+ MonoType *t;
n = sig->param_count + sig->hasthis;
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+
linfo->args [i].storage = LLVMArgNone;
switch (ainfo->storage) {
linfo->args [i].storage = LLVMArgInFPReg;
break;
case ArgOnStack:
- if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
+ if (MONO_TYPE_ISSTRUCT (t)) {
linfo->args [i].storage = LLVMArgVtypeByVal;
} else {
linfo->args [i].storage = LLVMArgInIReg;
- if (!sig->params [i - sig->hasthis]->byref) {
- if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+ if (!t->byref) {
+ if (t->type == MONO_TYPE_R4)
linfo->args [i].storage = LLVMArgInFPReg;
- } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+ else if (t->type == MONO_TYPE_R8)
linfo->args [i].storage = LLVMArgInFPReg;
- }
}
}
break;
{
MonoType *ret = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret);
- if (!ret->byref) {
- if (ret->type == MONO_TYPE_R4) {
- if (COMPILE_LLVM (cfg))
- MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
- else
- MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
- return;
- } else if (ret->type == MONO_TYPE_R8) {
+ if (ret->type == MONO_TYPE_R4) {
+ if (COMPILE_LLVM (cfg))
MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
- return;
- }
+ else
+ MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
+ return;
+ } else if (ret->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ return;
}
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
+#endif /* DISABLE_JIT */
+
#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->inst_true_bb->native_offset) { \
x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
return -1;
}
+#ifndef DISABLE_JIT
+
#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB) || ((opcode) == OP_ISBB_IMM)))
/*
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
- } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG)) {
+ } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
return code;
}
+#endif /* DISABLE_JIT */
+
/*
* mono_amd64_emit_tls_get:
* @code: buffer to store code to
* We do this _before_ the breakpoint, so single stepping after
* a breakpoint is hit will step to the next IL offset.
*/
- g_assert (((guint64)ss_trigger_page >> 32) == 0);
- if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
- amd64_mov_reg_mem (code, AMD64_R11, (guint64)ss_trigger_page, 4);
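+ /*
+ * Read from the single step trigger page. This will fault when single
+ * stepping is enabled. If the page address doesn't fit into 32 bits,
+ * it is loaded from the ss_trigger_page_var local instead.
+ */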
+ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
+ if (((guint64)ss_trigger_page >> 32) == 0)
+ amd64_mov_reg_mem (code, AMD64_R11, (guint64)ss_trigger_page, 4);
+ else {
+ MonoInst *var = cfg->arch.ss_trigger_page_var;
+
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
+ }
+ }
+ /*
+ * This is the address which is saved in seq points; get_ip_for_single_step ()
+ * and get_ip_for_breakpoint () need to compute it from the address of the
+ * instruction causing the fault.
+ */
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
- for (i = 0; i < BREAKPOINT_SIZE; ++i)
+ for (i = 0; i < breakpoint_size; ++i)
x86_nop (code);
break;
}
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
amd64_call_imm (code, 0);
+ mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
/* Restore stack alignment */
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
break;
}
}
+#ifndef DISABLE_JIT
+
static int
get_max_epilog_size (MonoCompile *cfg)
{
gboolean args_clobbered = FALSE;
gboolean trace = FALSE;
- cfg->code_size = MAX (((MonoMethodNormal *)method)->header->code_size * 4, 10240);
+ cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
code = cfg->native_code = g_malloc (cfg->code_size);
/*
* The ip field is not set; the exception handling code will obtain it from the stack location pointed to by the sp field.
*/
- /* sp is saved right before calls */
+ /*
+ * sp is saved right before calls, but we need to save it here too so
+ * that async stack walks work.
+ */
+ amd64_mov_membase_reg (code, cfg->frame_reg, cfg->arch.lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
/* Skip method (only needed for trampoline LMF frames) */
/* Save callee saved regs */
for (i = 0; i < MONO_MAX_IREGS; ++i) {
}
}
+ /* Initialize ss_trigger_page_var */
+ if (cfg->arch.ss_trigger_page_var) {
+ MonoInst *var = cfg->arch.ss_trigger_page_var;
+
+ g_assert (!cfg->compile_aot);
+ g_assert (var->opcode == OP_REGOFFSET);
+
+ amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
+ amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8);
+ }
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
}
+#endif /* DISABLE_JIT */
+
void*
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
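+/*
+ * mono_arch_get_cie_program:
+ *
+ *   Return the unwind ops describing the frame state at function entry:
+ * the CFA is at rsp + 8, and the return address is saved at cfa - 8.
+ */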
+GSList*
+mono_arch_get_cie_program (void)
+{
+ GSList *l = NULL;
+
+ mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, AMD64_RSP, 8);
+ mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, AMD64_RIP, -8);
+
+ return l;
+}
+
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
* instead.
*/
g_assert (code [0] == 0x90);
- g_assert (((guint64)bp_trigger_page >> 32) == 0);
-
- amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
- g_assert (code - orig_code == BREAKPOINT_SIZE);
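+ /*
+ * Overwrite the nops with a read from the breakpoint trigger page. The
+ * longer sequence is needed when the page address doesn't fit into 32 bits.
+ */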
+ if (breakpoint_size == 8) {
+ amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
+ } else {
+ amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4);
+ }
+ g_assert (code - orig_code == breakpoint_size);
}
/*
guint8 *code = ip;
int i;
- for (i = 0; i < BREAKPOINT_SIZE; ++i)
+ for (i = 0; i < breakpoint_size; ++i)
x86_nop (code);
}
+
+gboolean
+mono_arch_is_breakpoint_event (void *info, void *sigctx)
+{
+#ifdef HOST_WIN32
+ EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
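+ /* Not supported on Windows yet */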
+ return FALSE;
+#else
+ siginfo_t* sinfo = (siginfo_t*) info;
+ /* Sometimes the address is off by 4 */
+ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
+ return TRUE;
+ else
+ return FALSE;
+#endif
+}
+
+/*
+ * mono_arch_get_ip_for_breakpoint:
+ *
+ * Convert the ip in CTX to the address where a breakpoint was placed.
+ */
+guint8*
+mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
+{
+ guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+ /* ip points to the instruction causing the fault */
+ ip -= (breakpoint_size - breakpoint_fault_size);
+
+ return ip;
+}
+
+/*
+ * mono_arch_skip_breakpoint:
+ *
+ * Modify CTX so the ip is placed after the breakpoint instruction, so that
+ * when we resume, the instruction is not executed again.
+ */
+void
+mono_arch_skip_breakpoint (MonoContext *ctx)
+{
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
+}
/*
* mono_arch_start_single_stepping:
#endif
}
-gboolean
-mono_arch_is_breakpoint_event (void *info, void *sigctx)
-{
-#ifdef HOST_WIN32
- EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
- return FALSE;
-#else
- siginfo_t* sinfo = (siginfo_t*) info;
- /* Sometimes the address is off by 4 */
- if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
- return TRUE;
- else
- return FALSE;
-#endif
-}
-
-/*
- * mono_arch_get_ip_for_breakpoint:
- *
- * Convert the ip in CTX to the address where a breakpoint was placed.
- */
-guint8*
-mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- /* size of xor r11, r11 */
- ip -= 0;
-
- return ip;
-}
-
/*
* mono_arch_get_ip_for_single_step:
*
{
guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
- /* Size of amd64_mov_reg_mem (r11) */
- ip += 8;
+ ip += single_step_fault_size;
return ip;
}
-/*
- * mono_arch_skip_breakpoint:
- *
- * Modify CTX so the ip is placed after the breakpoint instruction, so when
- * we resume, the instruction is not executed again.
- */
-void
-mono_arch_skip_breakpoint (MonoContext *ctx)
-{
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + BREAKPOINT_SIZE);
-}
-
/*
* mono_arch_skip_single_step:
*
void
mono_arch_skip_single_step (MonoContext *ctx)
{
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 8);
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + single_step_fault_size);
}
/*