#include "cpu-amd64.h"
#include "debugger-agent.h"
-/*
- * Can't define this in mini-amd64.h cause that would turn on the generic code in
- * method-to-ir.c.
- */
-#define MONO_ARCH_IMT_REG AMD64_R11
-
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;
#define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
#endif
-/* amd64_mov_reg_imm () */
-#define BREAKPOINT_SIZE 8
-
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;
+/* The size of the breakpoint sequence */
+static int breakpoint_size;
+
+/* The size of the breakpoint instruction causing the actual fault */
+static int breakpoint_fault_size;
+
+/* The size of the single step instruction causing the actual fault */
+static int single_step_fault_size;
+
#ifdef HOST_WIN32
/* On Win64 always reserve first 32 bytes for first four arguments */
#define ARGS_OFFSET 48
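/* presumably 16 bytes for the saved rbp and return address, plus the 32 byte shadow space */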
guint32 freg_usage;
gboolean need_stack_align;
gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
static CallInfo*
get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
{
- guint32 i, gr, fr;
+ guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
cinfo->ret.reg = AMD64_XMM0;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
if (cinfo->ret.storage == ArgOnStack) {
cinfo->vtype_retaddr = TRUE;
/* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
}
break;
}
case MONO_TYPE_TYPEDBYREF:
/* Same as a valuetype with size 24 */
- add_general (&gr, &stack_size, &cinfo->ret);
- ;
+ cinfo->vtype_retaddr = TRUE;
break;
case MONO_TYPE_VOID:
break;
}
}
- /* this */
- if (sig->hasthis)
- add_general (&gr, &stack_size, cinfo->args + 0);
+ pstart = 0;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, allowing 'this' to always be passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers.
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
+ pstart = 1;
+ }
+ add_general (&gr, &stack_size, &cinfo->ret);
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis)
+ add_general (&gr, &stack_size, cinfo->args + 0);
+
+ if (cinfo->vtype_retaddr)
+ add_general (&gr, &stack_size, &cinfo->ret);
+ }
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- if (sig->params [i]->byref) {
- add_general (&gr, &stack_size, ainfo);
- continue;
- }
ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
void
mono_arch_init (void)
{
+ int flags;
+
InitializeCriticalSection (&mini_arch_mutex);
- ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
- bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
+#ifdef MONO_ARCH_NOMAP32BIT
+ flags = MONO_MMAP_READ;
+ /* amd64_mov_reg_imm () + amd64_mov_reg_membase () */
+ breakpoint_size = 13;
+ breakpoint_fault_size = 3;
+ /* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */
+ single_step_fault_size = 5;
+#else
+ flags = MONO_MMAP_READ|MONO_MMAP_32BIT;
+ /* amd64_mov_reg_mem () */
+ breakpoint_size = 8;
+ breakpoint_fault_size = 8;
+ single_step_fault_size = 8;
+#endif
+
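+ /* Map the trigger pages below 4GB when possible so their addresses fit in 32 bit immediates */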
+ ss_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
+ bp_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+
+ mono_aot_register_jit_icall ("mono_amd64_throw_exception", mono_amd64_throw_exception);
+ mono_aot_register_jit_icall ("mono_amd64_throw_corlib_exception", mono_amd64_throw_corlib_exception);
+ mono_aot_register_jit_icall ("mono_amd64_get_original_ip", mono_amd64_get_original_ip);
}
/*
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 25))
- sse_opts |= 1 << SIMD_VERSION_SSE1;
+ sse_opts |= SIMD_VERSION_SSE1;
if (edx & (1 << 26))
- sse_opts |= 1 << SIMD_VERSION_SSE2;
+ sse_opts |= SIMD_VERSION_SSE2;
if (ecx & (1 << 0))
- sse_opts |= 1 << SIMD_VERSION_SSE3;
+ sse_opts |= SIMD_VERSION_SSE3;
if (ecx & (1 << 9))
- sse_opts |= 1 << SIMD_VERSION_SSSE3;
+ sse_opts |= SIMD_VERSION_SSSE3;
if (ecx & (1 << 19))
- sse_opts |= 1 << SIMD_VERSION_SSE41;
+ sse_opts |= SIMD_VERSION_SSE41;
if (ecx & (1 << 20))
- sse_opts |= 1 << SIMD_VERSION_SSE42;
+ sse_opts |= SIMD_VERSION_SSE42;
}
/* Yes, all this needs to be done to check for sse4a.
if ((((unsigned int) eax) >= 0x80000001) && (ebx == 0x68747541) && (ecx == 0x444D4163) && (edx == 0x69746E65)) {
cpuid (0x80000001, &eax, &ebx, &ecx, &edx);
if (ecx & (1 << 6))
- sse_opts |= 1 << SIMD_VERSION_SSE4a;
+ sse_opts |= SIMD_VERSION_SSE4a;
}
}
return sse_opts;
}
+#ifndef DISABLE_JIT
+
GList *
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
{
if (cfg->arch.omit_fp_computed)
return;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
int i;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
gint32 *offsets;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
/* Allocate locals */
if (!cfg->globalra) {
offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
+ if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
+ char *mname = mono_method_full_name (cfg->method, TRUE);
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
+ g_free (mname);
+ return;
+ }
+
if (locals_stack_align) {
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
}
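+ /*
+ * Reserve a local caching the address of ss_trigger_page: with MONO_ARCH_NOMAP32BIT
+ * the page can be above 4GB, so its address cannot be encoded as a 32 bit
+ * immediate in the OP_SEQ_POINT code below.
+ */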
+ if (cfg->gen_seq_points) {
+ MonoInst *ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_trigger_page_var = ins;
+ }
+
#ifdef MONO_AMD64_NO_PUSHES
/*
* When this is set, we pass arguments on the stack by moves, and by allocating
ArgInfo *ainfo;
int j;
LLVMCallInfo *linfo;
+ MonoType *t;
n = sig->param_count + sig->hasthis;
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage == ArgInIReg) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
+ linfo->vret_arg_index = cinfo->vret_arg_index;
}
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+
linfo->args [i].storage = LLVMArgNone;
switch (ainfo->storage) {
linfo->args [i].storage = LLVMArgInFPReg;
break;
case ArgOnStack:
- if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
+ if (MONO_TYPE_ISSTRUCT (t)) {
linfo->args [i].storage = LLVMArgVtypeByVal;
} else {
linfo->args [i].storage = LLVMArgInIReg;
- if (!sig->params [i - sig->hasthis]->byref) {
- if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+ if (!t->byref) {
+ if (t->type == MONO_TYPE_R4)
linfo->args [i].storage = LLVMArgInFPReg;
- } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+ else if (t->type == MONO_TYPE_R8)
linfo->args [i].storage = LLVMArgInFPReg;
- }
}
}
break;
}
g_assert (in->klass);
+ if (ainfo->storage == ArgOnStack && size >= 10000) {
+ /* Avoid asserts in emit_memcpy () */
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Passing an argument of size '%d'.", size);
+ /* Continue normally */
+ }
+
if (size > 0) {
MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
arg->sreg1 = in->dreg;
{
MonoType *ret = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret);
- if (!ret->byref) {
- if (ret->type == MONO_TYPE_R4) {
- if (COMPILE_LLVM (cfg))
- MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
- else
- MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
- return;
- } else if (ret->type == MONO_TYPE_R8) {
- MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
- return;
- }
- }
+ if (ret->type == MONO_TYPE_R4) {
+ if (COMPILE_LLVM (cfg))
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ else
+ MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
+ return;
+ } else if (ret->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
+#endif /* DISABLE_JIT */
+
#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->inst_true_bb->native_offset) { \
x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
/* These methods are allocated using malloc */
near_call = FALSE;
- if (cfg->compile_aot) {
- near_call = TRUE;
- no_patch = TRUE;
- }
-
#ifdef MONO_ARCH_NOMAP32BIT
near_call = FALSE;
#endif
if (optimize_for_xen)
near_call = FALSE;
+ if (cfg->compile_aot) {
+ near_call = TRUE;
+ no_patch = TRUE;
+ }
+
if (near_call) {
/*
* Align the call displacement to an address divisible by 4 so it does
return -1;
}
+#ifndef DISABLE_JIT
+
#define INST_IGNORES_CFLAGS(opcode) (!(((opcode) == OP_ADC) || ((opcode) == OP_ADC_IMM) || ((opcode) == OP_IADC) || ((opcode) == OP_IADC_IMM) || ((opcode) == OP_SBB) || ((opcode) == OP_SBB_IMM) || ((opcode) == OP_ISBB) || ((opcode) == OP_ISBB_IMM)))
/*
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
- } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG)) {
+ } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
return code;
}
+#endif /* DISABLE_JIT */
+
/*
* mono_amd64_emit_tls_get:
* @code: buffer to store code to
case OP_NOT_NULL:
break;
case OP_SEQ_POINT: {
- int i, il_offset;
+ int i;
if (cfg->compile_aot)
NOT_IMPLEMENTED;
* We do this _before_ the breakpoint, so single stepping after
* a breakpoint is hit will step to the next IL offset.
*/
- g_assert (((guint64)ss_trigger_page >> 32) == 0);
- if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
- amd64_mov_reg_mem (code, AMD64_R11, (guint64)ss_trigger_page, 4);
+ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
+ if (((guint64)ss_trigger_page >> 32) == 0)
+ amd64_mov_reg_mem (code, AMD64_R11, (guint64)ss_trigger_page, 4);
+ else {
+ MonoInst *var = cfg->arch.ss_trigger_page_var;
+
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
+ }
+ }
- il_offset = ins->inst_imm;
- if (!cfg->seq_points)
- cfg->seq_points = g_ptr_array_new ();
- g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (il_offset));
- g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (code - cfg->native_code));
+ /*
+ * This is the address saved in seq points; get_ip_for_single_step () /
+ * get_ip_for_breakpoint () must compute it from the address of the
+ * instruction causing the fault.
+ */
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
- for (i = 0; i < BREAKPOINT_SIZE; ++i)
+ for (i = 0; i < breakpoint_size; ++i)
x86_nop (code);
break;
}
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
- if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
- /*
- * Can't use R11 because it is clobbered by the trampoline
- * code, and the reg value is needed by get_vcall_slot_addr.
- */
- amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
- ins->sreg1 = AMD64_RAX;
- }
-
- /*
- * Emit a few nops to simplify get_vcall_slot ().
- */
- amd64_nop (code);
- amd64_nop (code);
- amd64_nop (code);
-
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
amd64_call_imm (code, 0);
+ mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
/* Restore stack alignment */
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
break;
}
}
+#ifndef DISABLE_JIT
+
static int
get_max_epilog_size (MonoCompile *cfg)
{
gboolean args_clobbered = FALSE;
gboolean trace = FALSE;
- cfg->code_size = MAX (((MonoMethodNormal *)method)->header->code_size * 4, 10240);
+ cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
code = cfg->native_code = g_malloc (cfg->code_size);
/* See mono_emit_stack_alloc */
#if defined(HOST_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
+ /* FIXME: handle unbounded code expansion; we should use a loop in case of more than X iterations */
+ guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 10; /* 10 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg */
+ guint32 offset = code - cfg->native_code;
+ if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
+ while (required_code_size >= (cfg->code_size - offset))
+ cfg->code_size *= 2;
+ cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ code = cfg->native_code + offset;
+ mono_jit_stats.code_reallocs++;
+ }
+
while (remaining_size >= 0x1000) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
if (cfg->arch.omit_fp) {
}
#endif
+#ifndef TARGET_WIN32
+ if (mini_get_debug_options ()->init_stacks) {
+ /* Fill the stack frame with a dummy value to force deterministic behavior */
+
+ /* Save registers to the red zone */
+ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDI, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, -16, AMD64_RCX, 8);
+
+ amd64_mov_reg_imm (code, AMD64_RAX, 0x2a2a2a2a2a2a2a2a);
+ amd64_mov_reg_imm (code, AMD64_RCX, alloc_size / 8);
+ amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
+
+ amd64_cld (code);
+ amd64_prefix (code, X86_REP_PREFIX);
+ amd64_stosl (code);
+
+ amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
+ }
+#endif
+
/* Save LMF */
if (method->save_lmf) {
/*
* The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field.
*/
- /* sp is saved right before calls */
+ /*
+ * sp is saved right before calls, but we need to save it here too so
+ * that async stack walks work.
+ */
+ amd64_mov_membase_reg (code, cfg->frame_reg, cfg->arch.lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
/* Skip method (only needed for trampoline LMF frames) */
/* Save callee saved regs */
for (i = 0; i < MONO_MAX_IREGS; ++i) {
}
}
+ /* Initialize ss_trigger_page_var */
+ if (cfg->arch.ss_trigger_page_var) {
+ MonoInst *var = cfg->arch.ss_trigger_page_var;
+
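+ /* The trigger page address is a runtime address, so it cannot appear in AOT compiled code */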
+ g_assert (!cfg->compile_aot);
+ g_assert (var->opcode == OP_REGOFFSET);
+
+ amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
+ amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8);
+ }
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
exc_classes [nthrows] = exc_class;
exc_throw_start [nthrows] = code;
}
- amd64_mov_reg_imm (code, AMD64_ARG_REG1, exc_class->type_token);
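+ /* The corlib exception trampolines expect a type token index (token - MONO_TOKEN_TYPE_DEF) rather than the full token */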
+ amd64_mov_reg_imm (code, AMD64_ARG_REG1, exc_class->type_token - MONO_TOKEN_TYPE_DEF);
patch_info->type = MONO_PATCH_INFO_NONE;
}
+#endif /* DISABLE_JIT */
+
void*
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
guchar *code = p;
int save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
- int rtype = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret)->type;
+ MonoType *ret_type = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret);
- switch (rtype) {
+ switch (ret_type->type) {
case MONO_TYPE_VOID:
/* special case string .ctor icall */
if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
save_mode = SAVE_XMM;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
save_mode = SAVE_EAX;
break;
}
return can_write;
}
-gpointer
-mono_arch_get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
-{
- guint8 buf [10];
- guint32 reg;
- gint32 disp;
- guint8 rex = 0;
- MonoJitInfo *ji = NULL;
-
-#ifdef ENABLE_LLVM
- /* code - 9 might be before the start of the method */
- /* FIXME: Avoid this expensive call somehow */
- ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
-#endif
-
- mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
- code = buf + 9;
-
- *displacement = 0;
-
- code -= 7;
-
- /*
- * A given byte sequence can match more than case here, so we have to be
- * really careful about the ordering of the cases. Longer sequences
- * come first.
- * There are two types of calls:
- * - direct calls: 0xff address_byte 8/32 bits displacement
- * - indirect calls: nop nop nop <call>
- * The nops make sure we don't confuse the instruction preceeding an indirect
- * call with a direct call.
- */
- if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
- /* call OFFSET(%rip) */
- disp = *(guint32*)(code + 3);
- return (gpointer*)(code + disp + 7);
- } else if ((code [0] == 0xff) && (amd64_modrm_reg (code [1]) == 0x2) && (amd64_modrm_mod (code [1]) == 0x2) && (amd64_sib_index (code [2]) == 4) && (amd64_sib_scale (code [2]) == 0)) {
- /* call *[reg+disp32] using indexed addressing */
- /* The LLVM JIT emits this, and we emit it too for %r12 */
- if (IS_REX (code [-1])) {
- rex = code [-1];
- g_assert (amd64_rex_x (rex) == 0);
- }
- reg = amd64_sib_base (code [2]);
- disp = *(gint32*)(code + 3);
- } else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
- /* call *[reg+disp32] */
- if (IS_REX (code [0]))
- rex = code [0];
- reg = amd64_modrm_rm (code [2]);
- disp = *(gint32*)(code + 3);
- /* R10 is clobbered by the IMT thunk code */
- g_assert (reg != AMD64_R10);
- } else if (code [2] == 0xe8) {
- /* call <ADDR> */
- return NULL;
- } else if ((code [3] == 0xff) && (amd64_modrm_reg (code [4]) == 0x2) && (amd64_modrm_mod (code [4]) == 0x1) && (amd64_sib_index (code [5]) == 4) && (amd64_sib_scale (code [5]) == 0)) {
- /* call *[r12+disp8] using indexed addressing */
- if (IS_REX (code [2]))
- rex = code [2];
- reg = amd64_sib_base (code [5]);
- disp = *(gint8*)(code + 6);
- } else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
- /* call *%reg */
- return NULL;
- } else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
- /* call *[reg+disp8] */
- if (IS_REX (code [3]))
- rex = code [3];
- reg = amd64_modrm_rm (code [5]);
- disp = *(gint8*)(code + 6);
- //printf ("B: [%%r%d+0x%x]\n", reg, disp);
- }
- else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
- /* call *%reg */
- if (IS_REX (code [4]))
- rex = code [4];
- reg = amd64_modrm_rm (code [6]);
- disp = 0;
- }
- else
- g_assert_not_reached ();
-
- reg += amd64_rex_b (rex);
-
- /* R11 is clobbered by the trampoline code */
- g_assert (reg != AMD64_R11);
-
- *displacement = disp;
- return (gpointer)regs [reg];
-}
-
int
mono_arch_get_this_arg_reg (MonoMethodSignature *sig, MonoGenericSharingContext *gsctx, guint8 *code)
{
- int this_reg = AMD64_ARG_REG1;
-
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- CallInfo *cinfo;
-
- if (!gsctx && code)
- gsctx = mono_get_generic_context_from_code (code);
-
- cinfo = get_call_info (gsctx, NULL, sig, FALSE);
-
- if (cinfo->ret.storage != ArgValuetypeInReg)
- this_reg = AMD64_ARG_REG2;
- g_free (cinfo);
- }
-
- return this_reg;
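+ /* The vret arg is now emitted after 'this' (see get_call_info ()), so 'this' is always in the first arg reg */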
+ return AMD64_ARG_REG1;
}
gpointer
/*
* mono_arch_get_delegate_invoke_impls:
*
- * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
+ * Return a list of MonoTrampInfo structures for the delegate invoke impl
* trampolines.
*/
GSList*
int i;
code = get_delegate_invoke_impl (TRUE, 0, &code_len);
- res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
code = get_delegate_invoke_impl (FALSE, i, &code_len);
- res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
}
return res;
return cached;
if (mono_aot_only)
- start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
+ start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
else
start = get_delegate_invoke_impl (TRUE, 0, NULL);
if (mono_aot_only) {
char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
- start = mono_aot_get_named_code (name);
+ start = mono_aot_get_trampoline (name);
g_free (name);
} else {
start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R10, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
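+ /* R11 is presumably safe to clobber here now that get_vcall_slot (), which required it to survive until the call, is gone */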
+ amd64_mov_reg_imm (code, AMD64_R11, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
}
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
- /* See the comment below about R10 */
if (item->has_target_code) {
- amd64_mov_reg_imm (code, AMD64_R10, item->value.target_code);
- amd64_jump_reg (code, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, item->value.target_code);
+ amd64_jump_reg (code, AMD64_R11);
} else {
- amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, AMD64_R11, 0);
}
if (fail_case) {
amd64_patch (item->jmp_code, code);
- amd64_mov_reg_imm (code, AMD64_R10, fail_tramp);
- amd64_jump_reg (code, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, fail_tramp);
+ amd64_jump_reg (code, AMD64_R11);
item->jmp_code = NULL;
}
} else {
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R10, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
}
item->jmp_code = code;
amd64_branch8 (code, X86_CC_NE, 0, FALSE);
- /* See the comment below about R10 */
- amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, AMD64_R11, 0);
amd64_patch (item->jmp_code, code);
amd64_breakpoint (code);
item->jmp_code = NULL;
#else
- /* We're using R10 here because R11
- needs to be preserved. R10 needs
- to be preserved for calls which
- require a runtime generic context,
- but interface calls don't. */
- amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
- amd64_jump_membase (code, AMD64_R10, 0);
+ amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+ amd64_jump_membase (code, AMD64_R11, 0);
#endif
}
} else {
if (amd64_is_imm32 (item->key))
amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
else {
- amd64_mov_reg_imm (code, AMD64_R10, item->key);
- amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+ amd64_mov_reg_imm (code, AMD64_R11, item->key);
+ amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
}
item->jmp_code = code;
if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
-
-MonoObject*
-mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
-{
- return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
-}
#endif
MonoVTable*
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
+GSList*
+mono_arch_get_cie_program (void)
+{
+ GSList *l = NULL;
+
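+ /* On method entry the CFA is rsp + 8 and the return address is saved at cfa - 8 */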
+ mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, AMD64_RSP, 8);
+ mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, AMD64_RIP, -8);
+
+ return l;
+}
+
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
}
}
+/*
+ * mono_arch_emit_load_aotconst:
+ *
+ * Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
+ * TARGET from the mscorlib GOT in full-aot code.
+ * On AMD64, the result is placed into R11.
+ */
+guint8*
+mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
+{
+ *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
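+ /* A RIP-relative load; the 0 displacement is patched later, via the patch info added above, to point to the GOT slot */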
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+
+ return code;
+}
+
+/*
+ * mono_arch_get_trampolines:
+ *
+ * Return a list of MonoTrampInfo structures describing arch specific trampolines
+ * for AOT.
+ */
+GSList *
+mono_arch_get_trampolines (gboolean aot)
+{
+ MonoTrampInfo *info;
+ GSList *tramps = NULL;
+
+ mono_arch_get_throw_pending_exception (&info, aot);
+
+ tramps = g_slist_append (tramps, info);
+
+ return tramps;
+}
+
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
* instead.
*/
g_assert (code [0] == 0x90);
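+ /* With MONO_ARCH_NOMAP32BIT the trigger page can be above 4GB, requiring the longer mov_reg_imm + mov_reg_membase sequence (see mono_arch_init ()) */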
- g_assert (((guint64)bp_trigger_page >> 32) == 0);
-
- amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
+ if (breakpoint_size == 8) {
+ amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
+ } else {
+ amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4);
+ }
- g_assert (code - orig_code == BREAKPOINT_SIZE);
+ g_assert (code - orig_code == breakpoint_size);
}
/*
guint8 *code = ip;
int i;
- for (i = 0; i < BREAKPOINT_SIZE; ++i)
+ for (i = 0; i < breakpoint_size; ++i)
x86_nop (code);
}
+
+gboolean
+mono_arch_is_breakpoint_event (void *info, void *sigctx)
+{
+#ifdef HOST_WIN32
+ EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
+ return FALSE;
+#else
+ siginfo_t* sinfo = (siginfo_t*) info;
+ /* Sometimes the address is off by 4 */
+ if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
+ return TRUE;
+ else
+ return FALSE;
+#endif
+}
+
+/*
+ * mono_arch_get_ip_for_breakpoint:
+ *
+ * Convert the ip in CTX to the address where a breakpoint was placed.
+ */
+guint8*
+mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
+{
+ guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
+
+ /* ip points to the instruction causing the fault */
+ ip -= (breakpoint_size - breakpoint_fault_size);
+
+ return ip;
+}
+
+/*
+ * mono_arch_skip_breakpoint:
+ *
+ * Modify CTX so the ip is placed after the breakpoint instruction, so when
+ * we resume, the instruction is not executed again.
+ */
+void
+mono_arch_skip_breakpoint (MonoContext *ctx)
+{
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
+}
/*
* mono_arch_start_single_stepping:
#endif
}
-gboolean
-mono_arch_is_breakpoint_event (void *info, void *sigctx)
-{
-#ifdef HOST_WIN32
- EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
- return FALSE;
-#else
- siginfo_t* sinfo = (siginfo_t*) info;
- /* Sometimes the address is off by 4 */
- if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
- return TRUE;
- else
- return FALSE;
-#endif
-}
-
-/*
- * mono_arch_get_ip_for_breakpoint:
- *
- * Convert the ip in CTX to the address where a breakpoint was placed.
- */
-guint8*
-mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- /* size of xor r11, r11 */
- ip -= 0;
-
- return ip;
-}
-
/*
* mono_arch_get_ip_for_single_step:
*
{
guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
- /* Size of amd64_mov_reg_mem (r11) */
- ip += 8;
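+ /* The seq point address is the address after the instruction causing the fault (see the comment in OP_SEQ_POINT) */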
+ ip += single_step_fault_size;
return ip;
}
-/*
- * mono_arch_skip_breakpoint:
- *
- * Modify CTX so the ip is placed after the breakpoint instruction, so when
- * we resume, the instruction is not executed again.
- */
-void
-mono_arch_skip_breakpoint (MonoContext *ctx)
-{
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + BREAKPOINT_SIZE);
-}
-
/*
* mono_arch_skip_single_step:
*
void
mono_arch_skip_single_step (MonoContext *ctx)
{
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 8);
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + single_step_fault_size);
}
/*