#include <unistd.h>
#endif
+#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#ifdef HOST_WIN32
static gint jit_tls_offset = -1;
#endif
-static gint appdomain_tls_offset = -1;
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-/* Structure used by the sequence points in AOTed code */
-typedef struct {
- gpointer ss_trigger_page;
- gpointer bp_trigger_page;
- gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
-} SeqPointInfo;
-
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
#define DEBUG(a) if (cfg->verbose_level > 1) a
#ifdef HOST_WIN32
-#define PARAM_REGS 4
-
static AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };
static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
#else
-#define PARAM_REGS 6
-
static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };
static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
} ArgumentClass;
static ArgumentClass
-merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
+merge_argument_class_from_type (MonoGenericSharingContext *gsctx, MonoType *type, ArgumentClass class1)
{
ArgumentClass class2 = ARG_CLASS_NO_CLASS;
MonoType *ptype;
- ptype = mini_type_get_underlying_type (NULL, type);
+ ptype = mini_type_get_underlying_type (gsctx, type);
switch (ptype->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
for (i = 0; i < info->num_fields; ++i) {
class2 = class1;
- class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
+ class2 = merge_argument_class_from_type (gsctx, info->fields [i].field->type, class2);
}
break;
}
/* (8 is size of quad) */
quadsize [quad] = info->fields [i].offset + size - (quad * 8);
- class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
+ class1 = merge_argument_class_from_type (gsctx, info->fields [i].field->type, class1);
}
g_assert (class1 != ARG_CLASS_NO_CLASS);
args [quad] = class1;
}
gboolean
-mono_arch_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
+mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
{
CallInfo *c1, *c2;
gboolean res;
c1 = get_call_info (NULL, NULL, caller_sig);
c2 = get_call_info (NULL, NULL, callee_sig);
res = c1->stack_usage >= c2->stack_usage;
- callee_ret = callee_sig->ret;
+ callee_ret = mini_replace_type (callee_sig->ret);
if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != ArgValuetypeInReg)
/* An address on the callee's stack is passed as the first argument */
res = FALSE;
cfg->arch.omit_fp = FALSE;
#endif
+#ifdef HOST_WIN32
+ cfg->arch.omit_fp = FALSE;
+#endif
+
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
sig = mono_method_signature (cfg->method);
cinfo = cfg->arch.cinfo;
- sig_ret = sig->ret;
+ sig_ret = mini_replace_type (sig->ret);
+
/*
* Contrary to mono_arch_allocate_vars (), the information should describe
* where the arguments are at the beginning of the method, not where they can be
sig = mono_method_signature (cfg->method);
cinfo = cfg->arch.cinfo;
- sig_ret = sig->ret;
+ sig_ret = mini_replace_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
offset = 0;
}
- if (cfg->method->save_lmf) {
- /* The LMF var is allocated normally */
- } else {
- if (cfg->arch.omit_fp)
- cfg->arch.reg_save_area_offset = offset;
- /* Reserve space for callee saved registers */
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- offset += sizeof(mgreg_t);
- }
- }
+ cfg->arch.saved_iregs = cfg->used_int_regs;
+ if (cfg->method->save_lmf)
+ /* Save all callee-saved registers normally, and restore them when unwinding through an LMF */
+ cfg->arch.saved_iregs |= (1 << AMD64_RBX) | (1 << AMD64_R12) | (1 << AMD64_R13) | (1 << AMD64_R14) | (1 << AMD64_R15);
+
+ if (cfg->arch.omit_fp)
+ cfg->arch.reg_save_area_offset = offset;
+ /* Reserve space for callee saved registers */
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
+ offset += sizeof(mgreg_t);
+ }
+ if (!cfg->arch.omit_fp)
+ cfg->arch.reg_save_area_offset = -offset;
if (sig_ret->type != MONO_TYPE_VOID) {
switch (cinfo->ret.storage) {
{
MonoMethodSignature *sig;
CallInfo *cinfo;
+ MonoType *sig_ret;
sig = mono_method_signature (cfg->method);
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
- if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig->ret)) {
+ sig_ret = mini_replace_type (sig->ret);
+ if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig_ret)) {
cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
MonoType *t, *sig_ret;
n = sig->param_count + sig->hasthis;
- sig_ret = sig->ret;
+ sig_ret = mini_replace_type (sig->ret);
+
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
+ sig_ret = mini_replace_type (sig->ret);
if (sig_ret && MONO_TYPE_ISSTRUCT (sig_ret)) {
MonoInst *vtarg;
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
- MonoType *ret = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret);
+ MonoType *ret = mini_replace_type (mono_method_signature (method)->ret);
if (ret->type == MONO_TYPE_R4) {
if (COMPILE_LLVM (cfg))
CallInfo *cinfo;
} ArchDynCallInfo;
-typedef struct {
- mgreg_t regs [PARAM_REGS];
- mgreg_t res;
- guint8 *ret;
-} DynCallArgs;
-
static gboolean
dyn_call_supported (MonoMethodSignature *sig, CallInfo *cinfo)
{
case OP_IDIV_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
- mono_decompose_op_imm (cfg, bb, ins);
- break;
+ case OP_LREM_IMM:
case OP_IREM_IMM:
- /* Keep the opcode if we can implement it efficiently */
- if (!((ins->inst_imm > 0) && (mono_is_power_of_two (ins->inst_imm) != -1)))
- mono_decompose_op_imm (cfg, bb, ins);
+ mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
int sreg = tree->sreg1;
int need_touch = FALSE;
-#if defined(HOST_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(HOST_WIN32)
+ need_touch = TRUE;
+#elif defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
if (!tree->flags & MONO_INST_INIT)
need_touch = TRUE;
#endif
static guint8*
emit_tls_get_reg (guint8* code, int dreg, int offset_reg)
{
+ /* offset_reg contains a value translated by mono_arch_translate_tls_offset () */
#ifdef TARGET_OSX
- // FIXME: tls_gs_offset can change too, do these when calculating the tls offset
if (dreg != offset_reg)
- amd64_mov_reg_reg (code, dreg, offset_reg, sizeof (gpointer));
- amd64_shift_reg_imm (code, X86_SHL, dreg, 3);
- if (tls_gs_offset)
- amd64_alu_reg_imm (code, X86_ADD, dreg, tls_gs_offset);
- x86_prefix (code, X86_GS_PREFIX);
- amd64_mov_reg_membase (code, dreg, dreg, 0, sizeof (gpointer));
+ amd64_mov_reg_reg (code, dreg, offset_reg, sizeof (mgreg_t));
+ amd64_prefix (code, X86_GS_PREFIX);
+ amd64_mov_reg_membase (code, dreg, dreg, 0, sizeof (mgreg_t));
#elif defined(__linux__)
int tmpreg = -1;
static guint8*
amd64_emit_tls_set_reg (guint8 *code, int sreg, int offset_reg)
{
+ /* offset_reg contains a value translated by mono_arch_translate_tls_offset () */
#ifdef HOST_WIN32
g_assert_not_reached ();
#elif defined(__APPLE__)
- // FIXME: tls_gs_offset can change too, do these when calculating the tls offset
- g_assert (sreg != AMD64_R11);
- amd64_mov_reg_reg (code, AMD64_R11, offset_reg, sizeof (gpointer));
- amd64_shift_reg_imm (code, X86_SHL, AMD64_R11, 3);
- if (tls_gs_offset)
- amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, tls_gs_offset);
x86_prefix (code, X86_GS_PREFIX);
- amd64_mov_membase_reg (code, AMD64_R11, 0, sreg, sizeof (gpointer));
+ amd64_mov_membase_reg (code, offset_reg, 0, sreg, 8);
#else
x86_prefix (code, X86_FS_PREFIX);
amd64_mov_membase_reg (code, offset_reg, 0, sreg, 8);
#endif
return code;
}
+
+ /*
+ * mono_arch_translate_tls_offset:
+ *
+ * Translate the TLS offset OFFSET computed by MONO_THREAD_VAR_OFFSET () into a format usable by OP_TLS_GET_REG/OP_TLS_SET_REG.
+ */
+int
+mono_arch_translate_tls_offset (int offset)
+{
+#ifdef __APPLE__
+	/* OSX TLS: slot N is accessed as gs:[tls_gs_offset + N * 8] (see emit_tls_get_reg ()),
+	 * so fold the base and the scaled slot index into a single displacement here. */
+	return tls_gs_offset + (offset * 8);
+#else
+	/* Elsewhere the raw offset is already usable as an fs:/gs:-relative displacement */
+	return offset;
+#endif
+}
/*
* emit_setup_lmf:
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
- int i;
-
/*
* The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field.
*/
* sp is saved right before calls but we need to save it here too so
* async stack walks would work.
*/
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
- /* Skip method (only needed for trampoline LMF frames) */
- /* Save callee saved regs */
- for (i = 0; i < MONO_MAX_IREGS; ++i) {
- int offset;
-
- switch (i) {
- case AMD64_RBX: offset = G_STRUCT_OFFSET (MonoLMF, rbx); break;
- case AMD64_RBP: offset = G_STRUCT_OFFSET (MonoLMF, rbp); break;
- case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
- case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
- case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
-#ifndef __native_client_codegen__
- case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
-#endif
-#ifdef HOST_WIN32
- case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
- case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
-#endif
- default:
- offset = -1;
- break;
- }
-
- if (offset != -1) {
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + offset, i, 8);
- if ((cfg->arch.omit_fp || (i != AMD64_RBP)) && cfa_offset != -1)
- mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - (lmf_offset + offset)));
- }
- }
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
+ /* Save rbp */
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_RBP, 8);
+ if (cfg->arch.omit_fp && cfa_offset != -1)
+ mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - (cfa_offset - (lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp))));
/* These can't contain refs */
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
#ifdef HOST_WIN32
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
#endif
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
-
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
/* These are handled automatically by the stack marking code */
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), SLOT_NOREF);
-#ifdef HOST_WIN32
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), SLOT_NOREF);
-#endif
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
return code;
}
+#ifdef HOST_WIN32
/*
* emit_push_lmf:
*
static guint8*
emit_push_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, gboolean *args_clobbered)
{
-#ifdef HOST_WIN32
if (jit_tls_offset != -1) {
code = mono_amd64_emit_tls_get (code, AMD64_RAX, jit_tls_offset);
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
} else {
/*
* The call might clobber argument registers, but they are already
}
/* Save lmf_addr */
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
/* Save previous_lmf */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
/* Set new lmf */
amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
-#else
- g_assert_not_reached ();
-#endif
return code;
}
+#endif
+#ifdef HOST_WIN32
/*
* emit_pop_lmf:
*
static guint8*
emit_pop_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
{
-#ifdef HOST_WIN32
	/* Restore previous lmf */
-	amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
-	amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
+	/* RCX = lmf->previous_lmf, R11 = lmf->lmf_addr; both were stored into the frame by emit_push_lmf () */
+	amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
+	amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
+	/* *lmf_addr = previous_lmf, i.e. unlink this frame's LMF */
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
-#else
-	g_assert_not_reached ();
-#endif
	return code;
}
+#endif
#define REAL_PRINT_REG(text,reg) \
mono_assert (reg >= 0); \
case OP_NOP:
case OP_DUMMY_USE:
case OP_DUMMY_STORE:
+ case OP_DUMMY_ICONST:
+ case OP_DUMMY_R8CONST:
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
/* Load info var */
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
- val = ((offset) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
+ val = ((offset) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either a valid address or the address of a trigger page */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
}
break;
- case OP_IREM_IMM: {
- int power = mono_is_power_of_two (ins->inst_imm);
-
- g_assert (ins->sreg1 == X86_EAX);
- g_assert (ins->dreg == X86_EAX);
- g_assert (power >= 0);
-
- if (power == 0) {
- amd64_mov_reg_imm (code, ins->dreg, 0);
- break;
- }
-
- /* Based on gcc code */
-
- /* Add compensation for negative dividents */
- amd64_mov_reg_reg_size (code, AMD64_RDX, AMD64_RAX, 4);
- if (power > 1)
- amd64_shift_reg_imm_size (code, X86_SAR, AMD64_RDX, 31, 4);
- amd64_shift_reg_imm_size (code, X86_SHR, AMD64_RDX, 32 - power, 4);
- amd64_alu_reg_reg_size (code, X86_ADD, AMD64_RAX, AMD64_RDX, 4);
- /* Compute remainder */
- amd64_alu_reg_imm_size (code, X86_AND, AMD64_RAX, (1 << power) - 1, 4);
- /* Remove compensation */
- amd64_alu_reg_reg_size (code, X86_SUB, AMD64_RAX, AMD64_RDX, 4);
- break;
- }
case OP_LMUL_OVF:
amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
}
case OP_TAILCALL: {
MonoCallInst *call = (MonoCallInst*)ins;
- int pos = 0, i;
-
- /* FIXME: no tracing support... */
- if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
- code = mono_arch_instrument_epilog_full (cfg, mono_profiler_method_leave, code, FALSE, TRUE);
+ int i, save_area_offset;
g_assert (!cfg->method->save_lmf);
- if (cfg->arch.omit_fp) {
- guint32 save_offset = 0;
- /* Pop callee-saved registers */
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_reg_membase (code, i, AMD64_RSP, save_offset, 8);
- save_offset += 8;
- }
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
+ /* Restore callee saved registers */
+ save_area_offset = cfg->arch.reg_save_area_offset;
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
+ save_area_offset += 8;
+ }
+ if (cfg->arch.omit_fp) {
+ if (cfg->arch.stack_alloc_size)
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
// FIXME:
if (call->stack_usage)
NOT_IMPLEMENTED;
- }
- else {
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= sizeof(mgreg_t);
-
- /* Restore callee-saved registers */
- for (i = AMD64_NREG - 1; i > 0; --i) {
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_reg_membase (code, i, AMD64_RBP, pos, sizeof(mgreg_t));
- pos += sizeof(mgreg_t);
- }
- }
-
+ } else {
/* Copy arguments on the stack to our argument area */
for (i = 0; i < call->stack_usage; i += sizeof(mgreg_t)) {
amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, sizeof(mgreg_t));
amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, sizeof(mgreg_t));
}
-
- if (pos)
- amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
amd64_leave (code);
}
/* Save result */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
- amd64_mov_membase_reg (code, AMD64_R11, G_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
+ amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
break;
}
case OP_AMD64_SAVE_SP_TO_LMF: {
MonoInst *lmf_var = cfg->lmf_var;
- amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
+ amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
break;
}
case OP_X86_PUSH:
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
break;
- case OP_ICONV_TO_R4: /* FIXME: change precision */
+ case OP_ICONV_TO_R4:
+ amd64_sse_cvtsi2ss_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
+ amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
+ break;
case OP_ICONV_TO_R8:
amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
break;
- case OP_LCONV_TO_R4: /* FIXME: change precision */
+ case OP_LCONV_TO_R4:
+ amd64_sse_cvtsi2ss_reg_reg (code, ins->dreg, ins->sreg1);
+ amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
+ break;
case OP_LCONV_TO_R8:
amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
break;
case OP_FCONV_TO_R4:
- /* FIXME: nothing to do ?? */
+ amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
+ amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
break;
case OP_FCONV_TO_I1:
code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
int dreg = ins->dreg;
guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
- if (dreg == ins->inst_basereg)
- dreg = AMD64_R11;
-
- if (dreg != ins->sreg2)
- amd64_mov_reg_reg (code, ins->dreg, ins->sreg2, size);
-
- x86_prefix (code, X86_LOCK_PREFIX);
- amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
-
- if (dreg != ins->dreg)
- amd64_mov_reg_reg (code, ins->dreg, dreg, size);
-
- break;
- }
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8: {
- int dreg = ins->dreg;
- guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
-
if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
dreg = AMD64_R11;
#endif
}
- /* Save callee saved registers */
- if (!cfg->arch.omit_fp && !method->save_lmf) {
- int offset = cfa_offset;
-
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_push_reg (code, i);
- pos += 8; /* AMD64 push inst is always 8 bytes, no way to change it */
- offset += 8;
- mono_emit_unwind_op_offset (cfg, code, i, - offset);
- async_exc_point (code);
-
- /* These are handled automatically by the stack marking code */
- mini_gc_set_slot_type_from_cfa (cfg, - offset, SLOT_NOREF);
- }
- }
-
/* The param area is always at offset 0 from sp */
/* This needs to be allocated here, since it has to come after the spill area */
if (cfg->arch.no_pushes && cfg->param_area) {
}
/* Save callee saved registers */
- if (cfg->arch.omit_fp && !method->save_lmf) {
- gint32 save_area_offset = cfg->arch.reg_save_area_offset;
+ if (TRUE || !method->save_lmf) {
+ gint32 save_area_offset;
+
+ if (cfg->arch.omit_fp) {
+ save_area_offset = cfg->arch.reg_save_area_offset;
+		/* Save callee saved registers after sp is adjusted */
+ /* The registers are saved at the bottom of the frame */
+ /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
+ } else {
+ /* The registers are saved just below the saved rbp */
+ save_area_offset = cfg->arch.reg_save_area_offset;
+ }
- /* Save caller saved registers after sp is adjusted */
- /* The registers are saved at the bottom of the frame */
- /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_membase_reg (code, AMD64_RSP, save_area_offset, i, 8);
- mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
+ amd64_mov_membase_reg (code, cfg->frame_reg, save_area_offset, i, 8);
- /* These are handled automatically by the stack marking code */
- mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
+ if (cfg->arch.omit_fp) {
+ mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
+ } else {
+ mono_emit_unwind_op_offset (cfg, code, i, - (-save_area_offset + (2 * 8)));
+ // FIXME: GC
+ }
save_area_offset += 8;
async_exc_point (code);
if (cfg->compile_aot) {
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8);
} else {
amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
}
int max_epilog_size;
CallInfo *cinfo;
gint32 lmf_offset = cfg->lmf_var ? ((MonoInst*)cfg->lmf_var)->inst_offset : -1;
-
+ gint32 save_area_offset = cfg->arch.reg_save_area_offset;
+
max_epilog_size = get_max_epilog_size (cfg);
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
code = cfg->native_code + cfg->code_len;
+ cfg->has_unwind_info_for_epilog = TRUE;
+
+ /* Mark the start of the epilog */
+ mono_emit_unwind_op_mark_loc (cfg, code, 0);
+
+	/* Save the unwind state which is needed by the out-of-line code */
+ mono_emit_unwind_op_remember_state (cfg, code);
+
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
#endif
/* check if we need to restore protection of the stack after a stack overflow */
- if (mono_get_jit_tls_offset () != -1) {
+ if (!cfg->compile_aot && mono_get_jit_tls_offset () != -1) {
guint8 *patch;
code = mono_amd64_emit_tls_get (code, AMD64_RCX, mono_get_jit_tls_offset ());
/* we load the value in a separate instruction: this mechanism may be
* used later as a safer way to do thread interruption
*/
- amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RCX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RCX, MONO_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 8);
x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
patch = code;
x86_branch8 (code, X86_CC_Z, 0, FALSE);
} else {
/* FIXME: maybe save the jit tls in the prolog */
}
-
- /* Restore caller saved regs */
if (cfg->used_int_regs & (1 << AMD64_RBP)) {
- amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_RBX)) {
- amd64_mov_reg_membase (code, AMD64_RBX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R12)) {
- amd64_mov_reg_membase (code, AMD64_R12, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R13)) {
- amd64_mov_reg_membase (code, AMD64_R13, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R14)) {
- amd64_mov_reg_membase (code, AMD64_R14, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R15)) {
-#if defined(__default_codegen__)
- amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
-#elif defined(__native_client_codegen__)
- g_assert_not_reached();
-#endif
- }
-#ifdef HOST_WIN32
- if (cfg->used_int_regs & (1 << AMD64_RDI)) {
- amd64_mov_reg_membase (code, AMD64_RDI, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_RSI)) {
- amd64_mov_reg_membase (code, AMD64_RSI, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), 8);
+ amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
}
-#endif
- } else {
-
- if (cfg->arch.omit_fp) {
- gint32 save_area_offset = cfg->arch.reg_save_area_offset;
-
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_reg_membase (code, i, AMD64_RSP, save_area_offset, 8);
- save_area_offset += 8;
- }
- }
- else {
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= sizeof(mgreg_t);
-
- if (pos) {
- if (pos == - sizeof(mgreg_t)) {
- /* Only one register, so avoid lea */
- for (i = AMD64_NREG - 1; i > 0; --i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_reg_membase (code, i, AMD64_RBP, pos, 8);
- }
- }
- else {
- amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, pos);
+ }
- /* Pop registers in reverse order */
- for (i = AMD64_NREG - 1; i > 0; --i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_pop_reg (code, i);
- }
- }
+ /* Restore callee saved regs */
+ for (i = 0; i < AMD64_NREG; ++i) {
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
+ /* Restore only used_int_regs, not arch.saved_iregs */
+ if (cfg->used_int_regs & (1 << i)) {
+ amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
+ mono_emit_unwind_op_same_value (cfg, code, i);
+ async_exc_point (code);
}
+ save_area_offset += 8;
}
}
}
if (cfg->arch.omit_fp) {
- if (cfg->arch.stack_alloc_size)
+ if (cfg->arch.stack_alloc_size) {
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
+ }
} else {
amd64_leave (code);
+ mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
}
+ mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8);
async_exc_point (code);
amd64_ret (code);
+ /* Restore the unwind state to be the same as before the epilog */
+ mono_emit_unwind_op_restore_state (cfg, code);
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
guchar *code = p;
int save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
- MonoType *ret_type = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret);
+ MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
int i;
switch (ret_type->type) {
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
- amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, target), 8);
- amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
+ amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
g_assert ((code - start) < 64);
} else {
start = code = mono_global_codeman_reserve (64);
if (param_count == 0) {
- amd64_jump_membase (code, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ amd64_jump_membase (code, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
} else {
/* We have to shift the arguments left */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
#endif
}
- amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
}
g_assert ((code - start) < 64);
}
return NULL;
/* FIXME: Support more cases */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
+ if (MONO_TYPE_ISSTRUCT (mini_replace_type (sig->ret)))
return NULL;
if (has_target) {
* We need to init this multiple times, since when we are first called, the key might not
* be initialized yet.
*/
- appdomain_tls_offset = mono_domain_get_tls_key ();
jit_tls_offset = mono_get_jit_tls_key ();
/* Only 64 tls entries can be accessed using inline code */
- if (appdomain_tls_offset >= 64)
- appdomain_tls_offset = -1;
if (jit_tls_offset >= 64)
jit_tls_offset = -1;
#else
#ifdef MONO_XEN_OPT
optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
- appdomain_tls_offset = mono_domain_get_tls_offset ();
#endif
}
{
}
-#ifdef MONO_ARCH_HAVE_IMT
-
#if defined(__default_codegen__)
#define CMP_SIZE (6 + 1)
#define CMP_REG_REG_SIZE (4 + 1)
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
-#endif
MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
return 0;
}
-MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
-{
- MonoInst* ins;
-
- if (appdomain_tls_offset == -1)
- return NULL;
-
- MONO_INST_NEW (cfg, ins, OP_TLS_GET);
- ins->inst_offset = appdomain_tls_offset;
- return ins;
-}
-
#define _CTX_REG(ctx,fld,i) ((&ctx->fld)[i])
mgreg_t
}
#endif
+
+/*
+ * mono_arch_opcode_supported:
+ *
+ *   Return TRUE if OPCODE has a native implementation in this backend.
+ * Only the 32/64 bit atomic add/exchange/CAS opcodes are reported here.
+ */
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
+	switch (opcode) {
+	case OP_ATOMIC_ADD_I4:
+	case OP_ATOMIC_ADD_I8:
+	case OP_ATOMIC_EXCHANGE_I4:
+	case OP_ATOMIC_EXCHANGE_I8:
+	case OP_ATOMIC_CAS_I4:
+	case OP_ATOMIC_CAS_I8:
+		return TRUE;
+	default:
+		return FALSE;
+	}
+}