#include <unistd.h>
#endif
+#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/threads.h>
#include "debugger-agent.h"
#include "mini-gc.h"
-#ifdef HOST_WIN32
-static gint jit_tls_offset = -1;
-#endif
-
#ifdef MONO_XEN_OPT
static gboolean optimize_for_xen = TRUE;
#else
#define optimize_for_xen 0
#endif
/* This mutex protects architecture specific caches */
-#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
-#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
-static CRITICAL_SECTION mini_arch_mutex;
+#define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
+#define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
+static mono_mutex_t mini_arch_mutex;
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-/* Structure used by the sequence points in AOTed code */
-typedef struct {
- gpointer ss_trigger_page;
- gpointer bp_trigger_page;
- gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
-} SeqPointInfo;
-
/*
 * The code generated for sequence points reads from this location, which is
 * made read-only when single stepping is enabled.
 */
static gpointer ss_trigger_page;

/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;

/* The size of the single step instruction causing the actual fault */
static int single_step_fault_size;
-#ifdef HOST_WIN32
-/* On Win64 always reserve first 32 bytes for first four arguments */
-#define ARGS_OFFSET 48
-#else
+/* Offset between fp and the first argument in the callee */
#define ARGS_OFFSET 16
-#endif
#define GP_SCRATCH_REG AMD64_R11
#define DEBUG(a) if (cfg->verbose_level > 1) a
#ifdef HOST_WIN32
-#define PARAM_REGS 4
-
static AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };
static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
#else
-#define PARAM_REGS 6
-
static AMD64_Reg_No param_regs [] = { AMD64_RDI, AMD64_RSI, AMD64_RDX, AMD64_RCX, AMD64_R8, AMD64_R9 };
static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
gr = 0;
fr = 0;
+#ifdef HOST_WIN32
+ /* Reserve space where the callee can save the argument registers */
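+	/* (this is the 32 byte "home space" the Win64 calling convention requires callers to allocate) */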
+ stack_size = 4 * sizeof (mgreg_t);
+#endif
+
/* return value */
{
ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
}
/* fall through */
case MONO_TYPE_VALUETYPE:
- add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
- break;
case MONO_TYPE_TYPEDBYREF:
-#if defined( HOST_WIN32 ) || defined( __native_client_codegen__ )
add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
-#else
- stack_size += sizeof (MonoTypedRef);
- ainfo->storage = ArgOnStack;
-#endif
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
add_general (&gr, &stack_size, ainfo);
break;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
-#ifdef HOST_WIN32
- // There always is 32 bytes reserved on the stack when calling on Winx64
- stack_size += 0x20;
-#endif
-
-#ifndef MONO_AMD64_NO_PUSHES
- if (stack_size & 0x8) {
- /* The AMD64 ABI requires each stack frame to be 16 byte aligned */
- cinfo->need_stack_align = TRUE;
- stack_size += 8;
- }
-#endif
-
cinfo->stack_usage = stack_size;
cinfo->reg_usage = gr;
cinfo->freg_usage = fr;
{
int flags;
- InitializeCriticalSection (&mini_arch_mutex);
+ mono_mutex_init_recursive (&mini_arch_mutex);
#if defined(__native_client_codegen__)
mono_native_tls_alloc (&nacl_instruction_depth, NULL);
mono_native_tls_set_value (nacl_instruction_depth, (gpointer)0);
void
mono_arch_cleanup (void)
{
- DeleteCriticalSection (&mini_arch_mutex);
+ mono_mutex_destroy (&mini_arch_mutex);
#if defined(__native_client_codegen__)
mono_native_tls_free (nacl_instruction_depth);
mono_native_tls_free (nacl_rex_tag);
cfg->arch.omit_fp = FALSE;
#endif
-#ifdef HOST_WIN32
- cfg->arch.omit_fp = FALSE;
-#endif
-
if (cfg->disable_omit_fp)
cfg->arch.omit_fp = FALSE;
offset = 0;
}
- if (cfg->method->save_lmf) {
- /* The LMF var is allocated normally */
- } else {
- if (cfg->arch.omit_fp)
- cfg->arch.reg_save_area_offset = offset;
- /* Reserve space for callee saved registers */
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- offset += sizeof(mgreg_t);
- }
- if (!cfg->arch.omit_fp)
- cfg->arch.reg_save_area_offset = -offset;
- }
+ cfg->arch.saved_iregs = cfg->used_int_regs;
+ if (cfg->method->save_lmf)
+ /* Save all callee-saved registers normally, and restore them when unwinding through an LMF */
+ cfg->arch.saved_iregs |= (1 << AMD64_RBX) | (1 << AMD64_R12) | (1 << AMD64_R13) | (1 << AMD64_R14) | (1 << AMD64_R15);
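+	/* RBP is not in this mask: it is saved by the standard frame setup, or in the LMF itself */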
+
+ if (cfg->arch.omit_fp)
+ cfg->arch.reg_save_area_offset = offset;
+ /* Reserve space for callee saved registers */
+ for (i = 0; i < AMD64_NREG; ++i)
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
+ offset += sizeof(mgreg_t);
+ }
+ if (!cfg->arch.omit_fp)
+ cfg->arch.reg_save_area_offset = -offset;
if (sig_ret->type != MONO_TYPE_VOID) {
switch (cinfo->ret.storage) {
cfg->arch.ss_trigger_page_var = ins;
}
-#ifdef MONO_AMD64_NO_PUSHES
- /*
- * When this is set, we pass arguments on the stack by moves, and by allocating
- * a bigger stack frame, instead of pushes.
- * Pushes complicate exception handling because the arguments on the stack have
- * to be popped each time a frame is unwound. They also make fp elimination
- * impossible.
- * FIXME: This doesn't work inside filter/finally clauses, since those execute
- * on a new frame which doesn't include a param area.
- */
- cfg->arch.no_pushes = TRUE;
-#endif
-
if (cfg->method->save_lmf)
cfg->create_lmf_var = TRUE;
-#if !defined(HOST_WIN32)
if (cfg->method->save_lmf) {
cfg->lmf_ir = TRUE;
+#if !defined(HOST_WIN32)
if (mono_get_lmf_tls_offset () != -1 && !optimize_for_xen)
cfg->lmf_ir_mono_lmf = TRUE;
- }
-#endif
-
-#ifndef MONO_AMD64_NO_PUSHES
- cfg->arch_eh_jit_info = 1;
#endif
+ }
}
static void
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
- MonoInst *arg;
MonoMethodSignature *tmp_sig;
int sig_reg;
sig_reg = mono_alloc_ireg (cfg);
MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
- if (cfg->arch.no_pushes) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg);
- } else {
- MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
- arg->sreg1 = sig_reg;
- MONO_ADD_INS (cfg->cbb, arg);
- }
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg);
}
static inline LLVMArgStorage
return;
}
- if (cinfo->need_stack_align) {
- if (!cfg->arch.no_pushes)
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
- }
-
/*
* Emit all arguments which are passed on the stack to prevent register
* allocation problems.
*/
- if (cfg->arch.no_pushes) {
- for (i = 0; i < n; ++i) {
- MonoType *t;
- ainfo = cinfo->args + i;
+ for (i = 0; i < n; ++i) {
+ MonoType *t;
+ ainfo = cinfo->args + i;
- in = call->args [i];
+ in = call->args [i];
- if (sig->hasthis && i == 0)
- t = &mono_defaults.object_class->byval_arg;
- else
- t = sig->params [i - sig->hasthis];
+ if (sig->hasthis && i == 0)
+ t = &mono_defaults.object_class->byval_arg;
+ else
+ t = sig->params [i - sig->hasthis];
- if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t) && !call->tail_call) {
- if (!t->byref) {
- if (t->type == MONO_TYPE_R4)
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
- else if (t->type == MONO_TYPE_R8)
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
- else
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
- } else {
+ if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t) && !call->tail_call) {
+ if (!t->byref) {
+ if (t->type == MONO_TYPE_R4)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
+ else if (t->type == MONO_TYPE_R8)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
+ else
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
- }
- if (cfg->compute_gc_maps) {
- MonoInst *def;
+ } else {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
+ }
+ if (cfg->compute_gc_maps) {
+ MonoInst *def;
- EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
- }
+ EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
}
}
}
arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
- MONO_ADD_INS (cfg->cbb, arg);
- }
- } else {
- if (cfg->arch.no_pushes) {
- /* Already done */
- } else {
- MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
- arg->sreg1 = in->dreg;
- if (!sig->params [i - sig->hasthis]->byref) {
- if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
- arg->opcode = OP_STORER4_MEMBASE_REG;
- arg->inst_destbasereg = X86_ESP;
- arg->inst_offset = 0;
- } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
- arg->opcode = OP_STORER8_MEMBASE_REG;
- arg->inst_destbasereg = X86_ESP;
- arg->inst_offset = 0;
- }
- }
MONO_ADD_INS (cfg->cbb, arg);
}
}
}
}
-#ifdef HOST_WIN32
- if (call->inst.opcode != OP_TAILCALL) {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 0x20);
- }
-#endif
-
if (cfg->method->save_lmf) {
MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF);
MONO_ADD_INS (cfg->cbb, arg);
MonoInst *vtaddr, *load;
vtaddr = mono_compile_create_var (cfg, &ins->klass->byval_arg, OP_LOCAL);
- g_assert (!cfg->arch.no_pushes);
-
MONO_INST_NEW (cfg, load, OP_LDADDR);
cfg->has_indirection = TRUE;
load->inst_p0 = vtaddr;
MONO_ADD_INS (cfg->cbb, arg);
mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, ainfo->pair_regs [0], FALSE);
} else {
- MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
- arg->sreg1 = load->dreg;
- MONO_ADD_INS (cfg->cbb, arg);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, load->dreg);
}
} else {
if (size == 8) {
- if (cfg->arch.no_pushes) {
- int dreg = mono_alloc_ireg (cfg);
+ int dreg = mono_alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg);
- } else {
- /* Can't use this for < 8 since it does an 8 byte memory load */
- MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
- arg->inst_basereg = src->dreg;
- arg->inst_offset = 0;
- MONO_ADD_INS (cfg->cbb, arg);
- }
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg);
} else if (size <= 40) {
- if (cfg->arch.no_pushes) {
- mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
- } else {
- MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 8));
- mini_emit_memcpy (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
- }
+ mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
} else {
- if (cfg->arch.no_pushes) {
- // FIXME: Code growth
- mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
- } else {
- MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
- arg->inst_basereg = src->dreg;
- arg->inst_offset = 0;
- arg->inst_imm = size;
- MONO_ADD_INS (cfg->cbb, arg);
- }
+ // FIXME: Code growth
+ mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
}
if (cfg->compute_gc_maps) {
CallInfo *cinfo;
} ArchDynCallInfo;
-typedef struct {
- mgreg_t regs [PARAM_REGS];
- mgreg_t res;
- guint8 *ret;
-} DynCallArgs;
-
static gboolean
dyn_call_supported (MonoMethodSignature *sig, CallInfo *cinfo)
{
int need_touch = FALSE;
#if defined(HOST_WIN32)
- need_touch = TRUE;
+ need_touch = TRUE;
#elif defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
if (!tree->flags & MONO_INST_INIT)
need_touch = TRUE;
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
- if (cfg->param_area && cfg->arch.no_pushes)
+ if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
amd64_cld (code);
#if defined(__default_codegen__)
mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
#ifdef HOST_WIN32
- g_assert (tls_offset < 64);
- x86_prefix (code, X86_GS_PREFIX);
- amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
+ if (tls_offset < 64) {
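+		/* Slots 0-63 live inline in the TEB at TEB->TlsSlots (offset 0x1480 in the 64-bit TEB) */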
+ x86_prefix (code, X86_GS_PREFIX);
+ amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
+ } else {
+ guint8 *buf [16];
+
+ g_assert (tls_offset < 0x440);
+ /* Load TEB->TlsExpansionSlots */
+ x86_prefix (code, X86_GS_PREFIX);
+ amd64_mov_reg_mem (code, dreg, 0x1780, 8);
+ amd64_test_reg_reg (code, dreg, dreg);
+ buf [0] = code;
+ amd64_branch (code, X86_CC_EQ, code, TRUE);
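+		/* The expansion slot index is tls_offset - 64, hence the 64 * 8 == 0x200 rebase */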
+ amd64_mov_reg_membase (code, dreg, dreg, (tls_offset * 8) - 0x200, 8);
+ amd64_patch (buf [0], code);
+ }
#elif defined(__APPLE__)
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8);
static guint8*
emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
{
- int i;
-
/*
* The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field.
*/
* sp is saved right before calls but we need to save it here too so
* async stack walks would work.
*/
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
- /* Skip method (only needed for trampoline LMF frames) */
- /* Save callee saved regs */
- for (i = 0; i < MONO_MAX_IREGS; ++i) {
- int offset;
-
- switch (i) {
- case AMD64_RBX: offset = G_STRUCT_OFFSET (MonoLMF, rbx); break;
- case AMD64_RBP: offset = G_STRUCT_OFFSET (MonoLMF, rbp); break;
- case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
- case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
- case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
-#ifndef __native_client_codegen__
- case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
-#endif
-#ifdef HOST_WIN32
- case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
- case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
-#endif
- default:
- offset = -1;
- break;
- }
-
- if (offset != -1) {
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + offset, i, 8);
- if ((cfg->arch.omit_fp || (i != AMD64_RBP)) && cfa_offset != -1)
- mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - (lmf_offset + offset)));
- }
- }
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
+ /* Save rbp */
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_RBP, 8);
+ if (cfg->arch.omit_fp && cfa_offset != -1)
+ mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - (cfa_offset - (lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp))));
/* These can't contain refs */
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
-#ifdef HOST_WIN32
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
-#endif
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
-
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
/* These are handled automatically by the stack marking code */
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), SLOT_NOREF);
-#ifdef HOST_WIN32
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), SLOT_NOREF);
- mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), SLOT_NOREF);
-#endif
-
- return code;
-}
-
-#ifdef HOST_WIN32
-/*
- * emit_push_lmf:
- *
- * Emit code to push an LMF structure on the LMF stack.
- */
-static guint8*
-emit_push_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, gboolean *args_clobbered)
-{
- if (jit_tls_offset != -1) {
- code = mono_amd64_emit_tls_get (code, AMD64_RAX, jit_tls_offset);
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
- } else {
- /*
- * The call might clobber argument registers, but they are already
- * saved to the stack/global regs.
- */
- if (args_clobbered)
- *args_clobbered = TRUE;
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_get_lmf_addr", TRUE);
- }
-
- /* Save lmf_addr */
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
- /* Save previous_lmf */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
- /* Set new lmf */
- amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
- amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
-
- return code;
-}
-#endif
-
-#ifdef HOST_WIN32
-/*
- * emit_pop_lmf:
- *
- * Emit code to pop an LMF structure from the LMF stack.
- */
-static guint8*
-emit_pop_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
-{
- /* Restore previous lmf */
- amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
- amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
- amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
return code;
}
-#endif
#define REAL_PRINT_REG(text,reg) \
mono_assert (reg >= 0); \
case OP_NOT_REACHED:
case OP_NOT_NULL:
break;
+ case OP_IL_SEQ_POINT:
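+		/* IL seq points just record the current native offset; no code is emitted */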
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+ break;
case OP_SEQ_POINT: {
int i;
/* Load info var */
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
- val = ((offset) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
+ val = ((offset) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* Load the info->bp_addrs [offset], which is either a valid address or the address of a trigger page */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr, FALSE);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
- if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
amd64_call_reg (code, ins->sreg1);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
- if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
- if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
break;
case OP_DYN_CALL: {
/* Save result */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
- amd64_mov_membase_reg (code, AMD64_R11, G_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
+ amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
break;
}
case OP_AMD64_SAVE_SP_TO_LMF: {
MonoInst *lmf_var = cfg->lmf_var;
- amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
+ amd64_mov_membase_reg (code, lmf_var->inst_basereg, lmf_var->inst_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
break;
}
case OP_X86_PUSH:
- g_assert (!cfg->arch.no_pushes);
+ g_assert_not_reached ();
amd64_push_reg (code, ins->sreg1);
break;
case OP_X86_PUSH_IMM:
- g_assert (!cfg->arch.no_pushes);
+ g_assert_not_reached ();
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_push_imm (code, ins->inst_imm);
break;
case OP_X86_PUSH_MEMBASE:
- g_assert (!cfg->arch.no_pushes);
+ g_assert_not_reached ();
amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
break;
case OP_X86_PUSH_OBJ: {
int size = ALIGN_TO (ins->inst_imm, 8);
- g_assert (!cfg->arch.no_pushes);
+ g_assert_not_reached ();
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_push_reg (code, AMD64_RDI);
amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
code = mono_emit_stack_alloc (cfg, code, ins);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
- if (cfg->param_area && cfg->arch.no_pushes)
+ if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
break;
case OP_LOCALLOC_IMM: {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
}
- if (cfg->param_area && cfg->arch.no_pushes)
+ if (cfg->param_area)
amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
break;
}
if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) ||
	 MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FILTER)) &&
- cfg->param_area && cfg->arch.no_pushes) {
+ cfg->param_area) {
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
}
break;
break;
case OP_STORER4_MEMBASE_REG:
/* This requires a double->single conversion */
- amd64_sse_cvtsd2ss_reg_reg (code, AMD64_XMM15, ins->sreg1);
- amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, AMD64_XMM15);
+ amd64_sse_cvtsd2ss_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
+ amd64_sse_movss_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, MONO_ARCH_FP_SCRATCH_REG);
break;
case OP_LOADR4_MEMBASE:
amd64_sse_movss_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
int dreg = ins->dreg;
guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
- if (dreg == ins->inst_basereg)
- dreg = AMD64_R11;
-
- if (dreg != ins->sreg2)
- amd64_mov_reg_reg (code, ins->dreg, ins->sreg2, size);
-
- x86_prefix (code, X86_LOCK_PREFIX);
- amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
-
- if (dreg != ins->dreg)
- amd64_mov_reg_reg (code, ins->dreg, dreg, size);
-
- break;
- }
- case OP_ATOMIC_ADD_NEW_I4:
- case OP_ATOMIC_ADD_NEW_I8: {
- int dreg = ins->dreg;
- guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
-
if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
dreg = AMD64_R11;
break;
case OP_EXTRACT_I8:
if (ins->inst_c0) {
- amd64_movhlps_reg_reg (code, AMD64_XMM15, ins->sreg1);
- amd64_movd_reg_xreg_size (code, ins->dreg, AMD64_XMM15, 8);
+ amd64_movhlps_reg_reg (code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg1);
+ amd64_movd_reg_xreg_size (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG, 8);
} else {
amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
}
amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
break;
case OP_INSERTX_I8_SLOW:
- amd64_movd_xreg_reg_size(code, AMD64_XMM15, ins->sreg2, 8);
+ amd64_movd_xreg_reg_size(code, MONO_ARCH_FP_SCRATCH_REG, ins->sreg2, 8);
if (ins->inst_c0)
- amd64_movlhps_reg_reg (code, ins->dreg, AMD64_XMM15);
+ amd64_movlhps_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
else
- amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM15);
+ amd64_sse_movsd_reg_reg (code, ins->dreg, MONO_ARCH_FP_SCRATCH_REG);
break;
case OP_INSERTX_R4_SLOW:
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *ins;
- int alloc_size, pos, i, cfa_offset, quad, max_epilog_size;
+ int alloc_size, pos, i, cfa_offset, quad, max_epilog_size, save_area_offset;
guint8 *code;
CallInfo *cinfo;
MonoInst *lmf_var = cfg->lmf_var;
guint alignment_check;
#endif
	cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
#if defined(__default_codegen__)
code = cfg->native_code = g_malloc (cfg->code_size);
/* The param area is always at offset 0 from sp */
/* This needs to be allocated here, since it has to come after the spill area */
- if (cfg->arch.no_pushes && cfg->param_area) {
+ if (cfg->param_area) {
if (cfg->arch.omit_fp)
// FIXME:
g_assert_not_reached ();
}
#endif
-#ifndef TARGET_WIN32
if (mini_get_debug_options ()->init_stacks) {
/* Fill the stack frame with a dummy value to force deterministic behavior */
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, -8, 8);
amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, -16, 8);
}
-#endif
/* Save LMF */
- if (method->save_lmf) {
+ if (method->save_lmf)
code = emit_setup_lmf (cfg, code, lmf_var->inst_offset, cfa_offset);
- }
/* Save callee saved registers */
- if (!method->save_lmf) {
- gint32 save_area_offset;
-
- if (cfg->arch.omit_fp) {
- save_area_offset = cfg->arch.reg_save_area_offset;
- /* Save caller saved registers after sp is adjusted */
- /* The registers are saved at the bottom of the frame */
- /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
- } else {
- /* The registers are saved just below the saved rbp */
- save_area_offset = cfg->arch.reg_save_area_offset;
- }
-
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_membase_reg (code, cfg->frame_reg, save_area_offset, i, 8);
+ if (cfg->arch.omit_fp) {
+ save_area_offset = cfg->arch.reg_save_area_offset;
+ /* Save caller saved registers after sp is adjusted */
+ /* The registers are saved at the bottom of the frame */
+ /* FIXME: Optimize this so the regs are saved at the end of the frame in increasing order */
+ } else {
+ /* The registers are saved just below the saved rbp */
+ save_area_offset = cfg->arch.reg_save_area_offset;
+ }
- if (cfg->arch.omit_fp) {
- mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
- /* These are handled automatically by the stack marking code */
- mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
- } else {
- mono_emit_unwind_op_offset (cfg, code, i, - (-save_area_offset + (2 * 8)));
- // FIXME: GC
- }
+ for (i = 0; i < AMD64_NREG; ++i) {
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
+ amd64_mov_membase_reg (code, cfg->frame_reg, save_area_offset, i, 8);
- save_area_offset += 8;
- async_exc_point (code);
+ if (cfg->arch.omit_fp) {
+ mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
+ } else {
+ mono_emit_unwind_op_offset (cfg, code, i, - (-save_area_offset + (2 * 8)));
+ // FIXME: GC
}
+
+ save_area_offset += 8;
+ async_exc_point (code);
+ }
}
/* store runtime generic context */
}
}
-#ifdef HOST_WIN32
- if (method->save_lmf) {
- code = emit_push_lmf (cfg, code, lmf_var->inst_offset, &args_clobbered);
- }
-#else
- args_clobbered = TRUE;
-#endif
+ if (cfg->method->save_lmf)
+ args_clobbered = TRUE;
if (trace) {
args_clobbered = TRUE;
if (cfg->compile_aot) {
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8);
} else {
amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
}
int max_epilog_size;
CallInfo *cinfo;
gint32 lmf_offset = cfg->lmf_var ? ((MonoInst*)cfg->lmf_var)->inst_offset : -1;
-
+ gint32 save_area_offset = cfg->arch.reg_save_area_offset;
+
max_epilog_size = get_max_epilog_size (cfg);
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->native_code = mono_realloc_native_code (cfg);
cfg->stat_code_reallocs++;
}
-
code = cfg->native_code + cfg->code_len;
+ cfg->has_unwind_info_for_epilog = TRUE;
+
+ /* Mark the start of the epilog */
+ mono_emit_unwind_op_mark_loc (cfg, code, 0);
+
+	/* Save the unwind state which is needed by the out-of-line code */
+ mono_emit_unwind_op_remember_state (cfg, code);
+
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
pos = 0;
if (method->save_lmf) {
-#ifdef HOST_WIN32
- code = emit_pop_lmf (cfg, code, lmf_offset);
-#endif
-
/* check if we need to restore protection of the stack after a stack overflow */
- if (mono_get_jit_tls_offset () != -1) {
+ if (!cfg->compile_aot && mono_get_jit_tls_offset () != -1) {
guint8 *patch;
code = mono_amd64_emit_tls_get (code, AMD64_RCX, mono_get_jit_tls_offset ());
/* we load the value in a separate instruction: this mechanism may be
* used later as a safer way to do thread interruption
*/
- amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RCX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RCX, MONO_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 8);
x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
patch = code;
x86_branch8 (code, X86_CC_Z, 0, FALSE);
} else {
/* FIXME: maybe save the jit tls in the prolog */
}
-
- /* Restore caller saved regs */
if (cfg->used_int_regs & (1 << AMD64_RBP)) {
- amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_RBX)) {
- amd64_mov_reg_membase (code, AMD64_RBX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R12)) {
- amd64_mov_reg_membase (code, AMD64_R12, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R13)) {
- amd64_mov_reg_membase (code, AMD64_R13, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R14)) {
- amd64_mov_reg_membase (code, AMD64_R14, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), 8);
- }
- if (cfg->used_int_regs & (1 << AMD64_R15)) {
-#if defined(__default_codegen__)
- amd64_mov_reg_membase (code, AMD64_R15, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), 8);
-#elif defined(__native_client_codegen__)
- g_assert_not_reached();
-#endif
- }
-#ifdef HOST_WIN32
- if (cfg->used_int_regs & (1 << AMD64_RDI)) {
- amd64_mov_reg_membase (code, AMD64_RDI, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), 8);
+ amd64_mov_reg_membase (code, AMD64_RBP, cfg->frame_reg, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), 8);
}
- if (cfg->used_int_regs & (1 << AMD64_RSI)) {
- amd64_mov_reg_membase (code, AMD64_RSI, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), 8);
- }
-#endif
- } else {
- gint32 save_area_offset = cfg->arch.reg_save_area_offset;
+ }
- for (i = 0; i < AMD64_NREG; ++i)
- if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
+ /* Restore callee saved regs */
+ for (i = 0; i < AMD64_NREG; ++i) {
+ if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->arch.saved_iregs & (1 << i))) {
+ /* Restore only used_int_regs, not arch.saved_iregs */
+ if (cfg->used_int_regs & (1 << i)) {
amd64_mov_reg_membase (code, i, cfg->frame_reg, save_area_offset, 8);
- save_area_offset += 8;
+ mono_emit_unwind_op_same_value (cfg, code, i);
+ async_exc_point (code);
}
+ save_area_offset += 8;
+ }
}
/* Load returned vtypes into registers if needed */
}
if (cfg->arch.omit_fp) {
- if (cfg->arch.stack_alloc_size)
+ if (cfg->arch.stack_alloc_size) {
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, cfg->arch.stack_alloc_size);
+ }
} else {
amd64_leave (code);
+ mono_emit_unwind_op_same_value (cfg, code, AMD64_RBP);
}
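+	/* Either way, only the return address now remains on the frame, so the CFA is rsp + 8 */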
+ mono_emit_unwind_op_def_cfa (cfg, code, AMD64_RSP, 8);
async_exc_point (code);
amd64_ret (code);
+ /* Restore the unwind state to be the same as before the epilog */
+ mono_emit_unwind_op_restore_state (cfg, code);
+
cfg->code_len = code - cfg->native_code;
g_assert (cfg->code_len < cfg->code_size);
int reg;
gint64 value;
- mono_arch_sigctx_to_monoctx (sigctx, &ctx);
+ mono_sigctx_to_monoctx (sigctx, &ctx);
rip = (guint8*)ctx.rip;
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
- amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, target), 8);
- amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
+ amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
g_assert ((code - start) < 64);
} else {
start = code = mono_global_codeman_reserve (64);
if (param_count == 0) {
- amd64_jump_membase (code, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ amd64_jump_membase (code, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
} else {
/* We have to shift the arguments left */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
#endif
}
- amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+ amd64_jump_membase (code, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
}
g_assert ((code - start) < 64);
}
- nacl_global_codeman_validate(&start, 64, &code);
-
- mono_debug_add_delegate_trampoline (start, code - start);
+ nacl_global_codeman_validate (&start, 64, &code);
if (code_len)
*code_len = code - start;
-
if (mono_jit_map_is_enabled ()) {
char *buff;
if (has_target)
return start;
}
+
+gpointer
+mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
+{
+ guint8 *code, *start;
+ int size = 20;
+
+ start = code = mono_global_codeman_reserve (size);
+
+ /* Replace the this argument with the target */
+ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
+ amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
+
+ if (load_imt_reg) {
+ /* Load the IMT reg */
+ amd64_mov_reg_membase (code, MONO_ARCH_IMT_REG, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 8);
+ }
+
+ /* Load the vtable */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8);
+ amd64_jump_membase (code, AMD64_RAX, offset);
+
+ return start;
+}
+
void
mono_arch_finish_init (void)
{
-#ifdef HOST_WIN32
- /*
- * We need to init this multiple times, since when we are first called, the key might not
- * be initialized yet.
- */
- jit_tls_offset = mono_get_jit_tls_key ();
-
- /* Only 64 tls entries can be accessed using inline code */
- if (jit_tls_offset >= 64)
- jit_tls_offset = -1;
-#else
-#ifdef MONO_XEN_OPT
+#if !defined(HOST_WIN32) && defined(MONO_XEN_OPT)
optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
-#endif
}
void
{
}
-#ifdef MONO_ARCH_HAVE_IMT
-
#if defined(__default_codegen__)
#define CMP_SIZE (6 + 1)
#define CMP_REG_REG_SIZE (4 + 1)
{
return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
}
-#endif
MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
}
}
-/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
gpointer
mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
{
- int offset;
gpointer *sp, old_value;
char *bp;
- const unsigned char *handler;
-
- /*Decode the first instruction to figure out where did we store the spvar*/
- /*Our jit MUST generate the following:
- mov %rsp, ?(%rbp)
-
- Which is encoded as: REX.W 0x89 mod_rm
- mod_rm (rsp, rbp, imm) which can be: (imm will never be zero)
- mod (reg + imm8): 01 reg(rsp): 100 rm(rbp): 101 -> 01100101 (0x65)
- mod (reg + imm32): 10 reg(rsp): 100 rm(rbp): 101 -> 10100101 (0xA5)
-
- FIXME can we generate frameless methods on this case?
-
- */
- handler = clause->handler_start;
-
- /*REX.W*/
- if (*handler != 0x48)
- return NULL;
- ++handler;
-
- /*mov r, r/m */
- if (*handler != 0x89)
- return NULL;
- ++handler;
-
- if (*handler == 0x65)
- offset = *(signed char*)(handler + 1);
- else if (*handler == 0xA5)
- offset = *(int*)(handler + 1);
- else
- return NULL;
	/* Load the spvar */
bp = MONO_CONTEXT_GET_BP (ctx);
- sp = *(gpointer*)(bp + offset);
+ sp = *(gpointer*)(bp + clause->exvar_offset);
old_value = *sp;
if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
#ifdef HOST_WIN32
- EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
- return FALSE;
+ EXCEPTION_RECORD* einfo = ((EXCEPTION_POINTERS*)info)->ExceptionRecord;
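+	/* For access violations, ExceptionInformation [1] holds the faulting address */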
+ if (einfo->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && (gpointer)einfo->ExceptionInformation [1] == bp_trigger_page)
+ return TRUE;
+ else
+ return FALSE;
#else
siginfo_t* sinfo = (siginfo_t*) info;
/* Sometimes the address is off by 4 */
mono_arch_is_single_step_event (void *info, void *sigctx)
{
#ifdef HOST_WIN32
- EXCEPTION_RECORD* einfo = (EXCEPTION_RECORD*)info;
- return FALSE;
+ EXCEPTION_RECORD* einfo = ((EXCEPTION_POINTERS*)info)->ExceptionRecord;
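+	/* As above, ExceptionInformation [1] is the faulting address */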
+ if (einfo->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && (gpointer)einfo->ExceptionInformation [1] == ss_trigger_page)
+ return TRUE;
+ else
+ return FALSE;
#else
siginfo_t* sinfo = (siginfo_t*) info;
/* Sometimes the address is off by 4 */
}
#endif
+
+gboolean
+mono_arch_opcode_supported (int opcode)
+{
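+	/* These all map to lock-prefixed instructions (xadd, cmpxchg) or xchg on amd64 */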
+ switch (opcode) {
+ case OP_ATOMIC_ADD_I4:
+ case OP_ATOMIC_ADD_I8:
+ case OP_ATOMIC_EXCHANGE_I4:
+ case OP_ATOMIC_EXCHANGE_I8:
+ case OP_ATOMIC_CAS_I4:
+ case OP_ATOMIC_CAS_I8:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}