#include "mini-amd64.h"
#include "cpu-amd64.h"
#include "debugger-agent.h"
+#include "mini-gc.h"
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
/* Only if storage == ArgValuetypeInReg */
ArgStorage pair_storage [2];
gint8 pair_regs [2];
+ int nregs; /* Number of 8-byte quads used when storage == ArgValuetypeInReg */
} ArgInfo;
typedef struct {
ainfo->storage = ArgOnStack;
/* Since the same stack slot size is used for all arg */
/* types, it needs to be big enough to hold them all */
- (*stack_size) += SIZEOF_REGISTER;
+ (*stack_size) += sizeof(mgreg_t);
}
else {
ainfo->storage = ArgInIReg;
ainfo->storage = ArgOnStack;
/* Since the same stack slot size is used for both float */
/* types, it needs to be big enough to hold them both */
- (*stack_size) += SIZEOF_REGISTER;
+ (*stack_size) += sizeof(mgreg_t);
}
else {
/* A double register */
/* If this struct can't be split up naturally into 8-byte */
/* chunks (registers), pass it on the stack. */
if (sig->pinvoke && !pass_on_stack) {
- info = mono_marshal_load_type_info (klass);
- g_assert(info);
guint32 align;
guint32 field_size;
+
+ info = mono_marshal_load_type_info (klass);
+ g_assert(info);
for (i = 0; i < info->num_fields; ++i) {
field_size = mono_marshal_type_size (info->fields [i].field->type,
info->fields [i].mspec,
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
+ ainfo->nregs = nquads;
for (quad = 0; quad < nquads; ++quad) {
switch (args [quad]) {
case ARG_CLASS_INTEGER:
if (sig->pinvoke)
*stack_size += ALIGN_TO (info->native_size, 8);
else
- *stack_size += nquads * SIZEOF_REGISTER;
+ *stack_size += nquads * sizeof(mgreg_t);
ainfo->storage = ArgOnStack;
}
}
stack_size += 0x20;
#endif
+#ifndef MONO_AMD64_NO_PUSHES
if (stack_size & 0x8) {
/* The AMD64 ABI requires each stack frame to be 16 byte aligned */
cinfo->need_stack_align = TRUE;
stack_size += 8;
}
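+ /* e.g. with stack_size == 0x28 the check above fires (0x28 & 0x8 != 0),
+ * and 8 bytes of padding bring the frame to 0x30, a multiple of 16. */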
+#endif
cinfo->stack_usage = stack_size;
cinfo->reg_usage = gr;
}
if (cfg->method->save_lmf) {
- /* Reserve stack space for saving LMF */
- if (cfg->arch.omit_fp) {
- cfg->arch.lmf_offset = offset;
- offset += sizeof (MonoLMF);
- }
- else {
- offset += sizeof (MonoLMF);
- cfg->arch.lmf_offset = -offset;
- }
+ /* The LMF var is allocated normally */
} else {
if (cfg->arch.omit_fp)
cfg->arch.reg_save_area_offset = offset;
/* Reserve space for caller saved registers */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- offset += SIZEOF_REGISTER;
+ offset += sizeof(mgreg_t);
}
}
/* Allocate locals */
if (!cfg->globalra) {
- offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
+ offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
char *mname = mono_method_full_name (cfg->method, TRUE);
cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
- offset = ALIGN_TO (offset, SIZEOF_REGISTER);
+ offset = ALIGN_TO (offset, sizeof(mgreg_t));
if (cfg->arch.omit_fp) {
ins->inst_offset = offset;
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * SIZEOF_REGISTER : SIZEOF_REGISTER;
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
} else {
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * SIZEOF_REGISTER : SIZEOF_REGISTER;
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
ins->inst_offset = - offset;
}
break;
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
- offset = ALIGN_TO (offset, SIZEOF_REGISTER);
+ offset = ALIGN_TO (offset, sizeof(mgreg_t));
if (cfg->arch.omit_fp) {
ins->inst_offset = offset;
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * SIZEOF_REGISTER : SIZEOF_REGISTER;
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
// Arguments are not yet supported by the stack map creation code
//cfg->locals_max_stack_offset = MAX (cfg->locals_max_stack_offset, offset);
} else {
- offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * SIZEOF_REGISTER : SIZEOF_REGISTER;
+ offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
ins->inst_offset = - offset;
//cfg->locals_min_stack_offset = MIN (cfg->locals_min_stack_offset, offset);
}
*/
cfg->arch.no_pushes = TRUE;
#endif
+
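+ /*
+ * The LMF lives in an ordinary volatile local: its stack slot is
+ * assigned by the regular allocator instead of a hand-reserved
+ * cfg->arch.lmf_offset.
+ */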
+ if (cfg->method->save_lmf) {
+ MonoInst *lmf_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ lmf_var->flags |= MONO_INST_VOLATILE;
+ lmf_var->flags |= MONO_INST_LMF;
+ cfg->arch.lmf_var = lmf_var;
+ }
}
static void
switch (storage) {
case ArgInIReg:
MONO_INST_NEW (cfg, ins, OP_MOVE);
- ins->dreg = mono_alloc_ireg (cfg);
+ ins->dreg = mono_alloc_ireg_copy (cfg, tree->dreg);
ins->sreg1 = tree->dreg;
MONO_ADD_INS (cfg->cbb, ins);
mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
{
switch (storage) {
case ArgInIReg:
-#if defined(__default_codegen__)
- return OP_LOAD_MEMBASE;
-#elif defined(__native_client_codegen__)
+#if defined(__mono_ilp32__)
return OP_LOADI8_MEMBASE;
+#else
+ return OP_LOAD_MEMBASE;
#endif
case ArgInDoubleSSEReg:
return OP_LOADR8_MEMBASE;
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
}
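+ /* When precise GC maps are being built, record that this outgoing
+ * param area slot now holds a value of type t. */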
+ if (cfg->compute_gc_maps) {
+ MonoInst *def;
+
+ EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, t);
+ }
}
}
}
MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
load->inst_basereg = src->dreg;
- load->inst_offset = part * SIZEOF_REGISTER;
+ load->inst_offset = part * sizeof(mgreg_t);
switch (ainfo->pair_storage [part]) {
case ArgInIReg:
MONO_ADD_INS (cfg->cbb, arg);
}
}
+
+ if (cfg->compute_gc_maps) {
+ MonoInst *def;
+ EMIT_NEW_GC_PARAM_SLOT_LIVENESS_DEF (cfg, def, ainfo->offset, &ins->klass->byval_arg);
+ }
}
}
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if !defined(__native_client__)
+#if !defined(__mono_ilp32__)
case MONO_TYPE_I8:
case MONO_TYPE_U8:
#endif
g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
p->regs [greg ++] = PTR_TO_GREG(*(arg));
break;
-#if defined(__native_client__)
+#if defined(__mono_ilp32__)
case MONO_TYPE_I8:
case MONO_TYPE_U8:
g_assert (dinfo->cinfo->args [i + sig->hasthis].reg == param_regs [greg]);
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
- } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START)) {
+ } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_REG) || (ins2->opcode == OP_STOREI8_MEMBASE_REG) || (ins2->opcode == OP_STORE_MEMBASE_REG) || (ins2->opcode == OP_LIVERANGE_START) || (ins2->opcode == OP_GC_LIVENESS_DEF) || (ins2->opcode == OP_GC_LIVENESS_USE)) {
/* Continue */
} else if (((ins2->opcode == OP_ICONST) || (ins2->opcode == OP_I8CONST)) && (ins2->dreg == ins->dreg) && (ins2->inst_c0 == 0)) {
NULLIFY_INS (ins2);
ins->sreg2 = temp->dreg;
}
break;
-#ifndef __native_client_codegen__
- /* In AMD64 NaCl, pointers are 4 bytes, */
- /* so LOAD_* != LOADI8_* */
- /* Also, don't generate memindex opcodes (to simplify */
- /* read sandboxing) */
+#ifndef __mono_ilp32__
case OP_LOAD_MEMBASE:
+#endif
case OP_LOADI8_MEMBASE:
+#ifndef __native_client_codegen__
+ /* Don't generate memindex opcodes (to simplify */
+ /* read sandboxing) */
if (!amd64_is_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
ins->inst_indexreg = temp->dreg;
}
- break;
#endif
-#ifndef __native_client_codegen__
+ break;
+#ifndef __mono_ilp32__
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI8_MEMBASE_IMM:
/* Load the destination address */
g_assert (loc->opcode == OP_REGOFFSET);
- amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, SIZEOF_VOID_P);
+ amd64_mov_reg_membase (code, AMD64_RCX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_membase_reg (code, AMD64_RCX, (quad * SIZEOF_REGISTER), cinfo->ret.pair_regs [quad], SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, AMD64_RCX, (quad * sizeof(mgreg_t)), cinfo->ret.pair_regs [quad], sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
amd64_movss_membase_reg (code, AMD64_RCX, (quad * 8), cinfo->ret.pair_regs [quad]);
#endif /* DISABLE_JIT */
+#ifdef __APPLE__
+static int tls_gs_offset;
+#endif
+
+gboolean
+mono_amd64_have_tls_get (void)
+{
+#ifdef __APPLE__
+ static gboolean have_tls_get = FALSE;
+ static gboolean inited = FALSE;
+ guint8 *ins;
+
+ if (inited)
+ return have_tls_get;
+
+ ins = (guint8*)pthread_getspecific;
+
+ /*
+ * The %gs-relative offset of the TLS area is not a public ABI on Darwin,
+ * so we recover it by pattern matching the implementation of
+ * pthread_getspecific. We're looking for these two instructions:
+ *
+ * mov %gs:[offset](,%rdi,8),%rax
+ * retq
+ *
+ * Byte by byte: 0x65 is the %gs prefix, 0x48 0x8b a REX.W MOV, 0x04 0xfd
+ * the ModRM/SIB pair encoding (,%rdi,8) with a 32-bit displacement,
+ * ins [5..8] that displacement in little-endian order (only its low byte
+ * may be non-zero), and 0xc3 the retq.
+ */
+ have_tls_get = ins [0] == 0x65 &&
+ ins [1] == 0x48 &&
+ ins [2] == 0x8b &&
+ ins [3] == 0x04 &&
+ ins [4] == 0xfd &&
+ ins [6] == 0x00 &&
+ ins [7] == 0x00 &&
+ ins [8] == 0x00 &&
+ ins [9] == 0xc3;
+
+ tls_gs_offset = ins [5];
+
+ inited = TRUE;
+
+ return have_tls_get;
+#else
+ return TRUE;
+#endif
+}
+
/*
* mono_amd64_emit_tls_get:
* @code: buffer to store code to
g_assert (tls_offset < 64);
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
+#elif defined(__APPLE__)
+ x86_prefix (code, X86_GS_PREFIX);
+ amd64_mov_reg_mem (code, dreg, tls_gs_offset + (tls_offset * 8), 8);
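+ /* e.g. with tls_gs_offset == 0x60 and tls_offset == 4 this emits
+ * mov %gs:0x80,%dreg (0x60 + 4 * 8). */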
#else
if (optimize_for_xen) {
x86_prefix (code, X86_FS_PREFIX);
return code;
}
+/*
+ * emit_setup_lmf:
+ *
+ * Emit code to initialize an LMF structure at LMF_OFFSET.
+ */
+static guint8*
+emit_setup_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, int cfa_offset)
+{
+ int i;
+
+ /*
+ * The ip field is not set; the exception handling code will obtain it
+ * from the stack location pointed to by the sp field.
+ *
+ * sp is saved right before calls, but we need to save it here too so
+ * async stack walks work.
+ */
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
+ /* Skip method (only needed for trampoline LMF frames) */
+ /* Save callee saved regs */
+ for (i = 0; i < MONO_MAX_IREGS; ++i) {
+ int offset;
+
+ switch (i) {
+ case AMD64_RBX: offset = G_STRUCT_OFFSET (MonoLMF, rbx); break;
+ case AMD64_RBP: offset = G_STRUCT_OFFSET (MonoLMF, rbp); break;
+ case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
+ case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
+ case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
+#ifndef __native_client_codegen__
+ case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
+#endif
+#ifdef HOST_WIN32
+ case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
+ case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
+#endif
+ default:
+ offset = -1;
+ break;
+ }
+
+ if (offset != -1) {
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + offset, i, 8);
+ if ((cfg->arch.omit_fp || (i != AMD64_RBP)) && cfa_offset != -1)
+ mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - (lmf_offset + offset)));
+ }
+ }
+
+ /* These can't contain refs */
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), SLOT_NOREF);
+
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), SLOT_NOREF);
+#ifdef HOST_WIN32
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), SLOT_NOREF);
+ mini_gc_set_slot_type_from_fp (cfg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), SLOT_NOREF);
+#endif
+
+ return code;
+}
+
+/*
+ * emit_save_lmf:
+ *
+ * Emit code to push an LMF structure on the LMF stack.
+ */
+static guint8*
+emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset, gboolean *args_clobbered)
+{
+ if ((lmf_tls_offset != -1) && !optimize_for_xen) {
+ /*
+ * Optimized version which uses the mono_lmf TLS variable instead of
+ * indirection through the mono_lmf_addr TLS variable.
+ */
+ /* %rax = previous_lmf */
+ x86_prefix (code, X86_FS_PREFIX);
+ amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
+
+ /* Save previous_lmf */
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_RAX, 8);
+ /* Set new lmf */
+ if (lmf_offset == 0) {
+ x86_prefix (code, X86_FS_PREFIX);
+ amd64_mov_mem_reg (code, lmf_tls_offset, cfg->frame_reg, 8);
+ } else {
+ amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
+ x86_prefix (code, X86_FS_PREFIX);
+ amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
+ }
+ } else {
+ if (lmf_addr_tls_offset != -1) {
+ /* Load lmf_addr quickly using the FS register */
+ code = mono_amd64_emit_tls_get (code, AMD64_RAX, lmf_addr_tls_offset);
+#ifdef HOST_WIN32
+ /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
+ /* FIXME: Add a separate key for LMF to avoid this */
+ amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
+#endif
+ }
+ else {
+ /*
+ * The call might clobber argument registers, but they are already
+ * saved to the stack/global regs.
+ */
+ if (args_clobbered)
+ *args_clobbered = TRUE;
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
+ (gpointer)"mono_get_lmf_addr", TRUE);
+ }
+
+ /* Save lmf_addr */
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
+ /* Save previous_lmf */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
+ /* Set new lmf */
+ amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
+ amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
+ }
+
+ return code;
+}
+
+/*
+ * emit_restore_lmf:
+ *
+ * Emit code to pop an LMF structure from the LMF stack.
+ */
+static guint8*
+emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
+{
+ if ((lmf_tls_offset != -1) && !optimize_for_xen) {
+ /*
+ * Optimized version which uses the mono_lmf TLS variable instead of indirection
+ * through the mono_lmf_addr TLS variable.
+ */
+ /* reg = previous_lmf */
+ amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
+ x86_prefix (code, X86_FS_PREFIX);
+ amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
+ } else {
+ /* Restore previous lmf */
+ amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
+ amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
+ amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
+ }
+
+ return code;
+}
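+
+/*
+ * Taken together, the non-TLS paths of emit_save_lmf/emit_restore_lmf
+ * amount to the usual linked-stack push/pop, roughly:
+ *
+ *   lmf->lmf_addr = mono_get_lmf_addr ();
+ *   lmf->previous_lmf = *lmf->lmf_addr;
+ *   *lmf->lmf_addr = lmf;                    (push in the prolog)
+ *   ...
+ *   *lmf->lmf_addr = lmf->previous_lmf;      (pop in the epilog)
+ */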
+
#define REAL_PRINT_REG(text,reg) \
mono_assert (reg >= 0); \
amd64_push_reg (code, AMD64_RAX); \
amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
break;
case OP_LOAD_MEM:
-#ifdef __native_client_codegen__
- /* For NaCl, pointers are 4 bytes, so separate these */
+#ifdef __mono_ilp32__
+ /* In ILP32, pointers are 4 bytes, so separate these */
/* cases, use literal 8 below where we really want 8 */
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, sizeof(gpointer));
amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
break;
case OP_MOVE:
- amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, SIZEOF_REGISTER);
+ amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof(mgreg_t));
break;
case OP_AMD64_SET_XMMREG_R4: {
amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg1);
else {
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= SIZEOF_REGISTER;
+ pos -= sizeof(mgreg_t);
/* Restore callee-saved registers */
for (i = AMD64_NREG - 1; i > 0; --i) {
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
- amd64_mov_reg_membase (code, i, AMD64_RBP, pos, SIZEOF_REGISTER);
- pos += SIZEOF_REGISTER;
+ amd64_mov_reg_membase (code, i, AMD64_RBP, pos, sizeof(mgreg_t));
+ pos += sizeof(mgreg_t);
}
}
/* Copy arguments on the stack to our argument area */
- for (i = 0; i < call->stack_usage; i += SIZEOF_REGISTER) {
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, SIZEOF_REGISTER);
- amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, SIZEOF_REGISTER);
+ for (i = 0; i < call->stack_usage; i += sizeof(mgreg_t)) {
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, i, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RBP, 16 + i, AMD64_RAX, sizeof(mgreg_t));
}
if (pos)
else
amd64_set_reg_template (code, AMD64_R11);
amd64_jump_reg (code, AMD64_R11);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CHECK_THIS:
code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method, FALSE);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr, FALSE);
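+ /* Record the native offset right after the call so the GC can find
+ * the stack map for this callsite. */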
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
}
amd64_call_reg (code, ins->sreg1);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
call = (MonoCallInst*)ins;
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
code = emit_move_return_value (cfg, ins, code);
/* Set argument registers */
for (i = 0; i < PARAM_REGS; ++i)
- amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * SIZEOF_REGISTER, SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof(mgreg_t), sizeof(mgreg_t));
/* Make the call */
amd64_call_reg (code, AMD64_R10);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
+
/* Save result */
amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
amd64_mov_membase_reg (code, AMD64_R11, G_STRUCT_OFFSET (DynCallArgs, res), AMD64_RAX, 8);
break;
}
- case OP_AMD64_SAVE_SP_TO_LMF:
- amd64_mov_membase_reg (code, cfg->frame_reg, cfg->arch.lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
+ case OP_AMD64_SAVE_SP_TO_LMF: {
+ MonoInst *lmf_var = cfg->arch.lmf_var;
+ amd64_mov_membase_reg (code, cfg->frame_reg, lmf_var->inst_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
break;
+ }
case OP_X86_PUSH:
g_assert (!cfg->arch.no_pushes);
amd64_push_reg (code, ins->sreg1);
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception", FALSE);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
amd64_mov_reg_reg (code, AMD64_ARG_REG1, ins->sreg1, 8);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception", FALSE);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
break;
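+ /* For SHUFPS the imm8's low four bits select two lanes from sreg1 and
+ * the high four bits two lanes from sreg2; SHUFPD selects one lane per
+ * bit, hence the 0x3 limit. */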
+ case OP_SHUFPS:
+ g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
+ amd64_sse_shufps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
+ break;
+ case OP_SHUFPD:
+ g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0x3);
+ amd64_sse_shufpd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
+ break;
case OP_ADDPD:
amd64_sse_addpd_reg_reg (code, ins->sreg1, ins->sreg2);
case OP_PSHLQ_REG:
amd64_sse_psllq_reg_reg (code, ins->dreg, ins->sreg2);
break;
+ case OP_CVTDQ2PD:
+ amd64_sse_cvtdq2pd_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_CVTDQ2PS:
+ amd64_sse_cvtdq2ps_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_CVTPD2DQ:
+ amd64_sse_cvtpd2dq_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_CVTPD2PS:
+ amd64_sse_cvtpd2ps_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_CVTPS2DQ:
+ amd64_sse_cvtps2dq_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_CVTPS2PD:
+ amd64_sse_cvtps2pd_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_CVTTPD2DQ:
+ amd64_sse_cvttpd2dq_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
+ case OP_CVTTPS2DQ:
+ amd64_sse_cvttps2dq_reg_reg (code, ins->dreg, ins->sreg1);
+ break;
case OP_ICONV_TO_X:
amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
#endif
break;
}
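+ /* These pseudo ops generate no machine code; they just record the
+ * current native offset for the GC map machinery. */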
+ case OP_GC_LIVENESS_DEF:
+ case OP_GC_LIVENESS_USE:
+ case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ break;
+ case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+ break;
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
g_assert_not_reached ();
}
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
+mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
{
MonoJumpInfo *patch_info;
gboolean compile_aot = !run_cctors;
int alloc_size, pos, i, cfa_offset, quad, max_epilog_size;
guint8 *code;
CallInfo *cinfo;
- gint32 lmf_offset = cfg->arch.lmf_offset;
+ MonoInst *lmf_var = cfg->arch.lmf_var;
gboolean args_clobbered = FALSE;
gboolean trace = FALSE;
#ifdef __native_client_codegen__
// IP saved at CFA - 8
mono_emit_unwind_op_offset (cfg, code, AMD64_RIP, -cfa_offset);
async_exc_point (code);
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
if (!cfg->arch.omit_fp) {
amd64_push_reg (code, AMD64_RBP);
#ifdef HOST_WIN32
mono_arch_unwindinfo_add_push_nonvol (&cfg->arch.unwindinfo, cfg->native_code, code, AMD64_RBP);
#endif
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset, SLOT_NOREF);
- amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, SIZEOF_REGISTER);
+ amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
async_exc_point (code);
#ifdef HOST_WIN32
offset += 8;
mono_emit_unwind_op_offset (cfg, code, i, - offset);
async_exc_point (code);
+
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, - offset, SLOT_NOREF);
}
}
if (cfg->arch.omit_fp)
// FIXME:
g_assert_not_reached ();
- cfg->stack_offset += ALIGN_TO (cfg->param_area, SIZEOF_REGISTER);
+ cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof(mgreg_t));
}
if (cfg->arch.omit_fp) {
/*
- * On enter, the stack is misaligned by the the pushing of the return
+ * On enter, the stack is misaligned by the pushing of the return
* address. It is either made aligned by the pushing of %rbp, or by
* this.
*/
alloc_size = ALIGN_TO (cfg->stack_offset, 8);
- if ((alloc_size % 16) == 0)
+ if ((alloc_size % 16) == 0) {
alloc_size += 8;
+ /* Mark the padding slot as NOREF */
+ mini_gc_set_slot_type_from_cfa (cfg, -cfa_offset - sizeof (mgreg_t), SLOT_NOREF);
+ }
} else {
alloc_size = ALIGN_TO (cfg->stack_offset, MONO_ARCH_FRAME_ALIGNMENT);
-
+ if (cfg->stack_offset != alloc_size) {
+ /* Mark the padding slot as NOREF */
+ mini_gc_set_slot_type_from_fp (cfg, -alloc_size + cfg->param_area, SLOT_NOREF);
+ }
+ cfg->arch.sp_fp_offset = alloc_size; /* fp - sp once the frame is allocated */
alloc_size -= pos;
}
/* Save LMF */
if (method->save_lmf) {
- /*
- * The ip field is not set, the exception handling code will obtain it from the stack location pointed to by the sp field.
- */
- /*
- * sp is saved right before calls but we need to save it here too so
- * async stack walks would work.
- */
- amd64_mov_membase_reg (code, cfg->frame_reg, cfg->arch.lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
- /* Skip method (only needed for trampoline LMF frames) */
- /* Save callee saved regs */
- for (i = 0; i < MONO_MAX_IREGS; ++i) {
- int offset;
-
- switch (i) {
- case AMD64_RBX: offset = G_STRUCT_OFFSET (MonoLMF, rbx); break;
- case AMD64_RBP: offset = G_STRUCT_OFFSET (MonoLMF, rbp); break;
- case AMD64_R12: offset = G_STRUCT_OFFSET (MonoLMF, r12); break;
- case AMD64_R13: offset = G_STRUCT_OFFSET (MonoLMF, r13); break;
- case AMD64_R14: offset = G_STRUCT_OFFSET (MonoLMF, r14); break;
-#ifndef __native_client_codegen__
- case AMD64_R15: offset = G_STRUCT_OFFSET (MonoLMF, r15); break;
-#endif
-#ifdef HOST_WIN32
- case AMD64_RDI: offset = G_STRUCT_OFFSET (MonoLMF, rdi); break;
- case AMD64_RSI: offset = G_STRUCT_OFFSET (MonoLMF, rsi); break;
-#endif
- default:
- offset = -1;
- break;
- }
-
- if (offset != -1) {
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + offset, i, 8);
- if (cfg->arch.omit_fp || (i != AMD64_RBP))
- mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - (lmf_offset + offset)));
- }
- }
+ code = emit_setup_lmf (cfg, code, lmf_var->inst_offset, cfa_offset);
}
/* Save callee saved registers */
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
amd64_mov_membase_reg (code, AMD64_RSP, save_area_offset, i, 8);
mono_emit_unwind_op_offset (cfg, code, i, - (cfa_offset - save_area_offset));
+
+ /* These are handled automatically by the stack marking code */
+ mini_gc_set_slot_type_from_cfa (cfg, - (cfa_offset - save_area_offset), SLOT_NOREF);
+
save_area_offset += 8;
async_exc_point (code);
}
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * SIZEOF_REGISTER), ainfo->pair_regs [quad], SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
- amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * SIZEOF_REGISTER), ainfo->pair_regs [quad]);
+ amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
- amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * SIZEOF_REGISTER), ainfo->pair_regs [quad]);
+ amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgNone:
break;
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * SIZEOF_REGISTER), ainfo->pair_regs [quad], SIZEOF_REGISTER);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
- amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * SIZEOF_REGISTER), ainfo->pair_regs [quad]);
+ amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgInDoubleSSEReg:
- amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * SIZEOF_REGISTER), ainfo->pair_regs [quad]);
+ amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
break;
case ArgNone:
break;
}
if (method->save_lmf) {
- if ((lmf_tls_offset != -1) && !optimize_for_xen) {
- /*
- * Optimized version which uses the mono_lmf TLS variable instead of
- * indirection through the mono_lmf_addr TLS variable.
- */
- /* %rax = previous_lmf */
- x86_prefix (code, X86_FS_PREFIX);
- amd64_mov_reg_mem (code, AMD64_RAX, lmf_tls_offset, 8);
-
- /* Save previous_lmf */
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_RAX, 8);
- /* Set new lmf */
- if (lmf_offset == 0) {
- x86_prefix (code, X86_FS_PREFIX);
- amd64_mov_mem_reg (code, lmf_tls_offset, cfg->frame_reg, 8);
- } else {
- amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
- x86_prefix (code, X86_FS_PREFIX);
- amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
- }
- } else {
- if (lmf_addr_tls_offset != -1) {
- /* Load lmf quicky using the FS register */
- code = mono_amd64_emit_tls_get (code, AMD64_RAX, lmf_addr_tls_offset);
-#ifdef HOST_WIN32
- /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
- /* FIXME: Add a separate key for LMF to avoid this */
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
-#endif
- }
- else {
- /*
- * The call might clobber argument registers, but they are already
- * saved to the stack/global regs.
- */
- args_clobbered = TRUE;
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_get_lmf_addr", TRUE);
- }
-
- /* Save lmf_addr */
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
- /* Save previous_lmf */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
- amd64_mov_membase_reg (code, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
- /* Set new lmf */
- amd64_lea_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset);
- amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
- }
+ code = emit_save_lmf (cfg, code, lmf_var->inst_offset, &args_clobbered);
}
if (trace) {
guint8 *code;
int max_epilog_size;
CallInfo *cinfo;
- gint32 lmf_offset = cfg->arch.lmf_offset;
+ gint32 lmf_offset = cfg->arch.lmf_var ? ((MonoInst*)cfg->arch.lmf_var)->inst_offset : -1;
max_epilog_size = get_max_epilog_size (cfg);
/* check if we need to restore protection of the stack after a stack overflow */
if (mono_get_jit_tls_offset () != -1) {
guint8 *patch;
- code = mono_amd64_emit_tls_get (code, X86_ECX, mono_get_jit_tls_offset ());
+ code = mono_amd64_emit_tls_get (code, AMD64_RCX, mono_get_jit_tls_offset ());
/* we load the value in a separate instruction: this mechanism may be
* used later as a safer way to do thread interruption
*/
- amd64_mov_reg_membase (code, X86_ECX, X86_ECX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 8);
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RCX, G_STRUCT_OFFSET (MonoJitTlsData, restore_stack_prot), 8);
x86_alu_reg_imm (code, X86_CMP, X86_ECX, 0);
patch = code;
- x86_branch8 (code, X86_CC_Z, 0, FALSE);
+ x86_branch8 (code, X86_CC_Z, 0, FALSE);
/* note that the call trampoline will preserve eax/edx */
x86_call_reg (code, X86_ECX);
x86_patch (patch, code);
} else {
/* FIXME: maybe save the jit tls in the prolog */
}
- if ((lmf_tls_offset != -1) && !optimize_for_xen) {
- /*
- * Optimized version which uses the mono_lmf TLS variable instead of indirection
- * through the mono_lmf_addr TLS variable.
- */
- /* reg = previous_lmf */
- amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
- x86_prefix (code, X86_FS_PREFIX);
- amd64_mov_mem_reg (code, lmf_tls_offset, AMD64_R11, 8);
- } else {
- /* Restore previous lmf */
- amd64_mov_reg_membase (code, AMD64_RCX, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
- amd64_mov_reg_membase (code, AMD64_R11, cfg->frame_reg, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
- amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
- }
+
+ code = emit_restore_lmf (cfg, code, lmf_offset);
/* Restore caller saved regs */
if (cfg->used_int_regs & (1 << AMD64_RBP)) {
else {
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i)))
- pos -= SIZEOF_REGISTER;
+ pos -= sizeof(mgreg_t);
if (pos) {
- if (pos == - SIZEOF_REGISTER) {
+ if (pos == - sizeof(mgreg_t)) {
/* Only one register, so avoid lea */
for (i = AMD64_NREG - 1; i > 0; --i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
for (quad = 0; quad < 2; quad ++) {
switch (ainfo->pair_storage [quad]) {
case ArgInIReg:
- amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * SIZEOF_REGISTER), SIZEOF_REGISTER);
+ amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)), sizeof(mgreg_t));
break;
case ArgInFloatSSEReg:
- amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * SIZEOF_REGISTER));
+ amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
break;
case ArgInDoubleSSEReg:
- amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * SIZEOF_REGISTER));
+ amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof(mgreg_t)));
break;
case ArgNone:
break;
switch (patch_info->type) {
case MONO_PATCH_INFO_R8:
case MONO_PATCH_INFO_R4: {
- guint8 *pos, *patch_pos, *target_pos;
+ guint8 *pos, *patch_pos;
+ guint32 target_pos;
/* The SSE opcodes require a 16 byte alignment */
#if defined(__default_codegen__)
g_assert ((code - start) < 64);
}
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_global_codeman_validate(&start, 64, &code);
-#endif
mono_debug_add_delegate_trampoline (start, code - start);
mono_stats.imt_thunks_size += code - start;
g_assert (code - start <= size);
-#if defined(__native_client_codegen__) && defined(__native_client__)
nacl_domain_code_validate(domain, &start, size, &code);
-#endif
return start;
}