mono_arch_compute_omit_fp (cfg);
- if (cfg->globalra) {
- if (cfg->arch.omit_fp)
- regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
-
- regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
- regs = g_list_prepend (regs, (gpointer)AMD64_R12);
- regs = g_list_prepend (regs, (gpointer)AMD64_R13);
- regs = g_list_prepend (regs, (gpointer)AMD64_R14);
-#ifndef __native_client_codegen__
- regs = g_list_prepend (regs, (gpointer)AMD64_R15);
-#endif
-
- regs = g_list_prepend (regs, (gpointer)AMD64_R10);
- regs = g_list_prepend (regs, (gpointer)AMD64_R9);
- regs = g_list_prepend (regs, (gpointer)AMD64_R8);
- regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
- regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
- regs = g_list_prepend (regs, (gpointer)AMD64_RDX);
- regs = g_list_prepend (regs, (gpointer)AMD64_RCX);
- regs = g_list_prepend (regs, (gpointer)AMD64_RAX);
- } else {
- if (cfg->arch.omit_fp)
- regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
+ if (cfg->arch.omit_fp)
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
- /* We use the callee saved registers for global allocation */
- regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
- regs = g_list_prepend (regs, (gpointer)AMD64_R12);
- regs = g_list_prepend (regs, (gpointer)AMD64_R13);
- regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+ /* We use the callee saved registers for global allocation */
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R12);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R13);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R14);
#ifndef __native_client_codegen__
- regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R15);
#endif
#ifdef TARGET_WIN32
- regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
- regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
#endif
- }
return regs;
}
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
if ((MONO_TYPE_ISSTRUCT (sig_ret) && !mono_class_from_mono_type (sig_ret)->enumtype) || ((sig_ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
- if (cfg->globalra) {
- cfg->vret_addr->opcode = OP_REGVAR;
- cfg->vret_addr->inst_c0 = cinfo->ret.reg;
+ /* The register is volatile */
+ cfg->vret_addr->opcode = OP_REGOFFSET;
+ cfg->vret_addr->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp) {
+ cfg->vret_addr->inst_offset = offset;
+ offset += 8;
} else {
- /* The register is volatile */
- cfg->vret_addr->opcode = OP_REGOFFSET;
- cfg->vret_addr->inst_basereg = cfg->frame_reg;
- if (cfg->arch.omit_fp) {
- cfg->vret_addr->inst_offset = offset;
- offset += 8;
- } else {
- offset += 8;
- cfg->vret_addr->inst_offset = -offset;
- }
- if (G_UNLIKELY (cfg->verbose_level > 1)) {
- printf ("vret_addr =");
- mono_print_ins (cfg->vret_addr);
- }
+ offset += 8;
+ cfg->vret_addr->inst_offset = -offset;
+ }
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr =");
+ mono_print_ins (cfg->vret_addr);
}
}
else {
default:
g_assert_not_reached ();
}
- if (!cfg->globalra)
- cfg->ret->dreg = cfg->ret->inst_c0;
+ cfg->ret->dreg = cfg->ret->inst_c0;
}
/* Allocate locals */
- if (!cfg->globalra) {
- offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
- if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
- char *mname = mono_method_full_name (cfg->method, TRUE);
- cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
- cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
- g_free (mname);
- return;
- }
+ offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
+ if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
+ char *mname = mono_method_full_name (cfg->method, TRUE);
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
+ g_free (mname);
+ return;
+ }
- if (locals_stack_align) {
- offset += (locals_stack_align - 1);
- offset &= ~(locals_stack_align - 1);
- }
- if (cfg->arch.omit_fp) {
- cfg->locals_min_stack_offset = offset;
- cfg->locals_max_stack_offset = offset + locals_stack_size;
- } else {
- cfg->locals_min_stack_offset = - (offset + locals_stack_size);
- cfg->locals_max_stack_offset = - offset;
- }
+ if (locals_stack_align) {
+ offset += (locals_stack_align - 1);
+ offset &= ~(locals_stack_align - 1);
+ }
+ if (cfg->arch.omit_fp) {
+ cfg->locals_min_stack_offset = offset;
+ cfg->locals_max_stack_offset = offset + locals_stack_size;
+ } else {
+ cfg->locals_min_stack_offset = - (offset + locals_stack_size);
+ cfg->locals_max_stack_offset = - offset;
+ }
- for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
- if (offsets [i] != -1) {
- MonoInst *ins = cfg->varinfo [i];
- ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = cfg->frame_reg;
- if (cfg->arch.omit_fp)
- ins->inst_offset = (offset + offsets [i]);
- else
- ins->inst_offset = - (offset + offsets [i]);
- //printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
- }
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
+ if (offsets [i] != -1) {
+ MonoInst *ins = cfg->varinfo [i];
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp)
+ ins->inst_offset = (offset + offsets [i]);
+ else
+ ins->inst_offset = - (offset + offsets [i]);
+ //printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
}
- offset += locals_stack_size;
}
+ offset += locals_stack_size;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
g_assert (!cfg->arch.omit_fp);
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
- if (cfg->globalra) {
- /* The new allocator needs info about the original locations of the arguments */
- switch (ainfo->storage) {
- case ArgInIReg:
- case ArgInFloatSSEReg:
- case ArgInDoubleSSEReg:
- ins->opcode = OP_REGVAR;
- ins->inst_c0 = ainfo->reg;
- break;
- case ArgOnStack:
- g_assert (!cfg->arch.omit_fp);
- ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = cfg->frame_reg;
- ins->inst_offset = ainfo->offset + ARGS_OFFSET;
- break;
- case ArgValuetypeInReg:
- ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = cfg->frame_reg;
- /* These arguments are saved to the stack in the prolog */
- offset = ALIGN_TO (offset, sizeof(mgreg_t));
- if (cfg->arch.omit_fp) {
- ins->inst_offset = offset;
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
- } else {
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
- ins->inst_offset = - offset;
- }
- break;
- default:
- g_assert_not_reached ();
- }
-
- continue;
- }
-
/* FIXME: Allocate volatile arguments to registers */
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
inreg = FALSE;
amd64_pop_reg (code, AMD64_RDI);
break;
}
+ case OP_GENERIC_CLASS_INIT: {
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ guint8 *jump;
+
+ g_assert (ins->sreg1 == MONO_AMD64_ARG_REG1);
+
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+ amd64_test_membase_imm_size (code, ins->sreg1, byte_offset, bitmask, 1);
+ jump = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
+
+ code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init", FALSE);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
+
+ x86_patch (jump, code);
+ break;
+ }
+
case OP_X86_LEA:
amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
break;
break;
}
case OP_GC_SAFE_POINT: {
- gpointer polling_func = NULL;
+ const char *polling_func = NULL;
int compare_val = 0;
guint8 *br [1];
#if defined (USE_COOP_GC)
- polling_func = (gpointer)mono_threads_state_poll;
+ polling_func = "mono_threads_state_poll";
compare_val = 1;
#elif defined(__native_client_codegen__) && defined(__native_client_gc__)
- polling_func = (gpointer)mono_nacl_gc;
+ polling_func = "mono_nacl_gc";
compare_val = 0xFFFFFFFF;
#endif
if (!polling_func)
amd64_test_membase_imm_size (code, ins->sreg1, 0, compare_val, 4);
br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, polling_func, TRUE);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func, FALSE);
amd64_patch (br[0], code);
break;
}
}
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
+mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
- MonoJumpInfo *patch_info;
- gboolean compile_aot = !run_cctors;
-
- for (patch_info = ji; patch_info; patch_info = patch_info->next) {
- unsigned char *ip = patch_info->ip.i + code;
- unsigned char *target;
+ unsigned char *ip = ji->ip.i + code;
- if (compile_aot) {
- switch (patch_info->type) {
- case MONO_PATCH_INFO_BB:
- case MONO_PATCH_INFO_LABEL:
+ /*
+ * Debug code to help track down problems where the target of a near call
+ * is not valid.
+ */
+ if (amd64_is_near_call (ip)) {
+ gint64 disp = (guint8*)target - (guint8*)ip;
+
+ if (!amd64_is_imm32 (disp)) {
+ printf ("TYPE: %d\n", ji->type);
+ switch (ji->type) {
+ case MONO_PATCH_INFO_INTERNAL_METHOD:
+ printf ("V: %s\n", ji->data.name);
+ break;
+ case MONO_PATCH_INFO_METHOD_JUMP:
+ case MONO_PATCH_INFO_METHOD:
+ printf ("V: %s\n", ji->data.method->name);
break;
default:
- /* No need to patch these */
- continue;
- }
- }
-
- target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
-
- switch (patch_info->type) {
- case MONO_PATCH_INFO_NONE:
- continue;
- case MONO_PATCH_INFO_METHOD_REL:
- case MONO_PATCH_INFO_R8:
- case MONO_PATCH_INFO_R4:
- g_assert_not_reached ();
- continue;
- case MONO_PATCH_INFO_BB:
- break;
- default:
- break;
- }
-
- /*
- * Debug code to help track down problems where the target of a near call is
- * is not valid.
- */
- if (amd64_is_near_call (ip)) {
- gint64 disp = (guint8*)target - (guint8*)ip;
-
- if (!amd64_is_imm32 (disp)) {
- printf ("TYPE: %d\n", patch_info->type);
- switch (patch_info->type) {
- case MONO_PATCH_INFO_INTERNAL_METHOD:
- printf ("V: %s\n", patch_info->data.name);
- break;
- case MONO_PATCH_INFO_METHOD_JUMP:
- case MONO_PATCH_INFO_METHOD:
- printf ("V: %s\n", patch_info->data.method->name);
- break;
- default:
- break;
- }
+ break;
}
}
-
- amd64_patch (ip, (gpointer)target);
}
+
+ amd64_patch (ip, (gpointer)target);
}
#ifndef DISABLE_JIT
/* Unused arguments */
continue;
- if (cfg->globalra) {
- /* All the other moves are done by the register allocator */
- switch (ainfo->storage) {
- case ArgInFloatSSEReg:
- amd64_sse_cvtss2sd_reg_reg (code, ainfo->reg, ainfo->reg);
- break;
- case ArgValuetypeInReg:
- for (quad = 0; quad < 2; quad ++) {
- switch (ainfo->pair_storage [quad]) {
- case ArgInIReg:
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
- break;
- case ArgInFloatSSEReg:
- amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
- break;
- case ArgInDoubleSSEReg:
- amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
- break;
- case ArgNone:
- break;
- default:
- g_assert_not_reached ();
- }
- }
- break;
- default:
- break;
- }
-
- continue;
- }
-
/* Save volatile arguments to the stack */
if (ins->opcode != OP_REGVAR) {
switch (ainfo->storage) {
mono_sigctx_to_monoctx (sigctx, &ctx);
- rip = (guint8*)ctx.rip;
+ rip = (guint8*)ctx.gregs [AMD64_RIP];
if (IS_REX (rip [0])) {
reg = amd64_rex_b (rip [0]);
/* idiv REG */
reg += x86_modrm_rm (rip [1]);
- switch (reg) {
- case AMD64_RAX:
- value = ctx.rax;
- break;
- case AMD64_RBX:
- value = ctx.rbx;
- break;
- case AMD64_RCX:
- value = ctx.rcx;
- break;
- case AMD64_RDX:
- value = ctx.rdx;
- break;
- case AMD64_RBP:
- value = ctx.rbp;
- break;
- case AMD64_RSP:
- value = ctx.rsp;
- break;
- case AMD64_RSI:
- value = ctx.rsi;
- break;
- case AMD64_RDI:
- value = ctx.rdi;
- break;
- case AMD64_R12:
- value = ctx.r12;
- break;
- case AMD64_R13:
- value = ctx.r13;
- break;
- case AMD64_R14:
- value = ctx.r14;
- break;
- case AMD64_R15:
- value = ctx.r15;
- break;
- default:
- g_assert_not_reached ();
- reg = -1;
- }
+ value = ctx.gregs [reg];
if (value == -1)
return TRUE;
return 0;
}
-#define _CTX_REG(ctx,fld,i) ((&ctx->fld)[i])
-
mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
- switch (reg) {
- case AMD64_RCX: return ctx->rcx;
- case AMD64_RDX: return ctx->rdx;
- case AMD64_RBX: return ctx->rbx;
- case AMD64_RBP: return ctx->rbp;
- case AMD64_RSP: return ctx->rsp;
- default:
- return _CTX_REG (ctx, rax, reg);
- }
+ return ctx->gregs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
{
- switch (reg) {
- case AMD64_RCX:
- ctx->rcx = val;
- break;
- case AMD64_RDX:
- ctx->rdx = val;
- break;
- case AMD64_RBX:
- ctx->rbx = val;
- break;
- case AMD64_RBP:
- ctx->rbp = val;
- break;
- case AMD64_RSP:
- ctx->rsp = val;
- break;
- default:
- _CTX_REG (ctx, rax, reg) = val;
- }
+ ctx->gregs [reg] = val;
}
gpointer