return "unknown";
}
-G_GNUC_UNUSED static void
-break_count (void)
-{
-}
-
-G_GNUC_UNUSED static gboolean
-debug_count (void)
-{
- static int count = 0;
- count ++;
-
- if (!getenv ("COUNT"))
- return TRUE;
-
- if (count == atoi (getenv ("COUNT"))) {
- break_count ();
- }
-
- if (count > atoi (getenv ("COUNT"))) {
- return FALSE;
- }
-
- return TRUE;
-}
-
static gboolean
debug_omit_fp (void)
{
#if 0
- return debug_count ();
+ return mono_debug_count ();
#else
return TRUE;
#endif
break;
}
/* fall through */
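+	/*
+	 * Under NaCl, classify a TYPEDBYREF return like any other value type
+	 * instead of taking the fixed size-24 vtype_retaddr path below.
+	 */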
+#if defined( __native_client_codegen__ )
+ case MONO_TYPE_TYPEDBYREF:
+#endif
case MONO_TYPE_VALUETYPE: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
}
break;
}
+#if !defined( __native_client_codegen__ )
case MONO_TYPE_TYPEDBYREF:
/* Same as a valuetype with size 24 */
cinfo->vtype_retaddr = TRUE;
break;
+#endif
case MONO_TYPE_VOID:
break;
default:
add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_TYPEDBYREF:
-#ifdef HOST_WIN32
+#if defined( HOST_WIN32 ) || defined( __native_client_codegen__ )
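+		/* Win64 and NaCl pass a TYPEDBYREF argument like a value type; elsewhere it takes sizeof (MonoTypedRef) bytes of stack directly. */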
add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
#else
stack_size += sizeof (MonoTypedRef);
* Returns the size of the argument area on the stack.
*/
int
-mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
CallInfo *cinfo = get_call_info (NULL, NULL, csig);
* This function returns the optimizations supported on this cpu.
*/
guint32
-mono_arch_cpu_optimizazions (guint32 *exclude_mask)
+mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
int eax, ebx, ecx, edx;
guint32 opts = 0;
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
- if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
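+			/* Only set up vret_addr if the TYPEDBYREF return actually uses a hidden return buffer; under NaCl it can be returned in registers. */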
+ if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || ((sig->ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
cfg->vret_addr->opcode = OP_REGVAR;
cfg->vret_addr->inst_c0 = cinfo->ret.reg;
}
} else {
if (cfg->arch.omit_fp)
cfg->arch.reg_save_area_offset = offset;
- /* Reserve space for caller saved registers */
+ /* Reserve space for callee saved registers */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
offset += sizeof(mgreg_t);
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
- if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
+ if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || ((sig->ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
if (cfg->globalra) {
cfg->vret_addr->opcode = OP_REGVAR;
cfg->vret_addr->inst_c0 = cinfo->ret.reg;
}
}
else {
- if (cfg->abs_patches && g_hash_table_lookup (cfg->abs_patches, data)) {
- /*
- * This is not really an optimization, but required because the
- * generic class init trampolines use R11 to pass the vtable.
- */
- near_call = TRUE;
+ MonoJumpInfo *jinfo = NULL;
+
+ if (cfg->abs_patches)
+ jinfo = g_hash_table_lookup (cfg->abs_patches, data);
+ if (jinfo) {
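+			/*
+			 * The target of a JIT icall patch is known here: call it directly
+			 * if it fits in 32 bits; such a callsite never needs patching.
+			 */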
+ if (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
+ if ((((guint64)data) >> 32) == 0)
+ near_call = TRUE;
+ no_patch = TRUE;
+ } else {
+ /*
+ * This is not really an optimization, but required because the
+ * generic class init trampolines use R11 to pass the vtable.
+ */
+ near_call = TRUE;
+ }
} else {
MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
if (info) {
- if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
- strstr (cfg->method->name, info->name)) {
- /* A call to the wrapped function */
- if ((((guint64)data) >> 32) == 0)
- near_call = TRUE;
- no_patch = TRUE;
- }
- else if (info->func == info->wrapper) {
+ if (info->func == info->wrapper) {
/* No wrapper */
if ((((guint64)info->func) >> 32) == 0)
near_call = TRUE;
#ifdef MONO_ARCH_NOMAP32BIT
near_call = FALSE;
#endif
-
+#if defined(__native_client__)
+ /* Always use near_call == TRUE for Native Client */
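+	/* (All code in the NaCl sandbox is expected to be reachable with a rel32 call.) */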
+ near_call = TRUE;
+#endif
/* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */
if (optimize_for_xen)
near_call = FALSE;
#ifdef __APPLE__
static gboolean have_tls_get = FALSE;
static gboolean inited = FALSE;
+ guint8 *ins;
if (inited)
return have_tls_get;
- guint8 *ins = (guint8*)pthread_getspecific;
+ ins = (guint8*)pthread_getspecific;
/*
* We're looking for these two instructions:
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
#ifndef DISABLE_JIT
-
-#if defined(__native_client__) || defined(__native_client_codegen__)
-void mono_nacl_gc()
-{
-#ifdef __native_client_gc__
- __nacl_suspend_thread_if_needed();
-#endif
-}
-#endif
-
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
+#if defined(__mono_ilp32__)
+ /* Comparison of pointer immediates should be 4 bytes to avoid sign-extend problems */
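+		/* (A full-width CMP would sign-extend the immediate, while ILP32 pointers are zero-extended 32-bit values.) */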
+ g_assert (amd64_is_imm32 (ins->inst_imm));
+ amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
+ break;
+#endif
case OP_LCOMPARE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
for (i = 0; i < breakpoint_size; ++i)
x86_nop (code);
}
+		/*
+		 * Emit one extra nop so that skipping the breakpoint doesn't leave the
+		 * ip pointing at a different IL offset.
+		 */
+ x86_nop (code);
break;
}
case OP_ADDCC:
+ case OP_LADDCC:
case OP_LADD:
amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
break;
amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
break;
case OP_SUBCC:
+ case OP_LSUBCC:
case OP_LSUB:
amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
break;
}
case OP_LDIV:
case OP_LREM:
+#if defined( __native_client_codegen__ )
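+		/* NaCl does not let the runtime turn the hardware divide fault into a managed exception, so test the divisor explicitly. */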
+ amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
/* Regalloc magic makes the div/rem cases the same */
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
break;
case OP_LDIV_UN:
case OP_LREM_UN:
+#if defined( __native_client_codegen__ )
+ amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
break;
case OP_IDIV:
case OP_IREM:
+#if defined( __native_client_codegen__ )
+ amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_cdq_size (code, 4);
break;
case OP_IDIV_UN:
case OP_IREM_UN:
+#if defined( __native_client_codegen__ )
+ amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg2, 0, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
* done:
*/
- if (value != AMD64_RDX)
- amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
- amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
- if (shifted_nursery_start >> 31) {
- /*
- * The value we need to compare against is 64 bits, so we need
- * another spare register. We use RBX, which we save and
- * restore.
- */
- amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
- amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
- amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
- } else {
- amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
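+	/* Only emit the nursery range check when the GC configuration requires it; the card is marked unconditionally below. */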
+ if (mono_gc_card_table_nursery_check ()) {
+ if (value != AMD64_RDX)
+ amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
+ amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
+ if (shifted_nursery_start >> 31) {
+ /*
+ * The value we need to compare against is 64 bits, so we need
+ * another spare register. We use RBX, which we save and
+ * restore.
+ */
+ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
+ amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
+ amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
+ } else {
+ amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
+ }
+ br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
}
- br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift);
if (card_table_mask)
amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0);
amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1);
- x86_patch (br, code);
+ if (mono_gc_card_table_nursery_check ())
+ x86_patch (br, code);
break;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
break;
}
case OP_NACL_GC_SAFE_POINT: {
-#if defined(__native_client_codegen__)
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+#if defined(__native_client_codegen__) && defined(__native_client_gc__)
+ if (cfg->compile_aot)
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+ else {
+ guint8 *br [1];
+
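+			/* Inline the suspension-needed test so the common case falls through without calling mono_nacl_gc (). */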
+ amd64_mov_reg_imm_size (code, AMD64_R11, (gpointer)&__nacl_thread_suspension_needed, 4);
+ amd64_test_membase_imm_size (code, AMD64_R11, 0, 0xFFFFFFFF, 4);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+ amd64_patch (br[0], code);
+ }
#endif
break;
}
(cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP));
amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer));
+
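+	/*
+	 * Record both live ranges of the rgctx variable: in MONO_ARCH_RGCTX_REG
+	 * from method entry up to this point, and in its stack slot from here on.
+	 */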
+ mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
+ mono_add_var_location (cfg, cfg->rgctx_var, FALSE, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, code - cfg->native_code, 0);
}
/* compute max_length in order to use short forward jumps */
size = 8;
*/
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, size);
+
+	/*
+	 * Save the original location of 'this'; get_generic_info_from_stack_frame ()
+	 * needs it to look up the argument value correctly while handling async
+	 * exceptions.
+	 */
+ if (ins == cfg->args [0]) {
+ mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
+ mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
+ }
break;
}
case ArgInFloatSSEReg:
default:
g_assert_not_reached ();
}
- }
- }
-
- /* Might need to attach the thread to the JIT or change the domain for the callback */
- if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
- guint64 domain = (guint64)cfg->domain;
-
- args_clobbered = TRUE;
- /*
- * The call might clobber argument registers, but they are already
- * saved to the stack/global regs.
- */
- if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) {
- guint8 *buf, *no_domain_branch;
-
- code = mono_amd64_emit_tls_get (code, AMD64_RAX, appdomain_tls_offset);
- if (cfg->compile_aot) {
- /* AOT code is only used in the root domain */
- amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0);
- } else {
- if ((domain >> 32) == 0)
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
- else
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
- }
- amd64_alu_reg_reg (code, X86_CMP, AMD64_RAX, AMD64_ARG_REG1);
- no_domain_branch = code;
- x86_branch8 (code, X86_CC_NE, 0, 0);
- code = mono_amd64_emit_tls_get ( code, AMD64_RAX, lmf_addr_tls_offset);
- amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
- buf = code;
- x86_branch8 (code, X86_CC_NE, 0, 0);
- amd64_patch (no_domain_branch, code);
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_jit_thread_attach", TRUE);
- amd64_patch (buf, code);
-#ifdef HOST_WIN32
- /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
- /* FIXME: Add a separate key for LMF to avoid this */
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
-#endif
- } else {
- g_assert (!cfg->compile_aot);
- if (cfg->compile_aot) {
- /* AOT code is only used in the root domain */
- amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0);
- } else {
- if ((domain >> 32) == 0)
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
- else
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
+ if (ins == cfg->args [0]) {
+ mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
+ mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
}
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_jit_thread_attach", TRUE);
}
}
appdomain_tls_offset = -1;
if (lmf_tls_offset >= 64)
lmf_tls_offset = -1;
+ if (lmf_addr_tls_offset >= 64)
+ lmf_addr_tls_offset = -1;
#else
#ifdef MONO_XEN_OPT
optimize_for_xen = access ("/proc/xen", F_OK) == 0;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
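+				/* Compare only sizeof(gpointer) bytes; under NaCl gpointer is 4 bytes wide and the upper half of MONO_ARCH_IMT_REG is not meaningful. */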
if (amd64_is_imm32 (item->key))
- amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
+ amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
/* enable the commented code to assert on wrong method */
#if 0
if (amd64_is_imm32 (item->key))
- amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
+ amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
} else {
if (amd64_is_imm32 (item->key))
- amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
+				amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);