diff --git a/mono/mini/mini-arm64.c b/mono/mini/mini-arm64.c index e19467f8945..6192f26b9cf 100644 --- a/mono/mini/mini-arm64.c +++ b/mono/mini/mini-arm64.c @@ -1,5 +1,6 @@ -/* - * mini-arm64.c: ARM64 backend for the Mono code generator +/** + * \file + * ARM64 backend for the Mono code generator * * Copyright 2013 Xamarin, Inc (http://www.xamarin.com) * @@ -53,7 +54,7 @@ static gpointer bp_trampoline; static gboolean ios_abi; -static __attribute__((warn_unused_result)) guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset); +static __attribute__ ((__warn_unused_result__)) guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset); const char* mono_arch_regname (int reg) @@ -328,7 +329,7 @@ emit_imm64_template (guint8 *code, int dreg) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_addw_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { @@ -340,7 +341,7 @@ emit_addw_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_addx_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { @@ -352,7 +353,7 @@ emit_addx_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_subw_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { @@ -364,7 +365,7 @@ emit_subw_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_subx_imm (guint8 *code, int dreg, int sreg, int imm) { if (!arm_is_arith_imm (imm)) { @@ -377,7 +378,7 @@ emit_subx_imm (guint8 *code, int dreg, int sreg, int imm) return code; } /* Emit sp+=imm. Clobbers ip0/ip1 */ -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_addx_sp_imm (guint8 *code, int imm) { code = emit_imm (code, ARMREG_IP0, imm); @@ -388,7 +389,7 @@ emit_addx_sp_imm (guint8 *code, int imm) } /* Emit sp-=imm.
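The hunks above all touch helpers such as emit_addw_imm, emit_addx_imm, emit_subw_imm and emit_subx_imm, which test arm_is_arith_imm () and fall back to materializing the constant in a scratch register (emit_imm () into ARMREG_IP0) when it does not encode. As a rough standalone sketch of what such a predicate has to accept -- the A64 ADD/SUB (immediate) encoding only holds an unsigned 12-bit value, optionally shifted left by 12 -- with the caveat that the in-tree arm_is_arith_imm () may differ in detail:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: can this value go directly into an A64 ADD/SUB (immediate)
 * instruction?  Anything that fails this check has to be loaded into a
 * scratch register first, which is exactly the fallback path taken by the
 * emit_*_imm helpers above. */
static bool
arith_imm_encodable (int64_t imm)
{
	if (imm < 0)
		return false;
	if (imm <= 0xfff)                                /* imm12, LSL #0 */
		return true;
	if ((imm & 0xfff) == 0 && (imm >> 12) <= 0xfff)  /* imm12, LSL #12 */
		return true;
	return false;
}

The same fits-or-falls-back shape reappears later in the diff for the load/store helpers, which test a scaled unsigned 12-bit offset instead.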
Clobbers ip0/ip1 */ -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_subx_sp_imm (guint8 *code, int imm) { code = emit_imm (code, ARMREG_IP0, imm); @@ -398,7 +399,7 @@ emit_subx_sp_imm (guint8 *code, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_andw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: @@ -408,7 +409,7 @@ emit_andw_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_andx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: @@ -418,7 +419,7 @@ emit_andx_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_orrw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: @@ -428,7 +429,7 @@ emit_orrw_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_orrx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: @@ -438,7 +439,7 @@ emit_orrx_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_eorw_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: @@ -448,7 +449,7 @@ emit_eorw_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_eorx_imm (guint8 *code, int dreg, int sreg, int imm) { // FIXME: @@ -458,7 +459,7 @@ emit_eorx_imm (guint8 *code, int dreg, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_cmpw_imm (guint8 *code, int sreg, int imm) { if (imm == 0) { @@ -472,7 +473,7 @@ emit_cmpw_imm (guint8 *code, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_cmpx_imm (guint8 *code, int sreg, int imm) { if (imm == 0) { @@ -486,7 +487,7 @@ emit_cmpx_imm (guint8 *code, int sreg, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_strb (guint8 *code, int rt, int rn, int imm) { if (arm_is_strb_imm (imm)) { @@ -500,7 +501,7 @@ emit_strb (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_strh (guint8 *code, int rt, int rn, int imm) { if (arm_is_strh_imm (imm)) { @@ -514,7 +515,7 @@ emit_strh (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_strw (guint8 *code, int rt, int rn, int imm) { if (arm_is_strw_imm (imm)) { @@ -528,7 +529,7 @@ emit_strw (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* 
emit_strfpw (guint8 *code, int rt, int rn, int imm) { if (arm_is_strw_imm (imm)) { @@ -542,7 +543,7 @@ emit_strfpw (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_strfpx (guint8 *code, int rt, int rn, int imm) { if (arm_is_strx_imm (imm)) { @@ -556,7 +557,7 @@ emit_strfpx (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_strx (guint8 *code, int rt, int rn, int imm) { if (arm_is_strx_imm (imm)) { @@ -570,7 +571,7 @@ emit_strx (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrb (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 1)) { @@ -584,7 +585,7 @@ emit_ldrb (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrsbx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 1)) { @@ -598,7 +599,7 @@ emit_ldrsbx (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrh (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 2)) { @@ -612,7 +613,7 @@ emit_ldrh (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrshx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 2)) { @@ -626,7 +627,7 @@ emit_ldrshx (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrswx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { @@ -640,7 +641,7 @@ emit_ldrswx (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrw (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { @@ -653,7 +654,7 @@ emit_ldrw (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 8)) { @@ -666,7 +667,7 @@ emit_ldrx (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrfpw (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 4)) { @@ -680,7 +681,7 @@ emit_ldrfpw (guint8 *code, int rt, int rn, int imm) return code; } -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_ldrfpx (guint8 *code, int rt, int rn, int imm) { if (arm_is_pimm12_scaled (imm, 8)) { @@ -745,6 +746,16 @@ mono_arm_emit_aotconst (gpointer ji, guint8 *code, guint8 *code_start, int dreg, return emit_aotconst_full (NULL, (MonoJumpInfo**)ji, code, code_start, dreg, patch_type, 
data); } +gboolean +mono_arch_have_fast_tls (void) +{ +#ifdef TARGET_IOS + return FALSE; +#else + return TRUE; +#endif +} + static guint8* emit_tls_get (guint8 *code, int dreg, int tls_offset) { @@ -758,15 +769,6 @@ emit_tls_get (guint8 *code, int dreg, int tls_offset) return code; } -static guint8* -emit_tls_get_reg (guint8 *code, int dreg, int offset_reg) -{ - g_assert (offset_reg != ARMREG_IP0); - arm_mrs (code, ARMREG_IP0, ARM_MRS_REG_TPIDR_EL0); - arm_ldrx_reg (code, dreg, ARMREG_IP0, offset_reg); - return code; -} - static guint8* emit_tls_set (guint8 *code, int sreg, int tls_offset) { @@ -783,25 +785,13 @@ emit_tls_set (guint8 *code, int sreg, int tls_offset) return code; } - -static guint8* -emit_tls_set_reg (guint8 *code, int sreg, int offset_reg) -{ - int tmpreg = ARMREG_IP0; - - g_assert (sreg != tmpreg); - arm_mrs (code, tmpreg, ARM_MRS_REG_TPIDR_EL0); - arm_strx_reg (code, sreg, tmpreg, offset_reg); - return code; -} - /* * Emits * - mov sp, fp * - ldrp [fp, lr], [sp], !stack_offfset * Clobbers TEMP_REGS. */ -__attribute__((warn_unused_result)) guint8* +__attribute__ ((__warn_unused_result__)) guint8* mono_arm_emit_destroy_frame (guint8 *code, int stack_offset, guint64 temp_regs) { arm_movspx (code, ARMREG_SP, ARMREG_FP); @@ -1004,7 +994,7 @@ mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, Mo switch (ji->type) { case MONO_PATCH_INFO_METHOD_JUMP: /* ji->relocation is not set by the caller */ - arm_patch_rel (ip, (guint8*)target, MONO_R_ARM64_B); + arm_patch_full (cfg, domain, ip, (guint8*)target, MONO_R_ARM64_B); break; default: arm_patch_full (cfg, domain, ip, (guint8*)target, ji->relocation); @@ -1239,7 +1229,6 @@ add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t) case MONO_TYPE_I1: add_general (cinfo, ainfo, 1, TRUE); break; - case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: add_general (cinfo, ainfo, 1, FALSE); break; @@ -1247,7 +1236,6 @@ add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t) add_general (cinfo, ainfo, 2, TRUE); break; case MONO_TYPE_U2: - case MONO_TYPE_CHAR: add_general (cinfo, ainfo, 2, FALSE); break; case MONO_TYPE_I4: @@ -1260,11 +1248,7 @@ add_param (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t) case MONO_TYPE_U: case MONO_TYPE_PTR: case MONO_TYPE_FNPTR: - case MONO_TYPE_CLASS: case MONO_TYPE_OBJECT: - case MONO_TYPE_SZARRAY: - case MONO_TYPE_ARRAY: - case MONO_TYPE_STRING: case MONO_TYPE_U8: case MONO_TYPE_I8: add_general (cinfo, ainfo, 8, FALSE); @@ -1359,6 +1343,7 @@ get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) /* Pass the argument address in the next register */ if (cinfo->gr >= PARAM_REGS) { ainfo->storage = ArgVtypeByRefOnStack; + cinfo->stack_usage = ALIGN_TO (cinfo->stack_usage, 8); ainfo->offset = cinfo->stack_usage; cinfo->stack_usage += 8; } else { @@ -1396,9 +1381,6 @@ dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig) { int i; - if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS) - return FALSE; - // FIXME: Add more cases switch (cinfo->ret.storage) { case ArgNone: @@ -1427,10 +1409,7 @@ dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig) case ArgInFRegR4: case ArgHFA: case ArgVtypeByRef: - break; case ArgOnStack: - if (ainfo->offset >= DYN_CALL_STACK_ARGS * sizeof (mgreg_t)) - return FALSE; break; default: return FALSE; @@ -1488,6 +1467,15 @@ mono_arch_dyn_call_free (MonoDynCallInfo *info) g_free (ainfo); } +int +mono_arch_dyn_call_get_buf_size (MonoDynCallInfo *info) +{ + ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info; + + g_assert 
(ainfo->cinfo->stack_usage % MONO_ARCH_FRAME_ALIGNMENT == 0); + return sizeof (DynCallArgs) + ainfo->cinfo->stack_usage; +} + static double bitcast_r4_to_r8 (float f) { @@ -1505,7 +1493,7 @@ bitcast_r8_to_r4 (double f) } void -mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len) +mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf) { ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info; DynCallArgs *p = (DynCallArgs*)buf; @@ -1514,12 +1502,11 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g CallInfo *cinfo = dinfo->cinfo; int buffer_offset = 0; - g_assert (buf_len >= sizeof (DynCallArgs)); - p->res = 0; p->ret = ret; p->n_fpargs = dinfo->n_fpargs; p->n_fpret = dinfo->n_fpret; + p->n_stackargs = cinfo->stack_usage / sizeof (mgreg_t); arg_index = 0; greg = 0; @@ -1554,7 +1541,6 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g /* Special case arguments smaller than 1 machine word */ switch (t->type) { - case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: *(guint8*)stack_arg = *(guint8*)arg; break; @@ -1562,7 +1548,6 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g *(gint8*)stack_arg = *(gint8*)arg; break; case MONO_TYPE_U2: - case MONO_TYPE_CHAR: *(guint16*)stack_arg = *(guint16*)arg; break; case MONO_TYPE_I2: @@ -1583,10 +1568,6 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g } switch (t->type) { - case MONO_TYPE_STRING: - case MONO_TYPE_CLASS: - case MONO_TYPE_ARRAY: - case MONO_TYPE_SZARRAY: case MONO_TYPE_OBJECT: case MONO_TYPE_PTR: case MONO_TYPE_I: @@ -1595,7 +1576,6 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g case MONO_TYPE_U8: p->regs [slot] = (mgreg_t)*arg; break; - case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: p->regs [slot] = *(guint8*)arg; break; @@ -1606,7 +1586,6 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g p->regs [slot] = *(gint16*)arg; break; case MONO_TYPE_U2: - case MONO_TYPE_CHAR: p->regs [slot] = *(guint16*)arg; break; case MONO_TYPE_I4: @@ -1700,10 +1679,6 @@ mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf) case MONO_TYPE_VOID: *(gpointer*)ret = NULL; break; - case MONO_TYPE_STRING: - case MONO_TYPE_CLASS: - case MONO_TYPE_ARRAY: - case MONO_TYPE_SZARRAY: case MONO_TYPE_OBJECT: case MONO_TYPE_I: case MONO_TYPE_U: @@ -1714,14 +1689,12 @@ mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf) *(gint8*)ret = res; break; case MONO_TYPE_U1: - case MONO_TYPE_BOOLEAN: *(guint8*)ret = res; break; case MONO_TYPE_I2: *(gint16*)ret = res; break; case MONO_TYPE_U2: - case MONO_TYPE_CHAR: *(guint16*)ret = res; break; case MONO_TYPE_I4: @@ -1789,25 +1762,21 @@ mono_arch_flush_icache (guint8 *code, gint size) * icache/dcache cache line sizes, that can vary between cores on * big.LITTLE architectures. 
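Back in the earlier hunks, the emit_strb/emit_strh/emit_strw/emit_strx and emit_ldr* helpers follow the same encode-or-fallback pattern, but the predicates they test (arm_is_strb_imm (), arm_is_pimm12_scaled () and friends) concern the unsigned, size-scaled 12-bit offset of the A64 LDR/STR (unsigned immediate) forms. A standalone sketch of that check, assuming the in-tree macros implement the standard encoding rule:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: an A64 LDR/STR unsigned-immediate offset must be a
 * non-negative multiple of the access size, and the scaled value must fit
 * in 12 bits.  size is the access width in bytes (1, 2, 4 or 8). */
static bool
pimm12_scaled_encodable (int64_t imm, int size)
{
	return imm >= 0 && (imm % size) == 0 && (imm / size) <= 0xfff;
}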
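The hunks just above also add mono_arch_have_fast_tls () (FALSE on iOS, TRUE elsewhere) and drop the emit_tls_get_reg / emit_tls_set_reg variants, keeping only the fixed-offset path that goes through the TPIDR_EL0 thread register. A minimal C model of the fast path that emit_tls_get () emits as mrs + ldr, assuming GCC/clang-style inline assembly on an AArch64 target (the JIT of course emits the two instructions directly rather than compiling C):

/* Sketch only (AArch64): read the thread pointer from TPIDR_EL0 and load
 * one pointer-sized slot at a fixed offset from it. */
static inline void *
fast_tls_get (int tls_offset)
{
	char *tp;

	__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tp));
	return *(void **) (tp + tls_offset);
}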
*/ guint64 end = (guint64) (code + size); - guint64 addr, ctr_el0; - static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff; - size_t isize, dsize; - - asm volatile ("mrs %0, ctr_el0" : "=r" (ctr_el0)); - isize = 4 << ((ctr_el0 >> 0 ) & 0xf); - dsize = 4 << ((ctr_el0 >> 16) & 0xf); - - /* determine the global minimum cache line size */ - icache_line_size = isize = MIN (icache_line_size, isize); - dcache_line_size = dsize = MIN (dcache_line_size, dsize); - - addr = (guint64) code & ~(guint64) (dsize - 1); - for (; addr < end; addr += dsize) + guint64 addr; + /* always go with cacheline size of 4 bytes as this code isn't perf critical + * anyway. Reading the cache line size from a machine register can be racy + * on a big.LITTLE architecture if the cores don't have the same cache line + * sizes. */ + const size_t icache_line_size = 4; + const size_t dcache_line_size = 4; + + addr = (guint64) code & ~(guint64) (dcache_line_size - 1); + for (; addr < end; addr += dcache_line_size) asm volatile("dc civac, %0" : : "r" (addr) : "memory"); asm volatile("dsb ish" : : : "memory"); - addr = (guint64) code & ~(guint64) (isize - 1); - for (; addr < end; addr += isize) + addr = (guint64) code & ~(guint64) (icache_line_size - 1); + for (; addr < end; addr += icache_line_size) asm volatile("ic ivau, %0" : : "r" (addr) : "memory"); asm volatile ("dsb ish" : : : "memory"); @@ -1917,9 +1886,6 @@ mono_arch_create_vars (MonoCompile *cfg) if (cfg->method->save_lmf) { cfg->create_lmf_var = TRUE; cfg->lmf_ir = TRUE; -#ifndef TARGET_MACH - cfg->lmf_ir_mono_lmf = TRUE; -#endif } } @@ -2869,7 +2835,7 @@ opcode_to_armcond (int opcode) } /* This clobbers LR */ -static inline __attribute__((warn_unused_result)) guint8* +static inline __attribute__ ((__warn_unused_result__)) guint8* emit_cond_exc (MonoCompile *cfg, guint8 *code, int opcode, const char *exc_name) { int cond; @@ -3644,20 +3610,12 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) case OP_STOREI8_MEMBASE_REG: code = emit_strx (code, sreg1, ins->inst_destbasereg, ins->inst_offset); break; - case OP_TLS_GET: code = emit_tls_get (code, dreg, ins->inst_offset); break; - case OP_TLS_GET_REG: - code = emit_tls_get_reg (code, dreg, sreg1); - break; case OP_TLS_SET: code = emit_tls_set (code, sreg1, ins->inst_offset); break; - case OP_TLS_SET_REG: - code = emit_tls_set_reg (code, sreg1, sreg2); - break; - /* Atomic */ case OP_MEMORY_BARRIER: arm_dmb (code, 0); @@ -4181,6 +4139,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) } else { mono_add_patch_info_rel (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method, MONO_R_ARM64_B); arm_b (code, code); + cfg->thunk_area += THUNK_SIZE; } ins->flags |= MONO_INST_GC_CALLSITE; ins->backend.pc_offset = code - cfg->native_code; @@ -4218,14 +4177,35 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) code = emit_ldrfpx (code, ARMREG_D0 + i, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, fpregs) + (i * 8)); arm_patch_rel (labels [0], code, MONO_R_ARM64_BCC); + /* Allocate callee area */ + code = emit_ldrx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs)); + arm_lslw (code, ARMREG_R0, ARMREG_R0, 3); + arm_movspx (code, ARMREG_R1, ARMREG_SP); + arm_subx (code, ARMREG_R1, ARMREG_R1, ARMREG_R0); + arm_movspx (code, ARMREG_SP, ARMREG_R1); + /* Set stack args */ - for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) { - code = emit_ldrx (code, ARMREG_R0, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + 1 + i) * 
sizeof (mgreg_t))); - code = emit_strx (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t)); - } + /* R1 = limit */ + code = emit_ldrx (code, ARMREG_R1, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, n_stackargs)); + /* R2 = pointer into 'regs' */ + code = emit_imm (code, ARMREG_R2, MONO_STRUCT_OFFSET (DynCallArgs, regs) + ((PARAM_REGS + 1) * sizeof (mgreg_t))); + arm_addx (code, ARMREG_R2, ARMREG_LR, ARMREG_R2); + /* R3 = pointer to stack */ + arm_movspx (code, ARMREG_R3, ARMREG_SP); + labels [0] = code; + arm_b (code, code); + labels [1] = code; + code = emit_ldrx (code, ARMREG_R5, ARMREG_R2, 0); + code = emit_strx (code, ARMREG_R5, ARMREG_R3, 0); + code = emit_addx_imm (code, ARMREG_R2, ARMREG_R2, sizeof (mgreg_t)); + code = emit_addx_imm (code, ARMREG_R3, ARMREG_R3, sizeof (mgreg_t)); + code = emit_subx_imm (code, ARMREG_R1, ARMREG_R1, 1); + arm_patch_rel (labels [0], code, MONO_R_ARM64_B); + arm_cmpw (code, ARMREG_R1, ARMREG_RZR); + arm_bcc (code, ARMCOND_GT, labels [1]); /* Set argument registers + r8 */ - code = mono_arm_emit_load_regarray (code, 0x1ff, ARMREG_LR, 0); + code = mono_arm_emit_load_regarray (code, 0x1ff, ARMREG_LR, MONO_STRUCT_OFFSET (DynCallArgs, regs)); /* Make the call */ arm_blrx (code, ARMREG_IP1); @@ -4246,18 +4226,13 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) } case OP_GENERIC_CLASS_INIT: { - static int byte_offset = -1; - static guint8 bitmask; + int byte_offset; guint8 *jump; - if (byte_offset < 0) - mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask); + byte_offset = MONO_STRUCT_OFFSET (MonoVTable, initialized); /* Load vtable->initialized */ arm_ldrsbx (code, ARMREG_IP0, sreg1, byte_offset); - // FIXME: No andx_imm yet */ - code = mono_arm_emit_imm64 (code, ARMREG_IP1, bitmask); - arm_andx (code, ARMREG_IP0, ARMREG_IP0, ARMREG_IP1); jump = code; arm_cbnzx (code, ARMREG_IP0, 0); @@ -4328,6 +4303,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) mono_add_patch_info_rel (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb, MONO_R_ARM64_BL); arm_bl (code, 0); cfg->thunk_area += THUNK_SIZE; + mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb); break; case OP_START_HANDLER: { MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region); @@ -4376,7 +4352,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb) #endif break; } - + case OP_FILL_PROF_CALL_CTX: + for (int i = 0; i < MONO_MAX_IREGS; i++) + if ((MONO_ARCH_CALLEE_SAVED_REGS & (1 << i)) || i == ARMREG_SP || i == ARMREG_FP) + arm_strx (code, i, ins->sreg1, MONO_STRUCT_OFFSET (MonoContext, regs) + i * sizeof (mgreg_t)); + break; default: g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__); g_assert_not_reached (); @@ -4510,7 +4490,7 @@ emit_move_args (MonoCompile *cfg, guint8 *code) * Emit code to store the registers in REGS into the appropriate elements of * the register array at BASEREG+OFFSET. */ -static __attribute__((warn_unused_result)) guint8* +static __attribute__ ((__warn_unused_result__)) guint8* emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset) { int i; @@ -4537,7 +4517,7 @@ emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset) * Emit code to load the registers in REGS from the appropriate elements of * the register array at BASEREG+OFFSET. 
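The OP_DYN_CALL hunk above drops the fixed DYN_CALL_STACK_ARGS area: the emitted code now reads n_stackargs out of DynCallArgs, grows the stack by that many machine words, and copies them in a small loop using R1/R2/R3/R5 as scratch registers, while mono_arch_start_dyn_call () fills in n_stackargs = cinfo->stack_usage / sizeof (mgreg_t) and callers size the buffer through the new mono_arch_dyn_call_get_buf_size (). In C terms the emitted loop behaves roughly like the sketch below; the typedef and the PARAM_REGS value are stand-ins for the backend's own definitions (PARAM_REGS being the number of integer argument registers, with one extra slot for R8, the struct-return register):

#include <stddef.h>

typedef unsigned long mgreg_t;	/* stand-in for mono's machine-word type */
#define PARAM_REGS 8		/* x0-x7; stand-in for the backend constant */

/* Sketch only: copy the stack-passed arguments, which sit right after the
 * x0-x7 + r8 slots in DynCallArgs->regs, onto the freshly reserved callee
 * argument area. */
static void
copy_dyn_call_stack_args (const mgreg_t *regs, size_t n_stackargs, mgreg_t *callee_area)
{
	const mgreg_t *src = regs + PARAM_REGS + 1;

	for (size_t i = 0; i < n_stackargs; ++i)
		callee_area [i] = src [i];
}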
*/ -static __attribute__((warn_unused_result)) guint8* +static __attribute__ ((__warn_unused_result__)) guint8* emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset) { int i; @@ -4568,7 +4548,7 @@ emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset) * Emit code to store the registers in REGS into consecutive memory locations starting * at BASEREG+OFFSET. */ -static __attribute__((warn_unused_result)) guint8* +static __attribute__ ((__warn_unused_result__)) guint8* emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset) { int i, pos; @@ -4598,7 +4578,7 @@ emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset) * Emit code to load the registers in REGS from consecutive memory locations starting * at BASEREG+OFFSET. */ -static __attribute__((warn_unused_result)) guint8* +static __attribute__ ((__warn_unused_result__)) guint8* emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset) { int i, pos; @@ -4621,19 +4601,19 @@ emit_load_regset (guint8 *code, guint64 regs, int basereg, int offset) return code; } -__attribute__((warn_unused_result)) guint8* +__attribute__ ((__warn_unused_result__)) guint8* mono_arm_emit_load_regarray (guint8 *code, guint64 regs, int basereg, int offset) { return emit_load_regarray (code, regs, basereg, offset); } -__attribute__((warn_unused_result)) guint8* +__attribute__ ((__warn_unused_result__)) guint8* mono_arm_emit_store_regarray (guint8 *code, guint64 regs, int basereg, int offset) { return emit_store_regarray (code, regs, basereg, offset); } -__attribute__((warn_unused_result)) guint8* +__attribute__ ((__warn_unused_result__)) guint8* mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset) { return emit_store_regset (code, regs, basereg, offset); @@ -4641,7 +4621,7 @@ mono_arm_emit_store_regset (guint8 *code, guint64 regs, int basereg, int offset) /* Same as emit_store_regset, but emit unwind info too */ /* CFA_OFFSET is the offset between the CFA and basereg */ -static __attribute__((warn_unused_result)) guint8* +static __attribute__ ((__warn_unused_result__)) guint8* emit_store_regset_cfa (MonoCompile *cfg, guint8 *code, guint64 regs, int basereg, int offset, int cfa_offset, guint64 no_cfa_regset) { int i, j, pos, nregs; @@ -4973,12 +4953,6 @@ mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMetho return NULL; } -gboolean -mono_arch_print_tree (MonoInst *tree, int arity) -{ - return FALSE; -} - guint32 mono_arch_get_patch_offset (guint8 *code) { @@ -5287,22 +5261,3 @@ mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig) return get_call_info (mp, sig); } -gpointer -mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value) -{ - gpointer *lr_loc; - char *old_value; - char *bp; - - /*Load the spvar*/ - bp = MONO_CONTEXT_GET_BP (ctx); - lr_loc = (gpointer*)(bp + clause->exvar_offset); - - old_value = *lr_loc; - if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size)) - return old_value; - - *lr_loc = new_value; - - return old_value; -}
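Finally, the emit_store_regset / emit_load_regset family whose declarations change in the last hunks all walk a guint64 register mask and move each selected register to or from consecutive 8-byte slots; their bodies are not shown in this diff. The sketch below gives the one-register-at-a-time shape in terms of emit_strx () from this same file; the real emitters additionally pair neighbouring registers with stp/ldp, and the _cfa variant records unwind information for every register it saves:

/* Sketch only: simplified store_regset body.  The real code also emits stp
 * for adjacent register pairs and, in emit_store_regset_cfa, unwind-info
 * entries for each saved register. */
static guint8*
store_regset_sketch (guint8 *code, guint64 regs, int basereg, int offset)
{
	int i, pos;

	pos = 0;
	for (i = 0; i < 32; ++i) {
		if (regs & (1ULL << i)) {
			code = emit_strx (code, i, basereg, offset + (pos * 8));
			pos++;
		}
	}
	return code;
}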