X-Git-Url: http://wien.tomnetworks.com/gitweb/?a=blobdiff_plain;f=mono%2Fmini%2Fsimd-intrinsics.c;h=d7ddc17f17a128337679b6b55a096a47beb1fba3;hb=568792876151c97e32fc7facc17ec4314915008a;hp=685d9b0fa264fb6977413d06da8c14894510471a;hpb=217ddc29c40bc8b11f8fbd4800e61db7f4f22bbf;p=mono.git

diff --git a/mono/mini/simd-intrinsics.c b/mono/mini/simd-intrinsics.c
index 685d9b0fa26..d7ddc17f17a 100644
--- a/mono/mini/simd-intrinsics.c
+++ b/mono/mini/simd-intrinsics.c
@@ -33,7 +33,9 @@ TODO pass simd arguments in registers or, at least, add SSE support for pushing
 TODO passing simd args byval to a non-intrinsic method causes some useless local var load/store to happen.
 TODO check if we need to init the SSE control word with better precision.
 TODO add support for 3 reg sources in mini without slowing the common path. Or find a way to make MASKMOVDQU work.
-TODO make SimdRuntime.get_AccelMode work under AOT
+TODO make SimdRuntime.get_AccelMode work under AOT
+TODO patterns such as "a ^= b" generate slower code as the LDADDR op will be copied to a tmp first. Look at adding an indirection reduction pass after the dce pass.
+TODO extend the bounds checking code to support range checking.
 
 General notes for SIMD intrinsics.
 
@@ -73,6 +75,7 @@ enum {
 	SIMD_EMIT_CAST,
 	SIMD_EMIT_SHUFFLE,
 	SIMD_EMIT_SHIFT,
+	SIMD_EMIT_EQUALITY,
 	SIMD_EMIT_LOAD_ALIGNED,
 	SIMD_EMIT_STORE,
 	SIMD_EMIT_EXTRACT_MASK,
@@ -123,12 +126,8 @@ typedef struct {
 	guint8 flags;
 } SimdIntrinsc;
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector4f_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_R4, SIMD_EMIT_CTOR },
 	{ SN_AddSub, OP_ADDSUBPS, SIMD_EMIT_BINARY, SIMD_VERSION_SSE3 },
 	{ SN_AndNot, OP_ANDNPS, SIMD_EMIT_BINARY },
 	{ SN_CompareEqual, OP_COMPPS, SIMD_EMIT_BINARY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
@@ -166,18 +165,20 @@ static const SimdIntrinsc vector4f_intrinsics[] = {
 	{ SN_op_BitwiseAnd, OP_ANDPS, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseOr, OP_ORPS, SIMD_EMIT_BINARY },
 	{ SN_op_Division, OP_DIVPS, SIMD_EMIT_BINARY },
+	{ SN_op_Equality, OP_COMPPS, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
 	{ SN_op_ExclusiveOr, OP_XORPS, SIMD_EMIT_BINARY },
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
+	{ SN_op_Inequality, OP_COMPPS, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_NEQ },
 	{ SN_op_Multiply, OP_MULPS, SIMD_EMIT_BINARY },
 	{ SN_op_Subtraction, OP_SUBPS, SIMD_EMIT_BINARY },
+	{ SN_set_W, 3, SIMD_EMIT_SETTER },
+	{ SN_set_X, 0, SIMD_EMIT_SETTER },
+	{ SN_set_Y, 1, SIMD_EMIT_SETTER },
+	{ SN_set_Z, 2, SIMD_EMIT_SETTER },
 };
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector2d_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_R8, SIMD_EMIT_CTOR },
 	{ SN_AddSub, OP_ADDSUBPD, SIMD_EMIT_BINARY, SIMD_VERSION_SSE3 },
 	{ SN_AndNot, OP_ANDNPD, SIMD_EMIT_BINARY },
 	{ SN_CompareEqual, OP_COMPPD, SIMD_EMIT_BINARY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
@@ -211,16 +212,13 @@ static const SimdIntrinsc vector2d_intrinsics[] = {
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
 	{ SN_op_Multiply, OP_MULPD, SIMD_EMIT_BINARY },
 	{ SN_op_Subtraction, OP_SUBPD, SIMD_EMIT_BINARY },
+	{ SN_set_X, 0, SIMD_EMIT_SETTER },
+	{ SN_set_Y, 1, SIMD_EMIT_SETTER },
 };
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector2ul_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I8, SIMD_EMIT_CTOR },
 	{ SN_CompareEqual, OP_PCMPEQQ, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
-	{ SN_ExtractByteMask, 0, SIMD_EMIT_EXTRACT_MASK },
 	{ SN_LoadAligned, 0, SIMD_EMIT_LOAD_ALIGNED },
 	{ SN_PrefetchTemporalAllCacheLevels, 0, SIMD_EMIT_PREFETCH, SIMD_VERSION_SSE1, SIMD_PREFETCH_MODE_0 },
 	{ SN_PrefetchTemporal1stLevelCache, 0, SIMD_EMIT_PREFETCH, SIMD_VERSION_SSE1, SIMD_PREFETCH_MODE_1 },
@@ -240,17 +238,14 @@ static const SimdIntrinsc vector2ul_intrinsics[] = {
 	{ SN_op_Multiply, OP_PMULQ, SIMD_EMIT_BINARY },
 	{ SN_op_RightShift, OP_PSHRQ, SIMD_EMIT_SHIFT },
 	{ SN_op_Subtraction, OP_PSUBQ, SIMD_EMIT_BINARY },
+	{ SN_set_X, 0, SIMD_EMIT_SETTER },
+	{ SN_set_Y, 1, SIMD_EMIT_SETTER },
 };
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector2l_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I8, SIMD_EMIT_CTOR },
 	{ SN_CompareEqual, OP_PCMPEQQ, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
 	{ SN_CompareGreaterThan, OP_PCMPGTQ, SIMD_EMIT_BINARY, SIMD_VERSION_SSE42 },
-	{ SN_ExtractByteMask, 0, SIMD_EMIT_EXTRACT_MASK },
 	{ SN_LoadAligned, 0, SIMD_EMIT_LOAD_ALIGNED },
 	{ SN_LogicalRightShift, OP_PSHRQ, SIMD_EMIT_SHIFT },
 	{ SN_PrefetchTemporalAllCacheLevels, 0, SIMD_EMIT_PREFETCH, SIMD_VERSION_SSE1, SIMD_PREFETCH_MODE_0 },
@@ -270,17 +265,14 @@ static const SimdIntrinsc vector2l_intrinsics[] = {
 	{ SN_op_LeftShift, OP_PSHLQ, SIMD_EMIT_SHIFT },
 	{ SN_op_Multiply, OP_PMULQ, SIMD_EMIT_BINARY },
 	{ SN_op_Subtraction, OP_PSUBQ, SIMD_EMIT_BINARY },
+	{ SN_set_X, 0, SIMD_EMIT_SETTER },
+	{ SN_set_Y, 1, SIMD_EMIT_SETTER },
 };
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector4ui_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I4, SIMD_EMIT_CTOR },
 	{ SN_ArithmeticRightShift, OP_PSARD, SIMD_EMIT_SHIFT },
 	{ SN_CompareEqual, OP_PCMPEQD, SIMD_EMIT_BINARY },
-	{ SN_ExtractByteMask, 0, SIMD_EMIT_EXTRACT_MASK },
 	{ SN_LoadAligned, 0, SIMD_EMIT_LOAD_ALIGNED },
 	{ SN_Max, OP_PMAXD_UN, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
 	{ SN_Min, OP_PMIND_UN, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
@@ -301,23 +293,24 @@ static const SimdIntrinsc vector4ui_intrinsics[] = {
 	{ SN_op_Addition, OP_PADDD, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseAnd, OP_PAND, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseOr, OP_POR, SIMD_EMIT_BINARY },
+	{ SN_op_Equality, OP_PCMPEQD, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
 	{ SN_op_ExclusiveOr, OP_PXOR, SIMD_EMIT_BINARY },
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
+	{ SN_op_Inequality, OP_PCMPEQD, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_NEQ },
 	{ SN_op_LeftShift, OP_PSHLD, SIMD_EMIT_SHIFT },
 	{ SN_op_Multiply, OP_PMULD, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
 	{ SN_op_RightShift, OP_PSHRD, SIMD_EMIT_SHIFT },
 	{ SN_op_Subtraction, OP_PSUBD, SIMD_EMIT_BINARY },
+	{ SN_set_W, 3, SIMD_EMIT_SETTER },
+	{ SN_set_X, 0, SIMD_EMIT_SETTER },
+	{ SN_set_Y, 1, SIMD_EMIT_SETTER },
+	{ SN_set_Z, 2, SIMD_EMIT_SETTER },
 };
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector4i_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I4, SIMD_EMIT_CTOR },
 	{ SN_CompareEqual, OP_PCMPEQD, SIMD_EMIT_BINARY },
 	{ SN_CompareGreaterThan, OP_PCMPGTD, SIMD_EMIT_BINARY },
-	{ SN_ExtractByteMask, 0, SIMD_EMIT_EXTRACT_MASK },
 	{ SN_LoadAligned, 0, SIMD_EMIT_LOAD_ALIGNED },
 	{ SN_LogicalRightShift, OP_PSHRD, SIMD_EMIT_SHIFT },
 	{ SN_Max, OP_PMAXD, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
@@ -339,25 +332,26 @@ static const SimdIntrinsc vector4i_intrinsics[] = {
 	{ SN_op_Addition, OP_PADDD, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseAnd, OP_PAND, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseOr, OP_POR, SIMD_EMIT_BINARY },
+	{ SN_op_Equality, OP_PCMPEQD, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
 	{ SN_op_ExclusiveOr, OP_PXOR, SIMD_EMIT_BINARY },
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
+	{ SN_op_Inequality, OP_PCMPEQD, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_NEQ },
 	{ SN_op_LeftShift, OP_PSHLD, SIMD_EMIT_SHIFT },
 	{ SN_op_Multiply, OP_PMULD, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
 	{ SN_op_RightShift, OP_PSARD, SIMD_EMIT_SHIFT },
 	{ SN_op_Subtraction, OP_PSUBD, SIMD_EMIT_BINARY },
+	{ SN_set_W, 3, SIMD_EMIT_SETTER },
+	{ SN_set_X, 0, SIMD_EMIT_SETTER },
+	{ SN_set_Y, 1, SIMD_EMIT_SETTER },
+	{ SN_set_Z, 2, SIMD_EMIT_SETTER },
 };
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector8us_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I2, SIMD_EMIT_CTOR },
 	{ SN_AddWithSaturation, OP_PADDW_SAT_UN, SIMD_EMIT_BINARY },
 	{ SN_ArithmeticRightShift, OP_PSARW, SIMD_EMIT_SHIFT },
 	{ SN_Average, OP_PAVGW_UN, SIMD_EMIT_BINARY },
 	{ SN_CompareEqual, OP_PCMPEQW, SIMD_EMIT_BINARY },
-	{ SN_ExtractByteMask, 0, SIMD_EMIT_EXTRACT_MASK },
 	{ SN_LoadAligned, 0, SIMD_EMIT_LOAD_ALIGNED },
 	{ SN_Max, OP_PMAXW_UN, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
 	{ SN_Min, OP_PMINW_UN, SIMD_EMIT_BINARY, SIMD_VERSION_SSE41 },
@@ -385,20 +379,29 @@ static const SimdIntrinsc vector8us_intrinsics[] = {
 	{ SN_op_Addition, OP_PADDW, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseAnd, OP_PAND, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseOr, OP_POR, SIMD_EMIT_BINARY },
+	{ SN_op_Equality, OP_PCMPEQW, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
 	{ SN_op_ExclusiveOr, OP_PXOR, SIMD_EMIT_BINARY },
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
+	{ SN_op_Inequality, OP_PCMPEQW, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_NEQ },
 	{ SN_op_LeftShift, OP_PSHLW, SIMD_EMIT_SHIFT },
 	{ SN_op_Multiply, OP_PMULW, SIMD_EMIT_BINARY },
 	{ SN_op_RightShift, OP_PSHRW, SIMD_EMIT_SHIFT },
 	{ SN_op_Subtraction, OP_PSUBW, SIMD_EMIT_BINARY },
+	{ SN_set_V0, 0, SIMD_EMIT_SETTER },
+	{ SN_set_V1, 1, SIMD_EMIT_SETTER },
+	{ SN_set_V2, 2, SIMD_EMIT_SETTER },
+	{ SN_set_V3, 3, SIMD_EMIT_SETTER },
+	{ SN_set_V4, 4, SIMD_EMIT_SETTER },
+	{ SN_set_V5, 5, SIMD_EMIT_SETTER },
+	{ SN_set_V6, 6, SIMD_EMIT_SETTER },
+	{ SN_set_V7, 7, SIMD_EMIT_SETTER },
 };
 
 static const SimdIntrinsc vector8s_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I2, SIMD_EMIT_CTOR },
 	{ SN_AddWithSaturation, OP_PADDW_SAT, SIMD_EMIT_BINARY },
 	{ SN_CompareEqual, OP_PCMPEQW, SIMD_EMIT_BINARY },
 	{ SN_CompareGreaterThan, OP_PCMPGTW, SIMD_EMIT_BINARY },
-	{ SN_ExtractByteMask, 0, SIMD_EMIT_EXTRACT_MASK },
 	{ SN_LoadAligned, 0, SIMD_EMIT_LOAD_ALIGNED },
 	{ SN_LogicalRightShift, OP_PSHRW, SIMD_EMIT_SHIFT },
 	{ SN_Max, OP_PMAXW, SIMD_EMIT_BINARY },
@@ -427,8 +430,10 @@ static const SimdIntrinsc vector8s_intrinsics[] = {
 	{ SN_op_Addition, OP_PADDW, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseAnd, OP_PAND, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseOr, OP_POR, SIMD_EMIT_BINARY },
+	{ SN_op_Equality, OP_PCMPEQW, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
 	{ SN_op_ExclusiveOr, OP_PXOR, SIMD_EMIT_BINARY },
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
+	{ SN_op_Inequality, OP_PCMPEQW, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_NEQ },
 	{ SN_op_LeftShift, OP_PSHLW, SIMD_EMIT_SHIFT },
 	{ SN_op_Multiply, OP_PMULW, SIMD_EMIT_BINARY },
 	{ SN_op_RightShift, OP_PSARW, SIMD_EMIT_SHIFT },
@@ -443,12 +448,8 @@ static const SimdIntrinsc vector8s_intrinsics[] = {
 	{ SN_set_V7, 7, SIMD_EMIT_SETTER },
 };
 
-/*
-Missing:
-setters
- */
 static const SimdIntrinsc vector16b_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I1, SIMD_EMIT_CTOR },
 	{ SN_AddWithSaturation, OP_PADDB_SAT_UN, SIMD_EMIT_BINARY },
 	{ SN_Average, OP_PAVGB_UN, SIMD_EMIT_BINARY },
 	{ SN_CompareEqual, OP_PCMPEQB, SIMD_EMIT_BINARY },
@@ -484,9 +485,27 @@ static const SimdIntrinsc vector16b_intrinsics[] = {
 	{ SN_op_Addition, OP_PADDB, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseAnd, OP_PAND, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseOr, OP_POR, SIMD_EMIT_BINARY },
+	{ SN_op_Equality, OP_PCMPEQB, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
 	{ SN_op_ExclusiveOr, OP_PXOR, SIMD_EMIT_BINARY },
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
+	{ SN_op_Inequality, OP_PCMPEQB, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_NEQ },
 	{ SN_op_Subtraction, OP_PSUBB, SIMD_EMIT_BINARY },
+	{ SN_set_V0, 0, SIMD_EMIT_SETTER },
+	{ SN_set_V1, 1, SIMD_EMIT_SETTER },
+	{ SN_set_V10, 10, SIMD_EMIT_SETTER },
+	{ SN_set_V11, 11, SIMD_EMIT_SETTER },
+	{ SN_set_V12, 12, SIMD_EMIT_SETTER },
+	{ SN_set_V13, 13, SIMD_EMIT_SETTER },
+	{ SN_set_V14, 14, SIMD_EMIT_SETTER },
+	{ SN_set_V15, 15, SIMD_EMIT_SETTER },
+	{ SN_set_V2, 2, SIMD_EMIT_SETTER },
+	{ SN_set_V3, 3, SIMD_EMIT_SETTER },
+	{ SN_set_V4, 4, SIMD_EMIT_SETTER },
+	{ SN_set_V5, 5, SIMD_EMIT_SETTER },
+	{ SN_set_V6, 6, SIMD_EMIT_SETTER },
+	{ SN_set_V7, 7, SIMD_EMIT_SETTER },
+	{ SN_set_V8, 8, SIMD_EMIT_SETTER },
+	{ SN_set_V9, 9, SIMD_EMIT_SETTER },
 };
 
 /*
@@ -494,7 +513,7 @@ Missing:
 setters
  */
 static const SimdIntrinsc vector16sb_intrinsics[] = {
-	{ SN_ctor, 0, SIMD_EMIT_CTOR },
+	{ SN_ctor, OP_EXPAND_I1, SIMD_EMIT_CTOR },
 	{ SN_AddWithSaturation, OP_PADDB_SAT, SIMD_EMIT_BINARY },
 	{ SN_CompareEqual, OP_PCMPEQB, SIMD_EMIT_BINARY },
 	{ SN_CompareGreaterThan, OP_PCMPGTB, SIMD_EMIT_BINARY },
@@ -529,9 +548,27 @@ static const SimdIntrinsc vector16sb_intrinsics[] = {
 	{ SN_op_Addition, OP_PADDB, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseAnd, OP_PAND, SIMD_EMIT_BINARY },
 	{ SN_op_BitwiseOr, OP_POR, SIMD_EMIT_BINARY },
+	{ SN_op_Equality, OP_PCMPEQB, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_EQ },
 	{ SN_op_ExclusiveOr, OP_PXOR, SIMD_EMIT_BINARY },
 	{ SN_op_Explicit, 0, SIMD_EMIT_CAST },
+	{ SN_op_Inequality, OP_PCMPEQB, SIMD_EMIT_EQUALITY, SIMD_VERSION_SSE1, SIMD_COMP_NEQ },
 	{ SN_op_Subtraction, OP_PSUBB, SIMD_EMIT_BINARY },
+	{ SN_set_V0, 0, SIMD_EMIT_SETTER },
+	{ SN_set_V1, 1, SIMD_EMIT_SETTER },
+	{ SN_set_V10, 10, SIMD_EMIT_SETTER },
+	{ SN_set_V11, 11, SIMD_EMIT_SETTER },
+	{ SN_set_V12, 12, SIMD_EMIT_SETTER },
+	{ SN_set_V13, 13, SIMD_EMIT_SETTER },
+	{ SN_set_V14, 14, SIMD_EMIT_SETTER },
+	{ SN_set_V15, 15, SIMD_EMIT_SETTER },
+	{ SN_set_V2, 2, SIMD_EMIT_SETTER },
+	{ SN_set_V3, 3, SIMD_EMIT_SETTER },
+	{ SN_set_V4, 4, SIMD_EMIT_SETTER },
+	{ SN_set_V5, 5, SIMD_EMIT_SETTER },
+	{ SN_set_V6, 6, SIMD_EMIT_SETTER },
+	{ SN_set_V7, 7, SIMD_EMIT_SETTER },
+	{ SN_set_V8, 8, SIMD_EMIT_SETTER },
+	{ SN_set_V9, 9, SIMD_EMIT_SETTER },
 };
 
 static guint32 simd_supported_versions;
@@ -639,6 +676,9 @@ mono_simd_simplify_indirection (MonoCompile *cfg)
 
 	/*Scan the first basic block looking for xzeros not used*/
 	for (ins = first_bb->code; ins; ins = ins->next) {
+		int num_sregs;
+		int sregs [MONO_MAX_SRC_REGS];
+
 		if (ins->opcode == OP_XZERO) {
 			if (!(vreg_flags [ins->dreg] & VREG_HAS_OTHER_OP_BB0)) {
 				DEBUG (printf ("[simd-simplify] R%d has vzero: ", ins->dreg); mono_print_ins (ins));
@@ -648,13 +688,13 @@ mono_simd_simplify_indirection (MonoCompile *cfg)
 		}
 		if (ins->opcode == OP_LDADDR && apply_vreg_first_block_interference (cfg, ins, ((MonoInst*)ins->inst_p0)->dreg, max_vreg, vreg_flags))
 			continue;
-
 		if (apply_vreg_first_block_interference (cfg, ins, ins->dreg, max_vreg, vreg_flags))
 			continue;
-		if (apply_vreg_first_block_interference (cfg, ins, ins->sreg1, max_vreg, vreg_flags))
-			continue;
-		if (apply_vreg_first_block_interference (cfg, ins, ins->sreg2, max_vreg, vreg_flags))
-			continue;
+		num_sregs = mono_inst_get_src_registers (ins, sregs);
+		for (i = 0; i < num_sregs; ++i) {
+			if (apply_vreg_first_block_interference (cfg, ins, sregs [i], max_vreg, vreg_flags))
+				break;
+		}
 	}
 
 	if (IS_DEBUG_ON (cfg)) {
@@ -684,15 +724,19 @@ mono_simd_simplify_indirection (MonoCompile *cfg)
 
 	for (bb = first_bb->next_bb; bb; bb = bb->next_bb) {
 		for (ins = bb->code; ins; ins = ins->next) {
-
+			int num_sregs;
+			int sregs [MONO_MAX_SRC_REGS];
+
 			if (ins->opcode == OP_LDADDR && apply_vreg_following_block_interference (cfg, ins, ((MonoInst*)ins->inst_p0)->dreg, bb, max_vreg, vreg_flags, target_bb))
 				continue;
 			if (apply_vreg_following_block_interference (cfg, ins, ins->dreg, bb, max_vreg, vreg_flags, target_bb))
 				continue;
-			if (apply_vreg_following_block_interference (cfg, ins, ins->sreg1, bb, max_vreg, vreg_flags, target_bb))
-				continue;
-			if (apply_vreg_following_block_interference (cfg, ins, ins->sreg2, bb, max_vreg, vreg_flags, target_bb))
-				continue;
+			num_sregs = mono_inst_get_src_registers (ins, sregs);
+			for (i = 0; i < num_sregs; ++i) {
+				if (apply_vreg_following_block_interference (cfg, ins, sregs [i], bb,
+						max_vreg, vreg_flags, target_bb))
+					continue;
+			}
 		}
 	}
@@ -708,10 +752,19 @@ mono_simd_simplify_indirection (MonoCompile *cfg)
 		if (!(vreg_flags [var->dreg] & VREG_SINGLE_BB_USE))
 			continue;
 		for (ins = target_bb [var->dreg]->code; ins; ins = ins->next) {
+			int num_sregs, j;
+			int sregs [MONO_MAX_SRC_REGS];
+			gboolean found = FALSE;
+
+			num_sregs = mono_inst_get_src_registers (ins, sregs);
+			for (j = 0; j < num_sregs; ++j) {
+				if (sregs [j] == var->dreg)
+					found = TRUE;
+			}
 			/*We can avoid inserting the XZERO if the first use doesn't depend on the zero'ed value.*/
-			if (ins->dreg == var->dreg && ins->sreg1 != var->dreg && ins->sreg2 != var->dreg) {
+			if (ins->dreg == var->dreg && !found) {
 				break;
-			} else if (ins->sreg1 == var->dreg || ins->sreg2 == var->dreg) {
+			} else if (found) {
 				MonoInst *tmp;
 				MONO_INST_NEW (cfg, tmp, OP_XZERO);
 				tmp->dreg = var->dreg;
@@ -886,21 +939,70 @@ mono_type_elements_shift_bits (MonoType *type)
 	g_assert_not_reached ();
 }
 
+static int
+mono_type_to_slow_insert_op (MonoType *type)
+{
+	switch (type->type) {
+	case MONO_TYPE_I1:
+	case MONO_TYPE_U1:
+		return OP_INSERTX_U1_SLOW;
+	case MONO_TYPE_I2:
+	case MONO_TYPE_U2:
+		return OP_INSERT_I2;
+	case MONO_TYPE_I4:
+	case MONO_TYPE_U4:
+		return OP_INSERTX_I4_SLOW;
+	case MONO_TYPE_I8:
+	case MONO_TYPE_U8:
+		return OP_INSERTX_I8_SLOW;
+	case MONO_TYPE_R4:
+		return OP_INSERTX_R4_SLOW;
+	case MONO_TYPE_R8:
+		return OP_INSERTX_R8_SLOW;
+	}
+	g_assert_not_reached ();
+}
+
 static MonoInst*
 simd_intrinsic_emit_setter (const SimdIntrinsc *intrinsic, MonoCompile *cfg, MonoMethod *cmethod, MonoInst **args)
 {
 	MonoInst *ins;
+	MonoMethodSignature *sig = mono_method_signature (cmethod);
+	int size, align;
+	size = mono_type_size (sig->params [0], &align);
 
-	MONO_INST_NEW (cfg, ins, OP_INSERT_I2);
-	ins->klass = cmethod->klass;
-	/*This is a partial load so we encode the dependency on the previous value by setting dreg and sreg1 to the same value.*/
-	ins->dreg = ins->sreg1 = load_simd_vreg (cfg, cmethod, args [0]);
-	ins->type = STACK_I4;
-	ins->sreg2 = args [1]->dreg;
-	ins->inst_c0 = intrinsic->opcode;
-	MONO_ADD_INS (cfg->cbb, ins);
+	if (size == 2 || size == 4 || size == 8) {
+		MONO_INST_NEW (cfg, ins, mono_type_to_slow_insert_op (sig->params [0]));
+		ins->klass = cmethod->klass;
+		/*This is a partial load, so we encode the dependency on the previous value by setting dreg and sreg1 to the same value.*/
+		ins->dreg = ins->sreg1 = load_simd_vreg (cfg, cmethod, args [0]);
+		ins->sreg2 = args [1]->dreg;
+		ins->inst_c0 = intrinsic->opcode;
+		if (sig->params [0]->type == MONO_TYPE_R4)
+			ins->backend.spill_var = get_int_to_float_spill_area (cfg);
+		else if (sig->params [0]->type == MONO_TYPE_R8)
+			ins->backend.spill_var = get_double_spill_area (cfg);
+		MONO_ADD_INS (cfg->cbb, ins);
+	} else {
+		int vreg, sreg;
+
+		MONO_INST_NEW (cfg, ins, OP_EXTRACTX_U2);
+		ins->klass = cmethod->klass;
+		ins->sreg1 = sreg = load_simd_vreg (cfg, cmethod, args [0]);
+		ins->type = STACK_I4;
+		ins->dreg = vreg = alloc_ireg (cfg);
+		ins->inst_c0 = intrinsic->opcode / 2;
+		MONO_ADD_INS (cfg->cbb, ins);
+
+		MONO_INST_NEW (cfg, ins, OP_INSERTX_U1_SLOW);
+		ins->klass = cmethod->klass;
+		ins->sreg1 = vreg;
+		ins->sreg2 = args [1]->dreg;
+		ins->dreg = sreg;
+		ins->inst_c0 = intrinsic->opcode;
+		MONO_ADD_INS (cfg->cbb, ins);
+	}
 	return ins;
 }
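Note on the size == 1 branch above: SSE2 can insert and extract 16-bit lanes (PINSRW/PEXTRW) but has no byte insert until SSE4.1's PINSRB, so byte setters go through the containing word. A minimal sketch of the same merge using SSE2 compiler intrinsics, written for a fixed lane so the immediate operands stay constant (set_byte3 is a hypothetical name, not part of this patch):

#include <emmintrin.h>

/* Mirrors the OP_EXTRACTX_U2 + OP_INSERTX_U1_SLOW pair emitted above, where
   inst_c0 is the byte lane and inst_c0 / 2 is the 16-bit lane containing it. */
static __m128i
set_byte3 (__m128i v, unsigned char val)
{
	int word = _mm_extract_epi16 (v, 1);        /* PEXTRW: word lane 1 holds byte lanes 2-3 */
	word = (word & 0x00FF) | ((int) val << 8);  /* replace the high byte of the word */
	return _mm_insert_epi16 (v, word, 1);       /* PINSRW: write the merged word back */
}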
@@ -979,6 +1081,38 @@ simd_intrinsic_emit_ctor (const SimdIntrinsc *intrinsic, MonoCompile *cfg, MonoM
 	int store_op = mono_type_to_store_membase (cfg, sig->params [0]);
 	int arg_size = mono_type_size (sig->params [0], &i);
 
+	if (sig->param_count == 1) {
+		int dreg;
+
+		if (is_ldaddr) {
+			dreg = args [0]->inst_i0->dreg;
+			NULLIFY_INS (args [0]);
+		} else {
+			g_assert (args [0]->type == STACK_MP || args [0]->type == STACK_PTR);
+			dreg = alloc_ireg (cfg);
+		}
+
+		MONO_INST_NEW (cfg, ins, intrinsic->opcode);
+		ins->klass = cmethod->klass;
+		ins->sreg1 = args [1]->dreg;
+		ins->type = STACK_VTYPE;
+		ins->dreg = dreg;
+
+		MONO_ADD_INS (cfg->cbb, ins);
+		if (sig->params [0]->type == MONO_TYPE_R4)
+			ins->backend.spill_var = get_int_to_float_spill_area (cfg);
+		else if (sig->params [0]->type == MONO_TYPE_R8)
+			ins->backend.spill_var = get_double_spill_area (cfg);
+
+		if (!is_ldaddr) {
+			MONO_INST_NEW (cfg, ins, OP_STOREX_MEMBASE);
+			ins->dreg = args [0]->dreg;
+			ins->sreg1 = dreg;
+			MONO_ADD_INS (cfg->cbb, ins);
+		}
+		return ins;
+	}
+
 	if (is_ldaddr) {
 		NEW_VARLOADA (cfg, ins, get_simd_ctor_spill_area (cfg, cmethod->klass), &cmethod->klass->byref_arg);
 		MONO_ADD_INS (cfg->cbb, ins);
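Note on the one-argument constructor path above: the OP_EXPAND_* opcodes wired into the tables broadcast the scalar argument into every lane of the XMM register. A sketch of what OP_EXPAND_R4 computes, using SSE1 compiler intrinsics (expand_r4 is a hypothetical name for illustration):

#include <xmmintrin.h>

static __m128
expand_r4 (float x)
{
	__m128 v = _mm_set_ss (x);           /* lane 0 = x, lanes 1-3 = 0 */
	return _mm_shuffle_ps (v, v, 0x00);  /* SHUFPS selector 0: all lanes read lane 0 */
}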
@@ -1025,7 +1159,6 @@ simd_intrinsic_emit_cast (const SimdIntrinsc *intrinsic, MonoCompile *cfg, MonoM
 }
 
 static MonoInst*
-
 simd_intrinsic_emit_shift (const SimdIntrinsc *intrinsic, MonoCompile *cfg, MonoMethod *cmethod, MonoInst **args)
 {
 	MonoInst *ins;
@@ -1060,6 +1193,52 @@ simd_intrinsic_emit_shift (const SimdIntrinsc *intrinsic, MonoCompile *cfg, Mono
 	return ins;
 }
 
+static inline gboolean
+mono_op_is_packed_compare (int op)
+{
+	return op >= OP_PCMPEQB && op <= OP_PCMPEQQ;
+}
+
+static MonoInst*
+simd_intrinsic_emit_equality (const SimdIntrinsc *intrinsic, MonoCompile *cfg, MonoMethod *cmethod, MonoInst **args)
+{
+	MonoInst *ins;
+	int left_vreg, right_vreg, tmp_vreg;
+
+	left_vreg = get_simd_vreg (cfg, cmethod, args [0]);
+	right_vreg = get_simd_vreg (cfg, cmethod, args [1]);
+
+	MONO_INST_NEW (cfg, ins, intrinsic->opcode);
+	ins->klass = cmethod->klass;
+	ins->sreg1 = left_vreg;
+	ins->sreg2 = right_vreg;
+	ins->type = STACK_VTYPE;
+	ins->dreg = tmp_vreg = alloc_ireg (cfg);
+	ins->inst_c0 = intrinsic->flags;
+	MONO_ADD_INS (cfg->cbb, ins);
+
+	/*FIXME the next ops are SSE specific*/
+	MONO_INST_NEW (cfg, ins, OP_EXTRACT_MASK);
+	ins->klass = cmethod->klass;
+	ins->sreg1 = tmp_vreg;
+	ins->type = STACK_I4;
+	ins->dreg = tmp_vreg = alloc_ireg (cfg);
+	MONO_ADD_INS (cfg->cbb, ins);
+
+	/*Packed integer compares and FP equality require every mask bit set, so we test against 0xFFFF. FP inequality uses the dedicated not-equal compare, whose lanes mark differing elements, so any set bit (OR semantics) means the vectors differ.*/
+	if (mono_op_is_packed_compare (intrinsic->opcode) || intrinsic->flags == SIMD_COMP_EQ) {
+		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_vreg, 0xFFFF);
+		NEW_UNALU (cfg, ins, intrinsic->flags == SIMD_COMP_EQ ? OP_CEQ : OP_CLT_UN, tmp_vreg, -1);
+	} else {
+		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_vreg, 0);
+		NEW_UNALU (cfg, ins, OP_CGT_UN, tmp_vreg, -1);
+	}
+	MONO_ADD_INS (cfg->cbb, ins);
+	return ins;
+}
+
 static MonoInst*
 simd_intrinsic_emit_shuffle (const SimdIntrinsc *intrinsic, MonoCompile *cfg, MonoMethod *cmethod, MonoInst **args)
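Note on the reduction above: the packed compare leaves each lane all-ones or all-zero, OP_EXTRACT_MASK (PMOVMSKB) packs the sign bit of all 16 bytes into an integer, and a single scalar compare of that mask yields the boolean. A sketch of the equivalent computation for Vector4i's op_Equality, using SSE2 compiler intrinsics (vector4i_equals is a hypothetical name):

#include <emmintrin.h>

static int
vector4i_equals (__m128i a, __m128i b)
{
	__m128i lanes = _mm_cmpeq_epi32 (a, b);  /* PCMPEQD: all-ones where lanes match */
	int mask = _mm_movemask_epi8 (lanes);    /* PMOVMSKB: one bit per byte, 16 bits total */
	/* op_Inequality instead tests mask != 0xFFFF (OP_CLT_UN in the IR above). */
	return mask == 0xFFFF;                   /* equal iff every byte position matched */
}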
@@ -1212,6 +1391,8 @@ emit_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsi
 		return simd_intrinsic_emit_shuffle (result, cfg, cmethod, args);
 	case SIMD_EMIT_SHIFT:
 		return simd_intrinsic_emit_shift (result, cfg, cmethod, args);
+	case SIMD_EMIT_EQUALITY:
+		return simd_intrinsic_emit_equality (result, cfg, cmethod, args);
 	case SIMD_EMIT_LOAD_ALIGNED:
 		return simd_intrinsic_emit_load_aligned (result, cfg, cmethod, args);
 	case SIMD_EMIT_STORE:
@@ -1224,6 +1405,87 @@ emit_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsi
 	g_assert_not_reached ();
 }
 
+static int
+mono_emit_vector_ldelema (MonoCompile *cfg, MonoType *array_type, MonoInst *arr, MonoInst *index, gboolean check_bounds)
+{
+	MonoInst *ins;
+	guint32 size;
+	int mult_reg, add_reg, array_reg, index_reg, index2_reg, index3_reg;
+
+	size = mono_array_element_size (mono_class_from_mono_type (array_type));
+	mult_reg = alloc_preg (cfg);
+	array_reg = arr->dreg;
+	index_reg = index->dreg;
+
+#if SIZEOF_VOID_P == 8
+	/* The array reg is 64 bits but the index reg is only 32 */
+	index2_reg = alloc_preg (cfg);
+	MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
+#else
+	index2_reg = index_reg;
+#endif
+	index3_reg = alloc_preg (cfg);
+
+	if (check_bounds) {
+		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
+		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PADD_IMM, index3_reg, index2_reg, 16 / size - 1);
+		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index3_reg);
+	}
+
+	add_reg = alloc_preg (cfg);
+
+	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
+	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
+	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
+	ins->type = STACK_PTR;
+	MONO_ADD_INS (cfg->cbb, ins);
+
+	return add_reg;
+}
+
+static MonoInst*
+emit_array_extension_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+	if (!strcmp ("GetVector", cmethod->name) || !strcmp ("GetVectorAligned", cmethod->name)) {
+		MonoInst *load;
+		int addr = mono_emit_vector_ldelema (cfg, fsig->params [0], args [0], args [1], TRUE);
+
+		MONO_INST_NEW (cfg, load, !strcmp ("GetVectorAligned", cmethod->name) ? OP_LOADX_ALIGNED_MEMBASE : OP_LOADX_MEMBASE);
+		load->klass = cmethod->klass;
+		load->sreg1 = addr;
+		load->type = STACK_VTYPE;
+		load->dreg = alloc_ireg (cfg);
+		MONO_ADD_INS (cfg->cbb, load);
+
+		return load;
+	}
+	if (!strcmp ("SetVector", cmethod->name) || !strcmp ("SetVectorAligned", cmethod->name)) {
+		MonoInst *store;
+		int vreg = get_simd_vreg (cfg, cmethod, args [1]);
+		int addr = mono_emit_vector_ldelema (cfg, fsig->params [0], args [0], args [2], TRUE);
+
+		MONO_INST_NEW (cfg, store, !strcmp ("SetVectorAligned", cmethod->name) ? OP_STOREX_ALIGNED_MEMBASE_REG : OP_STOREX_MEMBASE);
+		store->klass = cmethod->klass;
+		store->dreg = addr;
+		store->sreg1 = vreg;
+		MONO_ADD_INS (cfg->cbb, store);
+
+		return store;
+	}
+	if (!strcmp ("IsAligned", cmethod->name)) {
+		MonoInst *ins;
+		int addr = mono_emit_vector_ldelema (cfg, fsig->params [0], args [0], args [1], FALSE);
+
+		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, addr, addr, 15);
+		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, addr, 0);
+		NEW_UNALU (cfg, ins, OP_CEQ, addr, -1);
+		MONO_ADD_INS (cfg->cbb, ins);
+
+		return ins;
+	}
+	return NULL;
+}
+
 static MonoInst*
 emit_simd_runtime_intrinsics (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
 {
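Note on mono_emit_vector_ldelema above: a 16-byte access touches 16 / size consecutive elements, so with check_bounds both the first and the last element index are range-checked before the address arr->vector + index * size is formed; IsAligned then reduces to testing the low four address bits, since the aligned load/store forms (MOVAPS/MOVDQA) fault on unaligned addresses. A scalar model of those checks, reusing existing mono array accessors (vector_elem_addr is a hypothetical helper, not part of this patch):

static gpointer
vector_elem_addr (MonoArray *arr, guint32 elem_size, int index)
{
	/* Both ends of the 16-byte span must lie inside the array. */
	g_assert (index >= 0 && index + 16 / elem_size - 1 < mono_array_length (arr));
	return mono_array_addr_with_size (arr, elem_size, index);
}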
("Vector16b", class_name)) return emit_intrinsics (cfg, cmethod, fsig, args, vector16b_intrinsics, sizeof (vector16b_intrinsics) / sizeof (SimdIntrinsc)); - if (!strcmp ("Vector16sb", cmethod->klass->name)) + if (!strcmp ("Vector16sb", class_name)) return emit_intrinsics (cfg, cmethod, fsig, args, vector16sb_intrinsics, sizeof (vector16sb_intrinsics) / sizeof (SimdIntrinsc)); return NULL;