Diffstat (limited to 'test/CodeGen/X86/vector-shuffle-combining.ll')
-rw-r--r-- | test/CodeGen/X86/vector-shuffle-combining.ll | 99
1 file changed, 86 insertions, 13 deletions
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
index 75ce9753525b..266a3658eda9 100644
--- a/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -96,10 +96,15 @@ define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: combine_pshufd6:
-; AVX:       # BB#0: # %entry
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX-NEXT:    retq
+; AVX1-LABEL: combine_pshufd6:
+; AVX1:      # BB#0: # %entry
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_pshufd6:
+; AVX2:      # BB#0: # %entry
+; AVX2-NEXT:    vbroadcastss %xmm0, %xmm0
+; AVX2-NEXT:    retq
 entry:
   %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 0)
   %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 8)
@@ -1783,13 +1788,13 @@ define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
 ; SSE-LABEL: combine_test22:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT:    movhpd (%rsi), %xmm0
+; SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_test22:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT:    vmovhpd (%rsi), %xmm0, %xmm0
+; AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX-NEXT:    retq
 ; Current AVX2 lowering of this is still awful, not adding a test case.
   %1 = load <2 x float>, <2 x float>* %a, align 8
@@ -1798,6 +1803,29 @@ define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
   ret <8 x float> %3
 }
 
+; PR22359
+define void @combine_test23(<8 x float> %v, <2 x float>* %ptr) {
+; SSE-LABEL: combine_test23:
+; SSE:       # BB#0:
+; SSE-NEXT:    movups %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_test23:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX-NEXT:    vmovups %xmm0, (%rdi)
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %idx2 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 1
+  %shuffle0 = shufflevector <8 x float> %v, <8 x float> undef, <2 x i32> <i32 0, i32 1>
+  %shuffle1 = shufflevector <8 x float> %v, <8 x float> undef, <2 x i32> <i32 2, i32 3>
+  store <2 x float> %shuffle0, <2 x float>* %ptr, align 8
+  store <2 x float> %shuffle1, <2 x float>* %idx2, align 8
+  ret void
+}
+
 ; Check some negative cases.
 ; FIXME: Do any of these really make sense? Are they redundant with the above tests?
@@ -2412,7 +2440,7 @@ define <4 x float> @combine_undef_input_test9(<4 x float> %a) {
 ;
 ; AVX-LABEL: combine_undef_input_test9:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
   %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
@@ -2603,7 +2631,7 @@ define <4 x float> @combine_undef_input_test19(<4 x float> %a) {
 ;
 ; AVX-LABEL: combine_undef_input_test19:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovhlps {{.*#+}} xmm0 = xmm0[1,1]
+; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,1]
 ; AVX-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
   %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
@@ -2636,15 +2664,16 @@ define <8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) {
 ; AVX1:      # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_unneeded_subvector1:
 ; AVX2:      # BB#0:
 ; AVX2-NEXT:    vpaddd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
-; AVX2-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    retq
   %b = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
   %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 7, i32 6, i32 5, i32 4>
@@ -2795,6 +2824,50 @@ define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
   ret <4 x float> %d
 }
 
+; FIXME: Failed to recognise that the VMOVSD has already zero'd the upper element
+define void @combine_scalar_load_with_blend_with_zero(double* %a0, <4 x float>* %a1) {
+; SSE2-LABEL: combine_scalar_load_with_blend_with_zero:
+; SSE2:      # BB#0:
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE2-NEXT:    movaps %xmm0, (%rsi)
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: combine_scalar_load_with_blend_with_zero:
+; SSSE3:     # BB#0:
+; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT:    xorps %xmm1, %xmm1
+; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSSE3-NEXT:    movaps %xmm0, (%rsi)
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: combine_scalar_load_with_blend_with_zero:
+; SSE41:     # BB#0:
+; SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE41-NEXT:    xorpd %xmm1, %xmm1
+; SSE41-NEXT:    blendpd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE41-NEXT:    movapd %xmm1, (%rsi)
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX-NEXT:    vmovapd %xmm0, (%rsi)
+; AVX-NEXT:    retq
+  %1 = load double, double* %a0, align 8
+  %2 = insertelement <2 x double> undef, double %1, i32 0
+  %3 = insertelement <2 x double> %2, double 0.000000e+00, i32 1
+  %4 = bitcast <2 x double> %3 to <4 x float>
+  %5 = shufflevector <4 x float> %4, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+  store <4 x float> %5, <4 x float>* %a1, align 16
+  ret void
+}
+
 define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: PR22377:
 ; SSE:       # BB#0: # %entry
@@ -2898,8 +2971,8 @@ define <8 x float> @PR22412(<8 x float> %a, <8 x float> %b) {
 ; AVX2-LABEL: PR22412:
 ; AVX2:      # BB#0: # %entry
 ; AVX2-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [1,0,7,6,5,4,3,2]
-; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
 ; AVX2-NEXT:    retq
 entry:
   %s1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>