Diffstat (limited to 'test/CodeGen/X86/vselect-avx.ll')
-rw-r--r--  test/CodeGen/X86/vselect-avx.ll | 102
1 file changed, 68 insertions(+), 34 deletions(-)
diff --git a/test/CodeGen/X86/vselect-avx.ll b/test/CodeGen/X86/vselect-avx.ll
index 002561042688..d9f783756d1e 100644
--- a/test/CodeGen/X86/vselect-avx.ll
+++ b/test/CodeGen/X86/vselect-avx.ll
@@ -1,23 +1,29 @@
-; RUN: llc %s -o - -mattr=+avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+avx | FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx"
 
 ; For this test we used to optimize the <i1 true, i1 false, i1 false, i1 true>
 ; mask into <i32 2147483648, i32 0, i32 0, i32 2147483648> because we thought
 ; we would lower that into a blend where only the high bit is relevant.
 ; However, since the whole mask is constant, this is simplified incorrectly
 ; by the generic code, because it was expecting -1 in place of 2147483648.
-; 
+;
 ; The problem does not occur without AVX, because vselect of v4i32 is not legal
 ; nor custom.
 ;
 ; <rdar://problem/18675020>
 
-; CHECK-LABEL: test:
-; CHECK: vmovdqa {{.*#+}} xmm1 = [65533,124,125,14807]
-; CHECK: vmovdqa {{.*#+}} xmm1 = [65535,0,0,65535]
-; CHECK: ret
 define void @test(<4 x i16>* %a, <4 x i16>* %b) {
+; CHECK-LABEL: test:
+; CHECK:       ## BB#0: ## %body
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [65533,124,125,14807]
+; CHECK-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; CHECK-NEXT:    vmovq %xmm1, (%rdi)
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [65535,0,0,65535]
+; CHECK-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vmovq %xmm0, (%rsi)
+; CHECK-NEXT:    retq
 body:
 %predphi = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -3, i16 545, i16 4385, i16 14807>, <4 x i16> <i16 123, i16 124, i16 125, i16 127>
 %predphi42 = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer
@@ -31,17 +37,22 @@ body:
 ; When shrinking the condition used into the select to match a blend, this
 ; test case exercises the path where the modified node is not the root
 ; of the condition.
-;
-; CHECK-LABEL: test2:
-; CHECK: vpslld $31, %xmm0, %xmm0
-; CHECK-NEXT: vpsrad $31, %xmm0, %xmm0
-; CHECK-NEXT: vpmovsxdq %xmm0, %xmm1
-; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0
-; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, [[MASK:%ymm[0-9]+]]
-; CHECK: vblendvpd [[MASK]]
-; CHECK: retq
+
 define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
+; CHECK-LABEL: test2:
+; CHECK:       ## BB#0: ## %bb
+; CHECK-NEXT:    vpslld $31, %xmm0, %xmm0
+; CHECK-NEXT:    vpsrad $31, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; CHECK-NEXT:    vpmovsxdq %xmm0, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    movq (%rdi,%rsi,8), %rax
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [5.000000e-01,5.000000e-01,5.000000e-01,5.000000e-01]
+; CHECK-NEXT:    vblendvpd %ymm0, {{.*}}(%rip), %ymm1, %ymm0
+; CHECK-NEXT:    vmovupd %ymm0, (%rax)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
 bb:
 %arrayidx1928 = getelementptr inbounds double*, double** %call1559, i64 %indvars.iv4198
 %tmp1888 = load double*, double** %arrayidx1928, align 8
@@ -57,22 +68,32 @@ bb:
 ; to be optimized into a and. In that case, the conditional mask was wrong.
 ;
 ; Make sure that the and is fed by the original mask.
-; 
+;
 ; <rdar://problem/18819506>
 
-; CHECK-LABEL: test3:
-; Compute the mask.
-; CHECK: vpcmpeqd {{%xmm[0-9]+}}, {{%xmm[0-9]+}}, [[MASK:%xmm[0-9]+]]
-; Do not shrink the bit of the mask.
-; CHECK-NOT: vpslld $31, [[MASK]], {{%xmm[0-9]+}}
-; Use the mask in the blend.
-; CHECK-NEXT: vblendvps [[MASK]], %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
-; Shuffle mask to truncate.
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; CHECK: vpshufb %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
-; CHECK: vpshufb %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
-; CHECK: retq
 define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
+; CHECK-LABEL: test3:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; CHECK-NEXT:    vpmuldq %xmm4, %xmm5, %xmm4
+; CHECK-NEXT:    vpmuldq %xmm3, %xmm0, %xmm3
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; CHECK-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; CHECK-NEXT:    vpsrld $31, %xmm3, %xmm4
+; CHECK-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
+; CHECK-NEXT:    vpmulld {{.*}}(%rip), %xmm3, %xmm3
+; CHECK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm1
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; CHECK-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vmovq %xmm0, (%rdi)
+; CHECK-NEXT:    vpshufb %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vmovq %xmm0, (%rsi)
+; CHECK-NEXT:    retq
 %tmp6 = srem <4 x i32> %induction30, <i32 3, i32 3, i32 3, i32 3>
 %tmp7 = icmp eq <4 x i32> %tmp6, zeroinitializer
 %predphi = select <4 x i1> %tmp7, <4 x i16> %tmp3, <4 x i16> %tmp12
@@ -85,11 +106,24 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
 
 ; We shouldn't try to lower this directly using VSELECT because we don't have
 ; vpblendvb in AVX1, only in AVX2. Instead, it should be expanded.
-;
-; CHECK-LABEL: PR22706:
-; CHECK: vpcmpgtb
-; CHECK: vpcmpgtb
+
 define <32 x i8> @PR22706(<32 x i1> %x) {
+; CHECK-LABEL: PR22706:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; CHECK-NEXT:    vpsllw $7, %xmm1, %xmm1
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; CHECK-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vpcmpgtb %xmm1, %xmm3, %xmm1
+; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vpcmpgtb %xmm0, %xmm3, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT:    vandnps {{.*}}(%rip), %ymm0, %ymm1
+; CHECK-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; CHECK-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    retq
 %tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
 ret <32 x i8> %tmp
 }
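As a side note, the regression described in the first comment block of this test (the fully-constant <4 x i1> select mask that must not be shrunk to an i32 sign-bit mask) can be reproduced in isolation. The snippet below is a minimal standalone sketch, not part of this commit: the function name @const_mask_select is hypothetical, and it is meant to be compiled with the same options as the test's RUN line.

; Hypothetical reduced reproducer (not part of this commit):
; RUN: llc < %s -mtriple=x86_64-apple-macosx -mattr=+avx
define <4 x i16> @const_mask_select(<4 x i16> %t, <4 x i16> %f) {
  ; The <4 x i1> mask is fully constant, so it must stay a boolean mask rather
  ; than being rewritten into an i32 mask where only the sign bit is meaningful.
  %sel = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i16> %t, <4 x i16> %f
  ret <4 x i16> %sel
}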
