From 6b3f41ed88e8e440e11a4fbf20b6600529f80049 Mon Sep 17 00:00:00 2001 From: Dimitry Andric Date: Tue, 16 May 2017 19:46:52 +0000 Subject: Vendor import of llvm trunk r303197: https://llvm.org/svn/llvm-project/llvm/trunk@303197 --- test/CodeGen/X86/vector-lzcnt-128.ll | 380 ++++++++++++++++++++++++++--------- 1 file changed, 281 insertions(+), 99 deletions(-) (limited to 'test/CodeGen/X86/vector-lzcnt-128.ll') diff --git a/test/CodeGen/X86/vector-lzcnt-128.ll b/test/CodeGen/X86/vector-lzcnt-128.ll index f1f795bf3cb0..e3261d15538f 100644 --- a/test/CodeGen/X86/vector-lzcnt-128.ll +++ b/test/CodeGen/X86/vector-lzcnt-128.ll @@ -1,15 +1,17 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2 -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VLCD -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512CD +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64 --check-prefix=SSE --check-prefix=SSE41 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX --check-prefix=AVX512VL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefix=X64 --check-prefix=AVX512VLBWDQ +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512cd -mattr=+avx512vl | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX512 --check-prefix=AVX512VLCD +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512cd | FileCheck %s --check-prefix=X64 --check-prefix=NOBW --check-prefix=AVX512 --check-prefix=AVX512CD ; ; Just one 32-bit run to make sure we do reasonable things for i64 lzcnt. 
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE41 +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32-SSE define <2 x i64> @testv2i64(<2 x i64> %in) nounwind { ; SSE2-LABEL: testv2i64: @@ -194,16 +196,46 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind { ; AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv2i64: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1 +; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5 +; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddd %xmm2, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqd %xmm4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddq %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512VLCD-LABEL: testv2i64: -; AVX512VLCD: ## BB#0: +; AVX512VLCD: # BB#0: ; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0 ; AVX512VLCD-NEXT: retq ; ; AVX512CD-LABEL: testv2i64: -; AVX512CD: ## BB#0: -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD: # BB#0: +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0 -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: retq ; ; X32-SSE-LABEL: testv2i64: @@ -429,16 +461,46 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind { ; AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv2i64u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1 +; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5 +; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpand %xmm2, 
%xmm1, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddd %xmm2, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqd %xmm4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlq $32, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddq %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512VLCD-LABEL: testv2i64u: -; AVX512VLCD: ## BB#0: +; AVX512VLCD: # BB#0: ; AVX512VLCD-NEXT: vplzcntq %xmm0, %xmm0 ; AVX512VLCD-NEXT: retq ; ; AVX512CD-LABEL: testv2i64u: -; AVX512CD: ## BB#0: -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD: # BB#0: +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0 -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: retq ; ; X32-SSE-LABEL: testv2i64u: @@ -651,16 +713,41 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind { ; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv4i32: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1 +; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5 +; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512VLCD-LABEL: testv4i32: -; AVX512VLCD: ## BB#0: +; AVX512VLCD: # BB#0: ; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0 ; AVX512VLCD-NEXT: retq ; ; AVX512CD-LABEL: testv4i32: -; AVX512CD: ## BB#0: -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD: # BB#0: +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: retq ; ; X32-SSE-LABEL: testv4i32: @@ -867,16 +954,41 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind { ; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv4i32u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1 +; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5 +; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2 +; 
AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpand %xmm2, %xmm1, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddw %xmm2, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrld $16, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512VLCD-LABEL: testv4i32u: -; AVX512VLCD: ## BB#0: +; AVX512VLCD: # BB#0: ; AVX512VLCD-NEXT: vplzcntd %xmm0, %xmm0 ; AVX512VLCD-NEXT: retq ; ; AVX512CD-LABEL: testv4i32u: -; AVX512CD: ## BB#0: -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD: # BB#0: +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 -; AVX512CD-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 +; AVX512CD-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512CD-NEXT: retq ; ; X32-SSE-LABEL: testv4i32u: @@ -1054,8 +1166,28 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind { ; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv8i16: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1 +; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5 +; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512VLCD-LABEL: testv8i16: -; AVX512VLCD: ## BB#0: +; AVX512VLCD: # BB#0: ; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0 ; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0 @@ -1063,7 +1195,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind { ; AVX512VLCD-NEXT: retq ; ; AVX512CD-LABEL: testv8i16: -; AVX512CD: ## BB#0: +; AVX512CD: # BB#0: ; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 ; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0 @@ -1238,8 +1370,28 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind { ; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv8i16u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm4, %xmm1 +; AVX512VLBWDQ-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5 +; AVX512VLBWDQ-NEXT: vpand %xmm5, %xmm2, %xmm2 +; AVX512VLBWDQ-NEXT: vpshufb %xmm1, %xmm3, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; 
AVX512VLBWDQ-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512VLCD-LABEL: testv8i16u: -; AVX512VLCD: ## BB#0: +; AVX512VLCD: # BB#0: ; AVX512VLCD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512VLCD-NEXT: vplzcntd %ymm0, %ymm0 ; AVX512VLCD-NEXT: vpmovdw %ymm0, %xmm0 @@ -1247,7 +1399,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind { ; AVX512VLCD-NEXT: retq ; ; AVX512CD-LABEL: testv8i16u: -; AVX512CD: ## BB#0: +; AVX512CD: # BB#0: ; AVX512CD-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0 ; AVX512CD-NEXT: vpmovdw %zmm0, %ymm0 @@ -1399,8 +1551,23 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind { ; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv16i8: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm2, %xmm1 +; AVX512VLBWDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0 +; AVX512VLBWDQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512-LABEL: testv16i8: -; AVX512: ## BB#0: +; AVX512: # BB#0: ; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512-NEXT: vplzcntd %zmm0, %zmm0 ; AVX512-NEXT: vpmovdb %zmm0, %xmm0 @@ -1546,8 +1713,23 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind { ; AVX-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq ; +; AVX512VLBWDQ-LABEL: testv16i8u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] +; AVX512VLBWDQ-NEXT: vpshufb %xmm2, %xmm3, %xmm2 +; AVX512VLBWDQ-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX512VLBWDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX512VLBWDQ-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm1 +; AVX512VLBWDQ-NEXT: vpand %xmm1, %xmm2, %xmm1 +; AVX512VLBWDQ-NEXT: vpshufb %xmm0, %xmm3, %xmm0 +; AVX512VLBWDQ-NEXT: vpaddb %xmm0, %xmm1, %xmm0 +; AVX512VLBWDQ-NEXT: retq +; ; AVX512-LABEL: testv16i8u: -; AVX512: ## BB#0: +; AVX512: # BB#0: ; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = 
xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero ; AVX512-NEXT: vplzcntd %zmm0, %zmm0 ; AVX512-NEXT: vpmovdb %zmm0, %xmm0 @@ -1582,17 +1764,17 @@ define <2 x i64> @foldv2i64() nounwind { ; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: foldv2i64: -; AVX: # BB#0: -; AVX-NEXT: movl $55, %eax -; AVX-NEXT: vmovq %rax, %xmm0 -; AVX-NEXT: retq +; NOBW-LABEL: foldv2i64: +; NOBW: # BB#0: +; NOBW-NEXT: movl $55, %eax +; NOBW-NEXT: vmovq %rax, %xmm0 +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv2i64: -; AVX512: ## BB#0: -; AVX512-NEXT: movl $55, %eax -; AVX512-NEXT: vmovq %rax, %xmm0 -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv2i64: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: movl $55, %eax +; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0 +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv2i64: ; X32-SSE: # BB#0: @@ -1610,17 +1792,17 @@ define <2 x i64> @foldv2i64u() nounwind { ; SSE-NEXT: movq %rax, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: foldv2i64u: -; AVX: # BB#0: -; AVX-NEXT: movl $55, %eax -; AVX-NEXT: vmovq %rax, %xmm0 -; AVX-NEXT: retq +; NOBW-LABEL: foldv2i64u: +; NOBW: # BB#0: +; NOBW-NEXT: movl $55, %eax +; NOBW-NEXT: vmovq %rax, %xmm0 +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv2i64u: -; AVX512: ## BB#0: -; AVX512-NEXT: movl $55, %eax -; AVX512-NEXT: vmovq %rax, %xmm0 -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv2i64u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: movl $55, %eax +; AVX512VLBWDQ-NEXT: vmovq %rax, %xmm0 +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv2i64u: ; X32-SSE: # BB#0: @@ -1637,15 +1819,15 @@ define <4 x i32> @foldv4i32() nounwind { ; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24] ; SSE-NEXT: retq ; -; AVX-LABEL: foldv4i32: -; AVX: # BB#0: -; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] -; AVX-NEXT: retq +; NOBW-LABEL: foldv4i32: +; NOBW: # BB#0: +; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv4i32: -; AVX512: ## BB#0: -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv4i32: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv4i32: ; X32-SSE: # BB#0: @@ -1661,15 +1843,15 @@ define <4 x i32> @foldv4i32u() nounwind { ; SSE-NEXT: movaps {{.*#+}} xmm0 = [23,0,32,24] ; SSE-NEXT: retq ; -; AVX-LABEL: foldv4i32u: -; AVX: # BB#0: -; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] -; AVX-NEXT: retq +; NOBW-LABEL: foldv4i32u: +; NOBW: # BB#0: +; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv4i32u: -; AVX512: ## BB#0: -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv4i32u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovaps {{.*#+}} xmm0 = [23,0,32,24] +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv4i32u: ; X32-SSE: # BB#0: @@ -1685,15 +1867,15 @@ define <8 x i16> @foldv8i16() nounwind { ; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] ; SSE-NEXT: retq ; -; AVX-LABEL: foldv8i16: -; AVX: # BB#0: -; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] -; AVX-NEXT: retq +; NOBW-LABEL: foldv8i16: +; NOBW: # BB#0: +; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = 
[7,0,16,8,16,13,11,9] +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv8i16: -; AVX512: ## BB#0: -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv8i16: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv8i16: ; X32-SSE: # BB#0: @@ -1709,15 +1891,15 @@ define <8 x i16> @foldv8i16u() nounwind { ; SSE-NEXT: movaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] ; SSE-NEXT: retq ; -; AVX-LABEL: foldv8i16u: -; AVX: # BB#0: -; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] -; AVX-NEXT: retq +; NOBW-LABEL: foldv8i16u: +; NOBW: # BB#0: +; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv8i16u: -; AVX512: ## BB#0: -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv8i16u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [7,0,16,8,16,13,11,9] +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv8i16u: ; X32-SSE: # BB#0: @@ -1733,15 +1915,15 @@ define <16 x i8> @foldv16i8() nounwind { ; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] ; SSE-NEXT: retq ; -; AVX-LABEL: foldv16i8: -; AVX: # BB#0: -; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] -; AVX-NEXT: retq +; NOBW-LABEL: foldv16i8: +; NOBW: # BB#0: +; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv16i8: -; AVX512: ## BB#0: -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv16i8: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv16i8: ; X32-SSE: # BB#0: @@ -1757,15 +1939,15 @@ define <16 x i8> @foldv16i8u() nounwind { ; SSE-NEXT: movaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] ; SSE-NEXT: retq ; -; AVX-LABEL: foldv16i8u: -; AVX: # BB#0: -; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] -; AVX-NEXT: retq +; NOBW-LABEL: foldv16i8u: +; NOBW: # BB#0: +; NOBW-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] +; NOBW-NEXT: retq ; -; AVX512-LABEL: foldv16i8u: -; AVX512: ## BB#0: -; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] -; AVX512-NEXT: retq +; AVX512VLBWDQ-LABEL: foldv16i8u: +; AVX512VLBWDQ: # BB#0: +; AVX512VLBWDQ-NEXT: vmovdqu {{.*#+}} xmm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2] +; AVX512VLBWDQ-NEXT: retq ; ; X32-SSE-LABEL: foldv16i8u: ; X32-SSE: # BB#0: -- cgit v1.2.3
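For reference, the new AVX512VLBWDQ check lines exercise the generic vpshufb-based ctlz lowering: the constant table [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0] holds the leading-zero count of each 4-bit nibble, per-byte counts are formed by selecting between the high- and low-nibble lookups, and wider element counts are built up with compare-against-zero/shift/add steps (vpcmpeqb, vpsrlw, vpaddw, and so on). A minimal scalar C sketch of the same idea follows; it is illustrative only, with hypothetical helper names, and is not how LLVM itself implements the lowering.

#include <stdio.h>

/* Leading-zero count of a 4-bit value; the same table the vpshufb constant
   above encodes. */
static const unsigned char lz4[16] = {4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};

/* lzcnt of one byte: if the high nibble is zero, the result is 4 plus the
   low nibble's count, otherwise just the high nibble's count. */
static unsigned lzcnt8(unsigned char x) {
    return (x >> 4) ? lz4[x >> 4] : 4u + lz4[x & 0xF];
}

/* lzcnt of a 16-bit value, widened from the byte counts the same way the
   vector sequence merges bytes into words with vpcmpeqb/vpsrlw/vpaddw. */
static unsigned lzcnt16(unsigned short x) {
    return (x >> 8) ? lzcnt8((unsigned char)(x >> 8))
                    : 8u + lzcnt8((unsigned char)(x & 0xFF));
}

int main(void) {
    /* 0x05 has 5 leading zeros as a byte; 0x0005 has 13 as a 16-bit value. */
    printf("%u %u\n", lzcnt8(0x05), lzcnt16(0x0005));
    return 0;
}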