Diffstat (limited to 'test/CodeGen/X86/pr31773.ll')
-rw-r--r--  test/CodeGen/X86/pr31773.ll | 41 +++++++++++++++++++++++++++++++++------
1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/test/CodeGen/X86/pr31773.ll b/test/CodeGen/X86/pr31773.ll
index 8722df3f4b57..6b4261c24353 100644
--- a/test/CodeGen/X86/pr31773.ll
+++ b/test/CodeGen/X86/pr31773.ll
@@ -1,18 +1,47 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
; The purpose of this test is to ensure that vpackus* is not used for the umin+trunc combination, since vpackus* treats its input as a signed number.
+
define <16 x i8> @usat_trunc_wb_256(<16 x i16> %i) {
-; CHECK-LABEL: usat_trunc_wb_256:
-; CHECK-NOT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-LABEL: usat_trunc_wb_256:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX-NEXT: vpminuw %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpminuw %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: usat_trunc_wb_256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovuswb %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%x3 = icmp ult <16 x i16> %i, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
%x5 = select <16 x i1> %x3, <16 x i16> %i, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
%x6 = trunc <16 x i16> %x5 to <16 x i8>
ret <16 x i8> %x6
}
-
+
define <8 x i16> @usat_trunc_dw_256(<8 x i32> %i) {
-; CHECK-LABEL: usat_trunc_dw_256:
-; CHECK-NOT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX-LABEL: usat_trunc_dw_256:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; AVX-NEXT: vpminud %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: usat_trunc_dw_256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovusdw %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%x3 = icmp ult <8 x i32> %i, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%x5 = select <8 x i1> %x3, <8 x i32> %i, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
%x6 = trunc <8 x i32> %x5 to <8 x i16>