author     Dimitry Andric <dim@FreeBSD.org>    2018-07-28 10:51:19 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2018-07-28 10:51:19 +0000
commit     eb11fae6d08f479c0799db45860a98af528fa6e7 (patch)
tree       44d492a50c8c1a7eb8e2d17ea3360ec4d066f042    /test/CodeGen/X86/vector-shuffle-combining-xop.ll
parent     b8a2042aa938069e862750553db0e4d82d25822c (diff)
Vendor import of llvm trunk r338150 (tag: vendor/llvm/llvm-trunk-r338150)

Notes:
    svn path=/vendor/llvm/dist/; revision=336809
    svn path=/vendor/llvm/llvm-trunk-r338150/; revision=336814; tag=vendor/llvm/llvm-trunk-r338150
Diffstat (limited to 'test/CodeGen/X86/vector-shuffle-combining-xop.ll')
-rw-r--r--  test/CodeGen/X86/vector-shuffle-combining-xop.ll | 49
1 file changed, 32 insertions(+), 17 deletions(-)
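For context on the RUN-line change below: passing several --check-prefix options to FileCheck makes it verify the directives of every listed prefix, so assembly that is identical across the AVX and AVX2 runs can stay under the shared X32/X64 prefixes, while output that now differs (as in buildvector_v4f32_0404 below) moves to the per-run X86AVX/X86AVX2/X64AVX/X64AVX2 prefixes. A minimal sketch of the mechanism, using a hypothetical @store_v4f32 test that is not part of this commit:

; Both prefixes are active for each RUN line, so X64 checks are shared and
; X64AVX / X64AVX2 checks apply to a single run only.
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop  | FileCheck %s --check-prefix=X64 --check-prefix=X64AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=X64 --check-prefix=X64AVX2

define void @store_v4f32(<4 x float> %v, <4 x float>* %ptr) {
; X64-LABEL: store_v4f32:
; X64AVX: vmovaps %xmm0, (%rdi)
; X64AVX2: vmovaps %xmm0, (%rdi)
; X64: retq
  store <4 x float> %v, <4 x float>* %ptr
  ret void
}

Note that once any --check-prefix is given, plain CHECK: lines are no longer consulted, which is why every directive in the test carries an explicit prefix.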
diff --git a/test/CodeGen/X86/vector-shuffle-combining-xop.ll b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
index 83001cf5fb99..7207d8ecd59f 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-xop.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-xop.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X32 --check-prefix=X86AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=X32 --check-prefix=X86AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefix=X64 --check-prefix=X64AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+xop | FileCheck %s --check-prefix=X64 --check-prefix=X64AVX2
declare <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double>, <2 x double>, <2 x i64>, i8) nounwind readnone
declare <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double>, <4 x double>, <4 x i64>, i8) nounwind readnone
@@ -320,20 +320,35 @@ define <4 x i32> @combine_vpperm_10zz32BA(<4 x i32> %a0, <4 x i32> %a1) {
; FIXME: Duplicated load in i686
define void @buildvector_v4f32_0404(float %a, float %b, <4 x float>* %ptr) {
-; X32-LABEL: buildvector_v4f32_0404:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
-; X32-NEXT: vmovaps %xmm0, (%eax)
-; X32-NEXT: retl
+; X86AVX-LABEL: buildvector_v4f32_0404:
+; X86AVX: # %bb.0:
+; X86AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; X86AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
+; X86AVX-NEXT: vmovaps %xmm0, (%eax)
+; X86AVX-NEXT: retl
;
-; X64-LABEL: buildvector_v4f32_0404:
-; X64: # %bb.0:
-; X64-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[0],xmm1[0]
-; X64-NEXT: vmovaps %xmm0, (%rdi)
-; X64-NEXT: retq
+; X86AVX2-LABEL: buildvector_v4f32_0404:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X86AVX2-NEXT: vmovapd %xmm0, (%eax)
+; X86AVX2-NEXT: retl
+;
+; X64AVX-LABEL: buildvector_v4f32_0404:
+; X64AVX: # %bb.0:
+; X64AVX-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[0],xmm1[0]
+; X64AVX-NEXT: vmovaps %xmm0, (%rdi)
+; X64AVX-NEXT: retq
+;
+; X64AVX2-LABEL: buildvector_v4f32_0404:
+; X64AVX2: # %bb.0:
+; X64AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64AVX2-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; X64AVX2-NEXT: vmovapd %xmm0, (%rdi)
+; X64AVX2-NEXT: retq
%v0 = insertelement <4 x float> undef, float %a, i32 0
%v1 = insertelement <4 x float> %v0, float %b, i32 1
%v2 = insertelement <4 x float> %v1, float %a, i32 2