author:    Dimitry Andric <dim@FreeBSD.org>  2015-12-30 11:46:15 +0000
committer: Dimitry Andric <dim@FreeBSD.org>  2015-12-30 11:46:15 +0000
commit:    dd58ef019b700900793a1eb48b52123db01b654e (patch)
tree:      fcfbb4df56a744f4ddc6122c50521dd3f1c5e196 /test/CodeGen/X86/pmul.ll
parent:    2fe5752e3a7c345cdb59e869278d36af33c13fa4 (diff)
Vendor import of llvm trunk r256633:
Notes:
    svn path=/vendor/llvm/dist/; revision=292915
Diffstat (limited to 'test/CodeGen/X86/pmul.ll')
-rw-r--r--  test/CodeGen/X86/pmul.ll | 297
1 file changed, 222 insertions(+), 75 deletions(-)
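
The NOTE line added in the first hunk below records that the CHECK lines are autogenerated. With a present-day LLVM tree, regenerating them looks roughly like this; treat the exact flag spelling as an assumption from current versions of the script, which may differ in the revision imported here:

  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/X86/pmul.ll

The script runs every RUN line, captures the llc output, and rewrites each prefix-LABEL/prefix-NEXT block in place; that is why this patch replaces whole check bodies at once and can share a common SSE prefix between the SSE2 and SSE41 runs where their output agrees.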
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index dbe5bd646c7f..37b6fdf7cfeb 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -1,6 +1,7 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 | FileCheck %s --check-prefix=AVX2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
define <16 x i8> @mul8c(<16 x i8> %i) nounwind {
; SSE2-LABEL: mul8c:
@@ -34,16 +35,34 @@ define <16 x i8> @mul8c(<16 x i8> %i) nounwind {
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
+;
+; AVX2-LABEL: mul8c:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
entry:
%A = mul <16 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 >
ret <16 x i8> %A
}
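
x86 has no byte-element multiply, so the AVX2 checks for mul8c (and mul8 further down) widen, multiply, and narrow: sign-extend the sixteen bytes to sixteen 16-bit lanes of a ymm register (vpmovsxbw), multiply with vpmullw, then pull byte 0 of every 16-bit product back into an xmm with two vpshufb picks and a vpunpcklqdq. A rough intrinsics rendering of that sequence, as an illustration only (the function name and the intrinsics framing are mine, not the test's):

  #include <immintrin.h>

  __m128i mul_v16i8(__m128i a, __m128i b) {
      __m256i wa = _mm256_cvtepi8_epi16(a);            /* vpmovsxbw */
      __m256i wb = _mm256_cvtepi8_epi16(b);            /* vpmovsxbw */
      __m256i wp = _mm256_mullo_epi16(wa, wb);         /* vpmullw   */
      __m128i lo = _mm256_castsi256_si128(wp);
      __m128i hi = _mm256_extracti128_si256(wp, 1);    /* vextracti128 */
      /* keep byte 0 of each 16-bit lane; 0x80 zeroes the unused half */
      const __m128i m = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14,
                                      -128, -128, -128, -128,
                                      -128, -128, -128, -128);
      lo = _mm_shuffle_epi8(lo, m);                    /* vpshufb */
      hi = _mm_shuffle_epi8(hi, m);                    /* vpshufb */
      return _mm_unpacklo_epi64(lo, hi);               /* vpunpcklqdq */
  }

For mul8c the second operand is the constant 117 splatted across all bytes, which the generated code loads straight from the constant pool with the memory form of vpmovsxbw.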
define <8 x i16> @mul16c(<8 x i16> %i) nounwind {
-; ALL-LABEL: mul16c:
-; ALL: # BB#0: # %entry
-; ALL-NEXT: pmullw {{.*}}(%rip), %xmm0
-; ALL-NEXT: retq
+; SSE-LABEL: mul16c:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: mul16c:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
entry:
%A = mul <8 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 >
ret <8 x i16> %A
@@ -65,22 +84,38 @@ define <4 x i32> @a(<4 x i32> %i) nounwind {
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
+;
+; AVX2-LABEL: a:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
entry:
%A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
ret <4 x i32> %A
}
define <2 x i64> @b(<2 x i64> %i) nounwind {
-; ALL-LABEL: b:
-; ALL: # BB#0: # %entry
-; ALL-NEXT: movdqa {{.*#+}} xmm1 = [117,117]
-; ALL-NEXT: movdqa %xmm0, %xmm2
-; ALL-NEXT: pmuludq %xmm1, %xmm2
-; ALL-NEXT: psrlq $32, %xmm0
-; ALL-NEXT: pmuludq %xmm1, %xmm0
-; ALL-NEXT: psllq $32, %xmm0
-; ALL-NEXT: paddq %xmm2, %xmm0
-; ALL-NEXT: retq
+; SSE-LABEL: b:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [117,117]
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm1, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: b:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [117,117]
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
entry:
%A = mul <2 x i64> %i, < i64 117, i64 117 >
ret <2 x i64> %A
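
Neither SSE nor AVX2 has a packed 64-bit multiply (that arrives with AVX-512DQ's vpmullq), so each lane of @b is assembled from 32x32->64 pmuludq pieces. Writing a = 2^32*a_hi + a_lo and b = 2^32*b_hi + b_lo, the low 64 bits of a*b are a_lo*b_lo + ((a_lo*b_hi + a_hi*b_lo) << 32); the a_hi*b_hi term is shifted out entirely. A scalar model of one lane (illustrative name, not from the test):

  #include <stdint.h>

  uint64_t mul64_lane(uint64_t a, uint64_t b) {
      uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;  /* psrlq $32 isolates a_hi */
      uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
      /* each product below is one pmuludq; the shifts are psllq $32 */
      return a_lo * b_lo + ((a_lo * b_hi + a_hi * b_lo) << 32);
  }

Since 117 fits in 32 bits, b_hi is zero here and the a_lo*b_hi product drops out, which is why @b needs only two pmuludq; @d below, with two variable operands, needs all three.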
@@ -123,16 +158,34 @@ define <16 x i8> @mul8(<16 x i8> %i, <16 x i8> %j) nounwind {
; SSE41-NEXT: packuswb %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
+;
+; AVX2-LABEL: mul8:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
entry:
%A = mul <16 x i8> %i, %j
ret <16 x i8> %A
}
define <8 x i16> @mul16(<8 x i16> %i, <8 x i16> %j) nounwind {
-; ALL-LABEL: mul16:
-; ALL: # BB#0: # %entry
-; ALL-NEXT: pmullw %xmm1, %xmm0
-; ALL-NEXT: retq
+; SSE-LABEL: mul16:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: pmullw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: mul16:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
entry:
%A = mul <8 x i16> %i, %j
ret <8 x i16> %A
@@ -154,26 +207,44 @@ define <4 x i32> @c(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: retq
+;
+; AVX2-LABEL: c:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
entry:
%A = mul <4 x i32> %i, %j
ret <4 x i32> %A
}
define <2 x i64> @d(<2 x i64> %i, <2 x i64> %j) nounwind {
-; ALL-LABEL: d:
-; ALL: # BB#0: # %entry
-; ALL-NEXT: movdqa %xmm0, %xmm2
-; ALL-NEXT: pmuludq %xmm1, %xmm2
-; ALL-NEXT: movdqa %xmm1, %xmm3
-; ALL-NEXT: psrlq $32, %xmm3
-; ALL-NEXT: pmuludq %xmm0, %xmm3
-; ALL-NEXT: psllq $32, %xmm3
-; ALL-NEXT: paddq %xmm3, %xmm2
-; ALL-NEXT: psrlq $32, %xmm0
-; ALL-NEXT: pmuludq %xmm1, %xmm0
-; ALL-NEXT: psllq $32, %xmm0
-; ALL-NEXT: paddq %xmm2, %xmm0
-; ALL-NEXT: retq
+; SSE-LABEL: d:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: psrlq $32, %xmm3
+; SSE-NEXT: pmuludq %xmm0, %xmm3
+; SSE-NEXT: psllq $32, %xmm3
+; SSE-NEXT: paddq %xmm3, %xmm2
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm1, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: d:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3
+; AVX2-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
+; AVX2-NEXT: vpsllq $32, %xmm3, %xmm3
+; AVX2-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: retq
entry:
%A = mul <2 x i64> %i, %j
ret <2 x i64> %A
@@ -210,6 +281,17 @@ define <4 x i32> @e(<4 x i32> %i, <4 x i32> %j) nounwind {
; SSE41-NEXT: pmulld {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
; SSE41-NEXT: addq $40, %rsp
; SSE41-NEXT: retq
+;
+; AVX2-LABEL: e:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: subq $40, %rsp
+; AVX2-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: callq foo
+; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX2-NEXT: vpmulld {{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX2-NEXT: addq $40, %rsp
+; AVX2-NEXT: retq
entry:
; Use a call to force spills.
call void @foo()
@@ -218,27 +300,47 @@ entry:
}
define <2 x i64> @f(<2 x i64> %i, <2 x i64> %j) nounwind {
-; ALL-LABEL: f:
-; ALL: # BB#0: # %entry
-; ALL-NEXT: subq $40, %rsp
-; ALL-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; ALL-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; ALL-NEXT: callq foo
-; ALL-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
-; ALL-NEXT: movdqa %xmm0, %xmm2
-; ALL-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; ALL-NEXT: pmuludq %xmm3, %xmm2
-; ALL-NEXT: movdqa %xmm3, %xmm1
-; ALL-NEXT: psrlq $32, %xmm1
-; ALL-NEXT: pmuludq %xmm0, %xmm1
-; ALL-NEXT: psllq $32, %xmm1
-; ALL-NEXT: paddq %xmm1, %xmm2
-; ALL-NEXT: psrlq $32, %xmm0
-; ALL-NEXT: pmuludq %xmm3, %xmm0
-; ALL-NEXT: psllq $32, %xmm0
-; ALL-NEXT: paddq %xmm2, %xmm0
-; ALL-NEXT: addq $40, %rsp
-; ALL-NEXT: retq
+; SSE-LABEL: f:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: subq $40, %rsp
+; SSE-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; SSE-NEXT: callq foo
+; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm1
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm0, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm1, %xmm2
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm3, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: addq $40, %rsp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: f:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: subq $40, %rsp
+; AVX2-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX2-NEXT: callq foo
+; AVX2-NEXT: vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; AVX2-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload
+; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm0
+; AVX2-NEXT: vpsrlq $32, %xmm2, %xmm1
+; AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlq $32, %xmm3, %xmm1
+; AVX2-NEXT: vpmuludq %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: addq $40, %rsp
+; AVX2-NEXT: retq
entry:
; Use a call to force spills.
call void @foo()
@@ -247,31 +349,76 @@ entry:
}
define <4 x i64> @b1(<4 x i64> %i) nounwind {
-; AVX2-LABEL: @b1
-; AVX2: vpbroadcastq
-; AVX2-NEXT: vpmuludq
-; AVX2-NEXT: vpsrlq $32
-; AVX2-NEXT: vpmuludq
-; AVX2-NEXT: vpsllq $32
-; AVX2-NEXT: vpaddq
-; AVX2-NEXT: retq
+; SSE-LABEL: b1:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [117,117]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm2, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: b1:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
entry:
%A = mul <4 x i64> %i, < i64 117, i64 117, i64 117, i64 117 >
ret <4 x i64> %A
}
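
With AVX2 the same lo/hi decomposition runs over the full <4 x i64> in ymm registers, one instruction per step, while the SSE lowering above has to repeat the whole sequence for each xmm half. A sketch of what the b2 checks just below encode, again an illustrative intrinsics rendering rather than code from the test:

  #include <immintrin.h>

  __m256i mul_v4i64(__m256i a, __m256i b) {
      __m256i lo = _mm256_mul_epu32(a, b);                        /* vpmuludq: a_lo*b_lo */
      __m256i t  = _mm256_mul_epu32(a, _mm256_srli_epi64(b, 32)); /* a_lo*b_hi */
      lo = _mm256_add_epi64(lo, _mm256_slli_epi64(t, 32));
      t  = _mm256_mul_epu32(_mm256_srli_epi64(a, 32), b);         /* a_hi*b_lo */
      return _mm256_add_epi64(lo, _mm256_slli_epi64(t, 32));
  }

In b1 the multiplier is again the constant 117, splatted with vpbroadcastq, so the b_hi products vanish and two vpmuludq suffice.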
define <4 x i64> @b2(<4 x i64> %i, <4 x i64> %j) nounwind {
-; AVX2-LABEL: @b2
-; AVX2: vpmuludq
-; AVX2-NEXT: vpsrlq $32
-; AVX2-NEXT: vpmuludq
-; AVX2-NEXT: vpsllq $32
-; AVX2-NEXT: vpaddq
-; AVX2-NEXT: vpsrlq $32
-; AVX2-NEXT: vpmuludq
-; AVX2-NEXT: vpsllq $32
-; AVX2-NEXT: vpaddq
-; AVX2-NEXT: retq
+; SSE-LABEL: b2:
+; SSE: # BB#0: # %entry
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: psrlq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm0, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: paddq %xmm5, %xmm4
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pmuludq %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm1, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
+; SSE-NEXT: paddq %xmm4, %xmm2
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm3, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: b2:
+; AVX2: # BB#0: # %entry
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm3
+; AVX2-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
+; AVX2-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: retq
entry:
%A = mul <4 x i64> %i, %j
ret <4 x i64> %A