path: root/test/CodeGen/X86
author    Dimitry Andric <dim@FreeBSD.org>    2017-01-24 19:17:53 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2017-01-24 19:17:53 +0000
commit 02a336801959d4fc2ea0657d4489596e1ecbfee0 (patch)
tree   776cc4ed9ad3080c3c6afbb0ffb73177c40292e8 /test/CodeGen/X86
parent 7c71d32ab52480cb7bfd9f951450060263a5b9e7 (diff)
Vendor import of llvm release_40 branch r292951 (tag: vendor/llvm/llvm-release_40-r292951)
Notes:
    svn path=/vendor/llvm/dist/; revision=312704
    svn path=/vendor/llvm/llvm-release_40-r292951/; revision=312705; tag=vendor/llvm/llvm-release_40-r292951
Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r--  test/CodeGen/X86/sse1.ll  133
1 file changed, 133 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/X86/sse1.ll b/test/CodeGen/X86/sse1.ll
index beedb1d24655..9488d6d26056 100644
--- a/test/CodeGen/X86/sse1.ll
+++ b/test/CodeGen/X86/sse1.ll
@@ -215,3 +215,136 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
ret <4 x i32> %zext
}
+; Fragile test warning - we need to induce the generation of a vselect
+; post-legalization to cause the crash seen in:
+; https://llvm.org/bugs/show_bug.cgi?id=31672
+; Is there a way to do that without an unsafe/fast sqrt intrinsic call?
+; Also, although the goal for adding this test is to prove that we
+; don't crash, I have no idea what this code is doing, so I'm keeping
+; the full codegen checks in case there's motivation to improve this.
+
+define <2 x float> @PR31672() #0 {
+; X32-LABEL: PR31672:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebp
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: andl $-16, %esp
+; X32-NEXT: subl $80, %esp
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: movaps {{.*#+}} xmm1 = <42,3,u,u>
+; X32-NEXT: movaps %xmm1, %xmm2
+; X32-NEXT: cmpeqps %xmm0, %xmm2
+; X32-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X32-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: rsqrtps %xmm1, %xmm0
+; X32-NEXT: mulps %xmm0, %xmm1
+; X32-NEXT: mulps %xmm0, %xmm1
+; X32-NEXT: addps {{\.LCPI.*}}, %xmm1
+; X32-NEXT: mulps {{\.LCPI.*}}, %xmm0
+; X32-NEXT: mulps %xmm1, %xmm0
+; X32-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: andl %eax, %ecx
+; X32-NEXT: notl %eax
+; X32-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: orl %ecx, %eax
+; X32-NEXT: movl %eax, (%esp)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: andl %ecx, %edx
+; X32-NEXT: notl %ecx
+; X32-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: orl %edx, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: andl %ecx, %edx
+; X32-NEXT: notl %ecx
+; X32-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: orl %edx, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: andl %eax, %ecx
+; X32-NEXT: notl %eax
+; X32-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: orl %ecx, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
+;
+; X64-LABEL: PR31672:
+; X64: # BB#0:
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movaps {{.*#+}} xmm1 = <42,3,u,u>
+; X64-NEXT: cmpeqps %xmm1, %xmm0
+; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: rsqrtps %xmm1, %xmm0
+; X64-NEXT: mulps %xmm0, %xmm1
+; X64-NEXT: mulps %xmm0, %xmm1
+; X64-NEXT: addps {{.*}}(%rip), %xmm1
+; X64-NEXT: mulps {{.*}}(%rip), %xmm0
+; X64-NEXT: mulps %xmm1, %xmm0
+; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi
+; X64-NEXT: movl %r9d, %esi
+; X64-NEXT: andl %edi, %esi
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: notl %ecx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax
+; X64-NEXT: andl %eax, %ecx
+; X64-NEXT: orl %esi, %ecx
+; X64-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movl %r8d, %ecx
+; X64-NEXT: andl %r10d, %ecx
+; X64-NEXT: movl %r10d, %esi
+; X64-NEXT: notl %esi
+; X64-NEXT: andl %edx, %esi
+; X64-NEXT: orl %ecx, %esi
+; X64-NEXT: movl %esi, -{{[0-9]+}}(%rsp)
+; X64-NEXT: shrq $32, %r9
+; X64-NEXT: shrq $32, %rdi
+; X64-NEXT: andl %edi, %r9d
+; X64-NEXT: notl %edi
+; X64-NEXT: shrq $32, %rax
+; X64-NEXT: andl %edi, %eax
+; X64-NEXT: orl %r9d, %eax
+; X64-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; X64-NEXT: shrq $32, %r8
+; X64-NEXT: shrq $32, %r10
+; X64-NEXT: andl %r10d, %r8d
+; X64-NEXT: notl %r10d
+; X64-NEXT: shrq $32, %rdx
+; X64-NEXT: andl %r10d, %edx
+; X64-NEXT: orl %r8d, %edx
+; X64-NEXT: movl %edx, -{{[0-9]+}}(%rsp)
+; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X64-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-NEXT: retq
+ %t0 = call fast <2 x float> @llvm.sqrt.v2f32(<2 x float> <float 42.0, float 3.0>)
+ ret <2 x float> %t0
+}
+
+declare <2 x float> @llvm.sqrt.v2f32(<2 x float>) #1
+
+attributes #0 = { nounwind "unsafe-fp-math"="true" }
+
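
For readers as puzzled by the CHECK lines as the test comment admits to being: the rsqrtps/mulps/addps/mulps/mulps sequence matches a one-constant Newton-Raphson refinement of the hardware reciprocal-square-root estimate, est' = est * (x * est * est - 3.0) * (-0.5), and the cmpeqps plus the andl/notl/orl runs implement the post-legalization vselect the comment mentions: rsqrt(0) is inf, so without a select the x == 0.0 lanes would produce NaN instead of 0.0. The IR sketch below is a hypothetical reconstruction, not part of the patch; the function name is made up, and the -3.0/-0.5 constants are assumed from the standard refinement formula, since the test hides the actual constant pool behind {{\.LCPI.*}} regexes.

; Hypothetical sketch of the pattern the CHECK lines appear to encode.
define <4 x float> @rsqrt_nr_sketch(<4 x float> %x) {
  ; hardware estimate: est ~= 1/sqrt(x), good to roughly 12 bits
  %est  = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %x)
  ; one Newton-Raphson step, mirroring the two mulps, the addps, and the
  ; two constant-pool operands: est' = est * (x*est*est - 3.0) * (-0.5)
  %xe   = fmul fast <4 x float> %x, %est
  %xee  = fmul fast <4 x float> %xe, %est
  %sub3 = fadd fast <4 x float> %xee, <float -3.0, float -3.0, float -3.0, float -3.0>
  %half = fmul fast <4 x float> %est, <float -0.5, float -0.5, float -0.5, float -0.5>
  %ref  = fmul fast <4 x float> %sub3, %half
  ; the vselect the test wants to survive legalization: pick 0.0 for the
  ; x == 0.0 lanes so that inf from rsqrt(0) never reaches the result
  %is0  = fcmp oeq <4 x float> %x, zeroinitializer
  %sel  = select <4 x i1> %is0, <4 x float> zeroinitializer, <4 x float> %ref
  ret <4 x float> %sel
}
declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>)

On SSE2 and later a select like this can lower to vector mask operations, but with plain SSE1 the integer vector type needed for the mask is illegal, which is presumably why the CHECK lines show the mask round-tripping through the stack and being combined with scalar andl/notl/orl instructions.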