Diffstat (limited to 'test/CodeGen/X86/fold-load-unops.ll')
-rw-r--r--  test/CodeGen/X86/fold-load-unops.ll | 154
1 file changed, 0 insertions, 154 deletions
diff --git a/test/CodeGen/X86/fold-load-unops.ll b/test/CodeGen/X86/fold-load-unops.ll
deleted file mode 100644
index d2b03dde8319..000000000000
--- a/test/CodeGen/X86/fold-load-unops.ll
+++ /dev/null
@@ -1,154 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
-
-; Verify that we fold loads into unary SSE intrinsics only when optimizing for size
-
-define float @rcpss(float* %a) {
-; SSE-LABEL: rcpss:
-; SSE: # BB#0:
-; SSE-NEXT: movss (%rdi), %xmm0
-; SSE-NEXT: rcpss %xmm0, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: rcpss:
-; AVX: # BB#0:
-; AVX-NEXT: vmovss (%rdi), %xmm0
-; AVX-NEXT: vrcpss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load float, float* %a
- %ins = insertelement <4 x float> undef, float %ld, i32 0
- %res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
- %ext = extractelement <4 x float> %res, i32 0
- ret float %ext
-}
-
-define float @rsqrtss(float* %a) {
-; SSE-LABEL: rsqrtss:
-; SSE: # BB#0:
-; SSE-NEXT: movss (%rdi), %xmm0
-; SSE-NEXT: rsqrtss %xmm0, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: rsqrtss:
-; AVX: # BB#0:
-; AVX-NEXT: vmovss (%rdi), %xmm0
-; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load float, float* %a
- %ins = insertelement <4 x float> undef, float %ld, i32 0
- %res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
- %ext = extractelement <4 x float> %res, i32 0
- ret float %ext
-}
-
-define float @sqrtss(float* %a) {
-; SSE-LABEL: sqrtss:
-; SSE: # BB#0:
-; SSE-NEXT: movss (%rdi), %xmm0
-; SSE-NEXT: sqrtss %xmm0, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: sqrtss:
-; AVX: # BB#0:
-; AVX-NEXT: vmovss (%rdi), %xmm0
-; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load float, float* %a
- %ins = insertelement <4 x float> undef, float %ld, i32 0
- %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
- %ext = extractelement <4 x float> %res, i32 0
- ret float %ext
-}
-
-define double @sqrtsd(double* %a) {
-; SSE-LABEL: sqrtsd:
-; SSE: # BB#0:
-; SSE-NEXT: movsd (%rdi), %xmm0
-; SSE-NEXT: sqrtsd %xmm0, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: sqrtsd:
-; AVX: # BB#0:
-; AVX-NEXT: vmovsd (%rdi), %xmm0
-; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load double, double* %a
- %ins = insertelement <2 x double> undef, double %ld, i32 0
- %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
- %ext = extractelement <2 x double> %res, i32 0
- ret double %ext
-}
-
-define float @rcpss_size(float* %a) optsize {
-; SSE-LABEL: rcpss_size:
-; SSE: # BB#0:
-; SSE-NEXT: rcpss (%rdi), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: rcpss_size:
-; AVX: # BB#0:
-; AVX-NEXT: vrcpss (%rdi), %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load float, float* %a
- %ins = insertelement <4 x float> undef, float %ld, i32 0
- %res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
- %ext = extractelement <4 x float> %res, i32 0
- ret float %ext
-}
-
-define float @rsqrtss_size(float* %a) optsize {
-; SSE-LABEL: rsqrtss_size:
-; SSE: # BB#0:
-; SSE-NEXT: rsqrtss (%rdi), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: rsqrtss_size:
-; AVX: # BB#0:
-; AVX-NEXT: vrsqrtss (%rdi), %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load float, float* %a
- %ins = insertelement <4 x float> undef, float %ld, i32 0
- %res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
- %ext = extractelement <4 x float> %res, i32 0
- ret float %ext
-}
-
-define float @sqrtss_size(float* %a) optsize {
-; SSE-LABEL: sqrtss_size:
-; SSE: # BB#0:
-; SSE-NEXT: sqrtss (%rdi), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: sqrtss_size:
-; AVX: # BB#0:
-; AVX-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load float, float* %a
- %ins = insertelement <4 x float> undef, float %ld, i32 0
- %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
- %ext = extractelement <4 x float> %res, i32 0
- ret float %ext
-}
-
-define double @sqrtsd_size(double* %a) optsize {
-; SSE-LABEL: sqrtsd_size:
-; SSE: # BB#0:
-; SSE-NEXT: sqrtsd (%rdi), %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: sqrtsd_size:
-; AVX: # BB#0:
-; AVX-NEXT: vsqrtsd (%rdi), %xmm0, %xmm0
-; AVX-NEXT: retq
- %ld = load double, double* %a
- %ins = insertelement <2 x double> undef, double %ld, i32 0
- %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
- %ext = extractelement <2 x double> %res, i32 0
- ret double %ext
-}
-
-declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
-declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
-declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
-declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone