author     Dimitry Andric <dim@FreeBSD.org>   2013-04-08 18:41:23 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2013-04-08 18:41:23 +0000
commit     4a16efa3e43e35f0cc9efe3a67f620f0017c3d36 (patch)
tree       06099edc18d30894081a822b756f117cbe0b8207  /test/CodeGen/X86/MergeConsecutiveStores.ll
parent     482e7bddf617ae804dc47133cb07eb4aa81e45de (diff)
download   src-4a16efa3e43e35f0cc9efe3a67f620f0017c3d36.tar.gz
           src-4a16efa3e43e35f0cc9efe3a67f620f0017c3d36.zip

Vendor import of llvm trunk r178860 (tag: vendor/llvm/llvm-trunk-r178860)

Notes:
    svn path=/vendor/llvm/dist/; revision=249259
    svn path=/vendor/llvm/llvm-trunk-r178860/; revision=249260; tag=vendor/llvm/llvm-trunk-r178860
Diffstat (limited to 'test/CodeGen/X86/MergeConsecutiveStores.ll')
-rw-r--r--  test/CodeGen/X86/MergeConsecutiveStores.ll | 132
1 file changed, 131 insertions, 1 deletion
diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll
index 64825bac9719..bb227a0185df 100644
--- a/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -40,9 +40,43 @@ define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwt
ret void
}
+; No vectors because we use noimplicitfloat
+; CHECK: merge_const_store_no_vec
+; CHECK-NOT: vmovups
+; CHECK: ret
+define void @merge_const_store_no_vec(i32 %count, %struct.B* nocapture %p) noimplicitfloat {
+ %1 = icmp sgt i32 %count, 0
+ br i1 %1, label %.lr.ph, label %._crit_edge
+.lr.ph:
+ %i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
+ %.01 = phi %struct.B* [ %11, %.lr.ph ], [ %p, %0 ]
+ %2 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0
+ store i32 0, i32* %2, align 4
+ %3 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1
+ store i32 0, i32* %3, align 4
+ %4 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2
+ store i32 0, i32* %4, align 4
+ %5 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3
+ store i32 0, i32* %5, align 4
+ %6 = getelementptr inbounds %struct.B* %.01, i64 0, i32 4
+ store i32 0, i32* %6, align 4
+ %7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 5
+ store i32 0, i32* %7, align 4
+ %8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 6
+ store i32 0, i32* %8, align 4
+ %9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 7
+ store i32 0, i32* %9, align 4
+ %10 = add nsw i32 %i.02, 1
+ %11 = getelementptr inbounds %struct.B* %.01, i64 1
+ %exitcond = icmp eq i32 %10, %count
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+._crit_edge:
+ ret void
+}
+
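; Editor's sketch (not part of the imported test): without noimplicitfloat, the
; DAG combiner is free to merge the eight i32 zero-stores above into a single
; 256-bit store, which is exactly the vmovups that the next test's CHECK line
; looks for.  At the IR level the merged form is roughly equivalent to the
; following, assuming %struct.B is eight consecutive i32 fields as the stores
; imply (the *.sketch names are illustrative only):
%struct.B.sketch = type { i32, i32, i32, i32, i32, i32, i32, i32 }
define void @merge_const_store_sketch(%struct.B.sketch* nocapture %p) nounwind {
  %1 = bitcast %struct.B.sketch* %p to <8 x i32>*
  store <8 x i32> zeroinitializer, <8 x i32>* %1, align 4
  ret void
}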
; Move the constants using a single vector store.
; CHECK: merge_const_store_vec
-; CHECK: vmovups %ymm0, (%rsi)
+; CHECK: vmovups
; CHECK: ret
define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind uwtable noinline ssp {
%1 = icmp sgt i32 %count, 0
@@ -303,3 +337,99 @@ block4: ; preds = %4, %.lr.ph
ret void
}
+; Make sure that we merge the consecutive load/store sequence below and use a
+; word (16 bit) instead of a byte copy.
+; CHECK: MergeLoadStoreBaseIndexOffset
+; CHECK: movw (%{{.*}},%{{.*}}), [[REG:%[a-z]+]]
+; CHECK: movw [[REG]], (%{{.*}})
+define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
+ br label %1
+
+; <label>:1
+ %.09 = phi i32 [ %n, %0 ], [ %11, %1 ]
+ %.08 = phi i8* [ %b, %0 ], [ %10, %1 ]
+ %.0 = phi i64* [ %a, %0 ], [ %2, %1 ]
+ %2 = getelementptr inbounds i64* %.0, i64 1
+ %3 = load i64* %.0, align 1
+ %4 = getelementptr inbounds i8* %c, i64 %3
+ %5 = load i8* %4, align 1
+ %6 = add i64 %3, 1
+ %7 = getelementptr inbounds i8* %c, i64 %6
+ %8 = load i8* %7, align 1
+ store i8 %5, i8* %.08, align 1
+ %9 = getelementptr inbounds i8* %.08, i64 1
+ store i8 %8, i8* %9, align 1
+ %10 = getelementptr inbounds i8* %.08, i64 2
+ %11 = add nsw i32 %.09, -1
+ %12 = icmp eq i32 %11, 0
+ br i1 %12, label %13, label %1
+
+; <label>:13
+ ret void
+}
+
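; Editor's sketch (not part of the imported test): in the loop above, the two
; i8 loads read %c[%3] and %c[%3 + 1] and the two i8 stores write %.08[0] and
; %.08[1], so each pair can be rewritten as a single 16-bit access.  The merged
; loop body is roughly equivalent to the following (illustrative names):
define void @merged_copy_sketch(i8* %dst, i8* %src, i64 %idx) nounwind {
  %1 = getelementptr inbounds i8* %src, i64 %idx
  %2 = bitcast i8* %1 to i16*
  %3 = load i16* %2, align 1
  %4 = bitcast i8* %dst to i16*
  store i16 %3, i16* %4, align 1
  ret void
}
; llc selects this as the base+index movw load and the movw store that the
; CHECK lines above expect.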
+; Make sure that we merge the consecutive load/store sequence below and use a
+; word (16 bit) instead of a byte copy even if there are intermediate sign
+; extensions.
+; CHECK: MergeLoadStoreBaseIndexOffsetSext
+; CHECK: movw (%{{.*}},%{{.*}}), [[REG:%[a-z]+]]
+; CHECK: movw [[REG]], (%{{.*}})
+define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
+ br label %1
+
+; <label>:1
+ %.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
+ %.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
+ %.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
+ %2 = getelementptr inbounds i8* %.0, i64 1
+ %3 = load i8* %.0, align 1
+ %4 = sext i8 %3 to i64
+ %5 = getelementptr inbounds i8* %c, i64 %4
+ %6 = load i8* %5, align 1
+ %7 = add i64 %4, 1
+ %8 = getelementptr inbounds i8* %c, i64 %7
+ %9 = load i8* %8, align 1
+ store i8 %6, i8* %.08, align 1
+ %10 = getelementptr inbounds i8* %.08, i64 1
+ store i8 %9, i8* %10, align 1
+ %11 = getelementptr inbounds i8* %.08, i64 2
+ %12 = add nsw i32 %.09, -1
+ %13 = icmp eq i32 %12, 0
+ br i1 %13, label %14, label %1
+
+; <label>:14
+ ret void
+}
+
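; Editor's note (not part of the imported test): the merge above remains legal
; because the +1 is applied after the sign extension, so both indices are
; computed in i64 and the two loads are provably adjacent for every input byte:
;   %4 = sext i8 %3 to i64        ; first index
;   %7 = add i64 %4, 1            ; second index, always exactly %4 + 1
; Contrast this with the next test, where the add happens in i8 before the
; sext and can wrap.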
+; However, we can only ignore the sign extensions when they apply to all of the
+; address computations; here the add is done in i8 before the sext, so the two
+; accesses are not provably adjacent and must not be merged.
+; CHECK: loadStoreBaseIndexOffsetSextNoSex
+; CHECK-NOT: movw (%{{.*}},%{{.*}}), [[REG:%[a-z]+]]
+; CHECK-NOT: movw [[REG]], (%{{.*}})
+define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
+ br label %1
+
+; <label>:1
+ %.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
+ %.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
+ %.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
+ %2 = getelementptr inbounds i8* %.0, i64 1
+ %3 = load i8* %.0, align 1
+ %4 = sext i8 %3 to i64
+ %5 = getelementptr inbounds i8* %c, i64 %4
+ %6 = load i8* %5, align 1
+ %7 = add i8 %3, 1
+ %wrap.4 = sext i8 %7 to i64
+ %8 = getelementptr inbounds i8* %c, i64 %wrap.4
+ %9 = load i8* %8, align 1
+ store i8 %6, i8* %.08, align 1
+ %10 = getelementptr inbounds i8* %.08, i64 1
+ store i8 %9, i8* %10, align 1
+ %11 = getelementptr inbounds i8* %.08, i64 2
+ %12 = add nsw i32 %.09, -1
+ %13 = icmp eq i32 %12, 0
+ br i1 %13, label %14, label %1
+
+; <label>:14
+ ret void
+}
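; Editor's note (not part of the imported test): a worked counterexample for
; the test above.  With %3 = 127:
;   %4      = sext i8 127 to i64   -> 127
;   %7      = add  i8 127, 1       -> -128 (i8 addition wraps)
;   %wrap.4 = sext i8 -128 to i64  -> -128
; The loads then touch %c[127] and %c[-128] rather than adjacent bytes, which
; is why the CHECK-NOT lines require that no 16-bit merge is emitted here.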