diff options
Diffstat (limited to 'test/CodeGen/ARM/fast-isel.ll')
-rw-r--r-- | test/CodeGen/ARM/fast-isel.ll | 96
1 file changed, 82 insertions(+), 14 deletions(-)
diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll index 465e85f9a832..417e2d9e410a 100644 --- a/test/CodeGen/ARM/fast-isel.ll +++ b/test/CodeGen/ARM/fast-isel.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM -; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=THUMB +; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM +; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB ; Very basic fast-isel functionality. define i32 @add(i32 %a, i32 %b) nounwind { @@ -142,19 +142,87 @@ define void @test4() { store i32 %b, i32* @test4g ret void -; THUMB: ldr.n r0, LCPI4_1 +; THUMB: movw r0, :lower16:L_test4g$non_lazy_ptr +; THUMB: movt r0, :upper16:L_test4g$non_lazy_ptr ; THUMB: ldr r0, [r0] -; THUMB: ldr r0, [r0] -; THUMB: adds r0, #1 -; THUMB: ldr.n r1, LCPI4_0 -; THUMB: ldr r1, [r1] -; THUMB: str r0, [r1] +; THUMB: ldr r1, [r0] +; THUMB: adds r1, #1 +; THUMB: str r1, [r0] -; ARM: ldr r0, LCPI4_1 -; ARM: ldr r0, [r0] +; ARM: movw r0, :lower16:L_test4g$non_lazy_ptr +; ARM: movt r0, :upper16:L_test4g$non_lazy_ptr ; ARM: ldr r0, [r0] -; ARM: add r0, r0, #1 -; ARM: ldr r1, LCPI4_0 -; ARM: ldr r1, [r1] -; ARM: str r0, [r1] +; ARM: ldr r1, [r0] +; ARM: add r1, r1, #1 +; ARM: str r1, [r0] +} + +; Check unaligned stores +%struct.anon = type <{ float }> + +@a = common global %struct.anon* null, align 4 + +define void @unaligned_store(float %x, float %y) nounwind { +entry: +; ARM: @unaligned_store +; ARM: vmov r1, s0 +; ARM: str r1, [r0] + +; THUMB: @unaligned_store +; THUMB: vmov r1, s0 +; THUMB: str r1, [r0] + + %add = fadd float %x, %y + %0 = load %struct.anon** @a, align 4 + %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
+ store float %add, float* %x1, align 1 + ret void +} + +; Doublewords require only word-alignment. +; rdar://10528060 +%struct.anon.0 = type { double } + +@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4 + +define void @test5(double %a, double %b) nounwind { +entry: +; ARM: @test5 +; THUMB: @test5 + %add = fadd double %a, %b + store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4 +; ARM: vstr d16, [r0] +; THUMB: vstr d16, [r0] + ret void +} + +; Check unaligned loads of floats +%class.TAlignTest = type <{ i16, float }> + +define zeroext i1 @test6(%class.TAlignTest* %this) nounwind align 2 { +entry: +; ARM: @test6 +; THUMB: @test6 + %0 = alloca %class.TAlignTest*, align 4 + store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4 + %1 = load %class.TAlignTest** %0 + %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1 + %3 = load float* %2, align 1 + %4 = fcmp une float %3, 0.000000e+00 +; ARM: ldr r0, [r0, #2] +; ARM: vmov s0, r0 +; ARM: vcmpe.f32 s0, #0 +; THUMB: ldr.w r0, [r0, #2] +; THUMB: vmov s0, r0 +; THUMB: vcmpe.f32 s0, #0 + ret i1 %4 +} + +; ARM: @urem_fold +; THUMB: @urem_fold +; ARM: and r0, r0, #31 +; THUMB: and r0, r0, #31 +define i32 @urem_fold(i32 %a) nounwind { + %rem = urem i32 %a, 32 + ret i32 %rem }