Diffstat (limited to 'test/CodeGen/AMDGPU/fmin_legacy.ll')
-rw-r--r--   test/CodeGen/AMDGPU/fmin_legacy.ll   53
1 files changed, 37 insertions, 16 deletions
diff --git a/test/CodeGen/AMDGPU/fmin_legacy.ll b/test/CodeGen/AMDGPU/fmin_legacy.ll
index 69a0a520a476..79acd02e6d1f 100644
--- a/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -1,16 +1,20 @@
-; RUN: llc -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math -march=amdgcn -mcpu=SI < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

 ; FIXME: Should replace unsafe-fp-math with no signed zeros.

 declare i32 @llvm.r600.read.tidig.x() #1

-; FUNC-LABEL: @test_fmin_legacy_f32
+; The two inputs to the instruction are different SGPRs from the same
+; super register, so we can't fold both SGPR operands even though they
+; are both the same register.
+
+; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32:
 ; EG: MIN *
-; SI-SAFE: v_min_legacy_f32_e64
-; SI-NONAN: v_min_f32_e64
-define void @test_fmin_legacy_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
+; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+define void @s_test_fmin_legacy_subreg_inputs_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
   %r0 = extractelement <4 x float> %reg0, i32 0
   %r1 = extractelement <4 x float> %reg0, i32 1
   %r2 = fcmp uge float %r0, %r1
@@ -20,6 +24,23 @@ define void @test_fmin_legacy_f32(<4 x float> addrspace(1)* %out, <4 x float> in
   ret void
 }

+; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32:
+; SI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
+
+; SI-SAFE-DAG: v_mov_b32_e32 [[VA:v[0-9]+]], [[A]]
+; SI-NONAN-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]
+
+; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[VA]]
+; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[VB]]
+
+define void @s_test_fmin_legacy_ule_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+  %cmp = fcmp ule float %a, %b
+  %val = select i1 %cmp, float %a, float %b
+  store float %val, float addrspace(1)* %out, align 4
+  ret void
+}
+
 ; FUNC-LABEL: @test_fmin_legacy_ule_f32
 ; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
 ; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
@@ -30,8 +51,8 @@ define void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

-  %a = load float, float addrspace(1)* %gep.0, align 4
-  %b = load float, float addrspace(1)* %gep.1, align 4
+  %a = load volatile float, float addrspace(1)* %gep.0, align 4
+  %b = load volatile float, float addrspace(1)* %gep.1, align 4

   %cmp = fcmp ule float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -49,8 +70,8 @@ define void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

-  %a = load float, float addrspace(1)* %gep.0, align 4
-  %b = load float, float addrspace(1)* %gep.1, align 4
+  %a = load volatile float, float addrspace(1)* %gep.0, align 4
+  %b = load volatile float, float addrspace(1)* %gep.1, align 4

   %cmp = fcmp ole float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -68,8 +89,8 @@ define void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

-  %a = load float, float addrspace(1)* %gep.0, align 4
-  %b = load float, float addrspace(1)* %gep.1, align 4
+  %a = load volatile float, float addrspace(1)* %gep.0, align 4
+  %b = load volatile float, float addrspace(1)* %gep.1, align 4

   %cmp = fcmp olt float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -87,8 +108,8 @@ define void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

-  %a = load float, float addrspace(1)* %gep.0, align 4
-  %b = load float, float addrspace(1)* %gep.1, align 4
+  %a = load volatile float, float addrspace(1)* %gep.0, align 4
+  %b = load volatile float, float addrspace(1)* %gep.1, align 4

   %cmp = fcmp ult float %a, %b
   %val = select i1 %cmp, float %a, float %b
@@ -172,8 +193,8 @@ define void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 ad
   %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
   %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

-  %a = load float, float addrspace(1)* %gep.0, align 4
-  %b = load float, float addrspace(1)* %gep.1, align 4
+  %a = load volatile float, float addrspace(1)* %gep.0, align 4
+  %b = load volatile float, float addrspace(1)* %gep.1, align 4

   %cmp = fcmp ole float %a, %b
   %val0 = select i1 %cmp, float %a, float %b
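
The updated RUN lines at the top of the diff can also be exercised by hand, outside of lit. A minimal sketch of the first (safe-math) invocation, assuming an LLVM build with the AMDGPU target enabled and llc/FileCheck on PATH:

  llc -march=amdgcn -verify-machineinstrs < test/CodeGen/AMDGPU/fmin_legacy.ll \
    | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC \
        test/CodeGen/AMDGPU/fmin_legacy.ll

The second RUN line is the same command with -enable-no-nans-fp-math -enable-unsafe-fp-math passed to llc and SI-NONAN checked in place of SI-SAFE, which is what switches the expected instruction from v_min_legacy_f32 to v_min_f32.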