Diffstat (limited to 'test/Analysis/CostModel/AMDGPU')
 test/Analysis/CostModel/AMDGPU/add-sub.ll        | 138
 test/Analysis/CostModel/AMDGPU/addrspacecast.ll  |  45
 test/Analysis/CostModel/AMDGPU/bit-ops.ll        |  59
 test/Analysis/CostModel/AMDGPU/fabs.ll           |  97
 test/Analysis/CostModel/AMDGPU/fadd.ll           |  88
 test/Analysis/CostModel/AMDGPU/fdiv.ll           |  96
 test/Analysis/CostModel/AMDGPU/fmul.ll           |  88
 test/Analysis/CostModel/AMDGPU/fsub.ll           |  86
 test/Analysis/CostModel/AMDGPU/insertelement.ll  |  37
 test/Analysis/CostModel/AMDGPU/mul.ll            |  85
 test/Analysis/CostModel/AMDGPU/shifts.ll         |  61
 11 files changed, 880 insertions(+), 0 deletions(-)
diff --git a/test/Analysis/CostModel/AMDGPU/add-sub.ll b/test/Analysis/CostModel/AMDGPU/add-sub.ll
new file mode 100644
index 000000000000..76b21d26faaa
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/add-sub.ll
@@ -0,0 +1,138 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck %s
+
+; CHECK: 'add_i32'
+; CHECK: estimated cost of 1 for {{.*}} add i32
+define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %add = add i32 %vec, %b
+  store i32 %add, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i32'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i32>
+define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+  %add = add <2 x i32> %vec, %b
+  store <2 x i32> %add, <2 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v3i32'
+; CHECK: estimated cost of 3 for {{.*}} add <3 x i32>
+define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
+  %add = add <3 x i32> %vec, %b
+  store <3 x i32> %add, <3 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v4i32'
+; CHECK: estimated cost of 4 for {{.*}} add <4 x i32>
+define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
+  %add = add <4 x i32> %vec, %b
+  store <4 x i32> %add, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_i64'
+; CHECK: estimated cost of 2 for {{.*}} add i64
+define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %add = add i64 %vec, %b
+  store i64 %add, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i64'
+; CHECK: estimated cost of 4 for {{.*}} add <2 x i64>
+define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+  %add = add <2 x i64> %vec, %b
+  store <2 x i64> %add, <2 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v3i64'
+; CHECK: estimated cost of 6 for {{.*}} add <3 x i64>
+define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
+  %add = add <3 x i64> %vec, %b
+  store <3 x i64> %add, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v4i64'
+; CHECK: estimated cost of 8 for {{.*}} add <4 x i64>
+define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+  %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
+  %add = add <4 x i64> %vec, %b
+  store <4 x i64> %add, <4 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v16i64'
+; CHECK: estimated cost of 32 for {{.*}} add <16 x i64>
+define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 {
+  %vec = load <16 x i64>, <16 x i64> addrspace(1)* %vaddr
+  %add = add <16 x i64> %vec, %b
+  store <16 x i64> %add, <16 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_i16'
+; CHECK: estimated cost of 1 for {{.*}} add i16
+define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+  %vec = load i16, i16 addrspace(1)* %vaddr
+  %add = add i16 %vec, %b
+  store i16 %add, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i16>
+define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+  %add = add <2 x i16> %vec, %b
+  store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_i32'
+; CHECK: estimated cost of 1 for {{.*}} sub i32
+define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %sub = sub i32 %vec, %b
+  store i32 %sub, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_i64'
+; CHECK: estimated cost of 2 for {{.*}} sub i64
+define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %sub = sub i64 %vec, %b
+  store i64 %sub, i64 addrspace(1)* %out
+  ret void
+}
+; CHECK: 'sub_i16'
+; CHECK: estimated cost of 1 for {{.*}} sub i16
+define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+  %vec = load i16, i16 addrspace(1)* %vaddr
+  %sub = sub i16 %vec, %b
+  store i16 %sub, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} sub <2 x i16>
+define void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+  %sub = sub <2 x i16> %vec, %b
+  store <2 x i16> %sub, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Analysis/CostModel/AMDGPU/addrspacecast.ll b/test/Analysis/CostModel/AMDGPU/addrspacecast.ll
new file mode 100644
index 000000000000..1f6cb85a789d
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/addrspacecast.ll
@@ -0,0 +1,45 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri < %s | FileCheck %s
+
+; CHECK: 'addrspacecast_global_to_flat'
+; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(4)*
+define i8 addrspace(4)* @addrspacecast_global_to_flat(i8 addrspace(1)* %ptr) #0 {
+  %cast = addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(4)*
+  ret i8 addrspace(4)* %cast
+}
+
+; CHECK: 'addrspacecast_global_to_flat_v2'
+; CHECK: estimated cost of 0 for {{.*}} addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8 addrspace(4)*>
+define <2 x i8 addrspace(4)*> @addrspacecast_global_to_flat_v2(<2 x i8 addrspace(1)*> %ptr) #0 {
+  %cast = addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8 addrspace(4)*>
+  ret <2 x i8 addrspace(4)*> %cast
+}
+
+; CHECK: 'addrspacecast_global_to_flat_v32'
+; CHECK: estimated cost of 0 for {{.*}} addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8 addrspace(4)*>
+define <32 x i8 addrspace(4)*> @addrspacecast_global_to_flat_v32(<32 x i8 addrspace(1)*> %ptr) #0 {
+  %cast = addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8 addrspace(4)*>
+  ret <32 x i8 addrspace(4)*> %cast
+}
+
+; CHECK: 'addrspacecast_local_to_flat'
+; CHECK: estimated cost of 1 for {{.*}} addrspacecast i8 addrspace(3)* %ptr to i8 addrspace(4)*
+define i8 addrspace(4)* @addrspacecast_local_to_flat(i8 addrspace(3)* %ptr) #0 {
+  %cast = addrspacecast i8 addrspace(3)* %ptr to i8 addrspace(4)*
+  ret i8 addrspace(4)* %cast
+}
+
+; CHECK: 'addrspacecast_local_to_flat_v2'
+; CHECK: estimated cost of 2 for {{.*}} addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8 addrspace(4)*>
+define <2 x i8 addrspace(4)*> @addrspacecast_local_to_flat_v2(<2 x i8 addrspace(3)*> %ptr) #0 {
+  %cast = addrspacecast <2 x i8 addrspace(3)*> %ptr to <2 x i8 addrspace(4)*>
+  ret <2 x i8 addrspace(4)*> %cast
+}
+
+; CHECK: 'addrspacecast_local_to_flat_v32'
+; CHECK: estimated cost of 32 for {{.*}} addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8 addrspace(4)*>
+define <32 x i8 addrspace(4)*> @addrspacecast_local_to_flat_v32(<32 x i8 addrspace(3)*> %ptr) #0 {
+  %cast = addrspacecast <32 x i8 addrspace(3)*> %ptr to <32 x i8 addrspace(4)*>
+  ret <32 x i8 addrspace(4)*> %cast
+}
+
+attributes #0 = { nounwind readnone }
diff --git a/test/Analysis/CostModel/AMDGPU/bit-ops.ll b/test/Analysis/CostModel/AMDGPU/bit-ops.ll
new file mode 100644
index 000000000000..a809dbd77bbf
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/bit-ops.ll
@@ -0,0 +1,59 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'or_i32'
+; CHECK: estimated cost of 1 for {{.*}} or i32
+define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %or = or i32 %vec, %b
+  store i32 %or, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'or_i64'
+; CHECK: estimated cost of 2 for {{.*}} or i64
+define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %or = or i64 %vec, %b
+  store i64 %or, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'xor_i32'
+; CHECK: estimated cost of 1 for {{.*}} xor i32
+define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %or = xor i32 %vec, %b
+  store i32 %or, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'xor_i64'
+; CHECK: estimated cost of 2 for {{.*}} xor i64
+define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %or = xor i64 %vec, %b
+  store i64 %or, i64 addrspace(1)* %out
+  ret void
+}
+
+
+; CHECK: 'and_i32'
+; CHECK: estimated cost of 1 for {{.*}} and i32
+define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %or = and i32 %vec, %b
+  store i32 %or, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'and_i64'
+; CHECK: estimated cost of 2 for {{.*}} and i64
+define void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %or = and i64 %vec, %b
+  store i64 %or, i64 addrspace(1)* %out
+  ret void
+}
+
+
+attributes #0 = { nounwind }
diff --git a/test/Analysis/CostModel/AMDGPU/fabs.ll b/test/Analysis/CostModel/AMDGPU/fabs.ll
new file mode 100644
index 000000000000..9c551ec8afe5
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/fabs.ll
@@ -0,0 +1,97 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'fabs_f32'
+; CHECK: estimated cost of 0 for {{.*}} call float @llvm.fabs.f32
+define void @fabs_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %fabs = call float @llvm.fabs.f32(float %vec) #1
+  store float %fabs, float addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_v2f32'
+; CHECK: estimated cost of 0 for {{.*}} call <2 x float> @llvm.fabs.v2f32
+define void @fabs_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %vec) #1
+  store <2 x float> %fabs, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_v3f32'
+; CHECK: estimated cost of 0 for {{.*}} call <3 x float> @llvm.fabs.v3f32
+define void @fabs_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %fabs = call <3 x float> @llvm.fabs.v3f32(<3 x float> %vec) #1
+  store <3 x float> %fabs, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_f64'
+; CHECK: estimated cost of 0 for {{.*}} call double @llvm.fabs.f64
+define void @fabs_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %fabs = call double @llvm.fabs.f64(double %vec) #1
+  store double %fabs, double addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_v2f64'
+; CHECK: estimated cost of 0 for {{.*}} call <2 x double> @llvm.fabs.v2f64
+define void @fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %vec) #1
+  store <2 x double> %fabs, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_v3f64'
+; CHECK: estimated cost of 0 for {{.*}} call <3 x double> @llvm.fabs.v3f64
+define void @fabs_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %fabs = call <3 x double> @llvm.fabs.v3f64(<3 x double> %vec) #1
+  store <3 x double> %fabs, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_f16'
+; CHECK: estimated cost of 0 for {{.*}} call half @llvm.fabs.f16
+define void @fabs_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %fabs = call half @llvm.fabs.f16(half %vec) #1
+  store half %fabs, half addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_v2f16'
+; CHECK: estimated cost of 0 for {{.*}} call <2 x half> @llvm.fabs.v2f16
+define void @fabs_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %vec) #1
+  store <2 x half> %fabs, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'fabs_v3f16'
+; CHECK: estimated cost of 0 for {{.*}} call <3 x half> @llvm.fabs.v3f16
+define void @fabs_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr
+  %fabs = call <3 x half> @llvm.fabs.v3f16(<3 x half> %vec) #1
+  store <3 x half> %fabs, <3 x half> addrspace(1)* %out
+  ret void
+}
+
+declare float @llvm.fabs.f32(float) #1
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>) #1
+declare <3 x float> @llvm.fabs.v3f32(<3 x float>) #1
+
+declare double @llvm.fabs.f64(double) #1
+declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #1
+declare <3 x double> @llvm.fabs.v3f64(<3 x double>) #1
+
+declare half @llvm.fabs.f16(half) #1
+declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #1
+declare <3 x half> @llvm.fabs.v3f16(<3 x half>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
diff --git a/test/Analysis/CostModel/AMDGPU/fadd.ll b/test/Analysis/CostModel/AMDGPU/fadd.ll
new file mode 100644
index 000000000000..00e91bd6223a
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/fadd.ll
@@ -0,0 +1,88 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fadd_f32'
+; ALL: estimated cost of 1 for {{.*}} fadd float
+define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fadd float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fadd <2 x float>
+define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fadd <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fadd <3 x float>
+define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fadd <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fadd double
+; SLOWF64: estimated cost of 3 for {{.*}} fadd double
+define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fadd double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fadd <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fadd <2 x double>
+define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fadd <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fadd <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fadd <3 x double>
+define void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fadd <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_f16'
+; ALL: estimated cost of 1 for {{.*}} fadd half
+define void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fadd half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fadd <2 x half>
+define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fadd <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fadd <4 x half>
+define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fadd <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Analysis/CostModel/AMDGPU/fdiv.ll b/test/Analysis/CostModel/AMDGPU/fdiv.ll
new file mode 100644
index 000000000000..3f374422ad9d
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/fdiv.ll
@@ -0,0 +1,96 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CIFASTF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CISLOWF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=tahiti -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SIFASTF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=verde -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SISLOWF64 %s
+
+; ALL: 'fdiv_f32'
+; ALL: estimated cost of 10 for {{.*}} fdiv float
+define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fdiv float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f32'
+; ALL: estimated cost of 20 for {{.*}} fdiv <2 x float>
+define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fdiv <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v3f32'
+; ALL: estimated cost of 30 for {{.*}} fdiv <3 x float>
+define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fdiv <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_f64'
+; CIFASTF64: estimated cost of 29 for {{.*}} fdiv double
+; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double
+; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double
+; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double
+define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fdiv double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f64'
+; CIFASTF64: estimated cost of 58 for {{.*}} fdiv <2 x double>
+; CISLOWF64: estimated cost of 66 for {{.*}} fdiv <2 x double>
+; SIFASTF64: estimated cost of 64 for {{.*}} fdiv <2 x double>
+; SISLOWF64: estimated cost of 72 for {{.*}} fdiv <2 x double>
+define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fdiv <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v3f64'
+; CIFASTF64: estimated cost of 87 for {{.*}} fdiv <3 x double>
+; CISLOWF64: estimated cost of 99 for {{.*}} fdiv <3 x double>
+; SIFASTF64: estimated cost of 96 for {{.*}} fdiv <3 x double>
+; SISLOWF64: estimated cost of 108 for {{.*}} fdiv <3 x double>
+define void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fdiv <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_f16'
+; ALL: estimated cost of 10 for {{.*}} fdiv half
+define void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fdiv half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f16'
+; ALL: estimated cost of 20 for {{.*}} fdiv <2 x half>
+define void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fdiv <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v4f16'
+; ALL: estimated cost of 40 for {{.*}} fdiv <4 x half>
+define void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fdiv <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Analysis/CostModel/AMDGPU/fmul.ll b/test/Analysis/CostModel/AMDGPU/fmul.ll
new file mode 100644
index 000000000000..6303bb7988c5
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/fmul.ll
@@ -0,0 +1,88 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fmul_f32'
+; ALL: estimated cost of 1 for {{.*}} fmul float
+define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fmul float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fmul <2 x float>
+define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fmul <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fmul <3 x float>
+define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fmul <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fmul double
+; SLOWF64: estimated cost of 3 for {{.*}} fmul double
+define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fmul double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fmul <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fmul <2 x double>
+define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fmul <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fmul <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fmul <3 x double>
+define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fmul <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_f16'
+; ALL: estimated cost of 1 for {{.*}} fmul half
+define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fmul half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fmul <2 x half>
+define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fmul <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fmul <4 x half>
+define void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fmul <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Analysis/CostModel/AMDGPU/fsub.ll b/test/Analysis/CostModel/AMDGPU/fsub.ll
new file mode 100644
index 000000000000..e0850be9867e
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/fsub.ll
@@ -0,0 +1,86 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fsub_f32'
+; ALL: estimated cost of 1 for {{.*}} fsub float
+define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fsub float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fsub <2 x float>
+define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fsub <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fsub <3 x float>
+define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fsub <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fsub double
+; SLOWF64: estimated cost of 3 for {{.*}} fsub double
+define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fsub double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fsub <2 x double>
+define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fsub <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fsub <3 x double>
+define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fsub <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_f16'
+; ALL: estimated cost of 1 for {{.*}} fsub half
+define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fsub half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fsub <2 x half>
+define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fsub <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fsub <4 x half>
+define void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fsub <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
diff --git a/test/Analysis/CostModel/AMDGPU/insertelement.ll b/test/Analysis/CostModel/AMDGPU/insertelement.ll
new file mode 100644
index 000000000000..1765afe3169e
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/insertelement.ll
@@ -0,0 +1,37 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'insertelement_v2i32'
+; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i32>
+define void @insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr) {
+  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+  %insert = insertelement <2 x i32> %vec, i32 1, i32 123
+  store <2 x i32> %insert, <2 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'insertelement_v2i64'
+; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i64>
+define void @insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr) {
+  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+  %insert = insertelement <2 x i64> %vec, i64 1, i64 123
+  store <2 x i64> %insert, <2 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'insertelement_v2i16'
+; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i16>
+define void @insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr) {
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+  %insert = insertelement <2 x i16> %vec, i16 1, i16 123
+  store <2 x i16> %insert, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'insertelement_v2i8'
+; CHECK: estimated cost of 0 for {{.*}} insertelement <2 x i8>
+define void @insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %vaddr) {
+  %vec = load <2 x i8>, <2 x i8> addrspace(1)* %vaddr
+  %insert = insertelement <2 x i8> %vec, i8 1, i8 123
+  store <2 x i8> %insert, <2 x i8> addrspace(1)* %out
+  ret void
+}
diff --git a/test/Analysis/CostModel/AMDGPU/mul.ll b/test/Analysis/CostModel/AMDGPU/mul.ll
new file mode 100644
index 000000000000..cbc755a6e6a9
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/mul.ll
@@ -0,0 +1,85 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'mul_i32'
+; CHECK: estimated cost of 3 for {{.*}} mul i32
+define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %mul = mul i32 %vec, %b
+  store i32 %mul, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v2i32'
+; CHECK: estimated cost of 6 for {{.*}} mul <2 x i32>
+define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+  %mul = mul <2 x i32> %vec, %b
+  store <2 x i32> %mul, <2 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v3i32'
+; CHECK: estimated cost of 9 for {{.*}} mul <3 x i32>
+define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
+  %mul = mul <3 x i32> %vec, %b
+  store <3 x i32> %mul, <3 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v4i32'
+; CHECK: estimated cost of 12 for {{.*}} mul <4 x i32>
+define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
+  %mul = mul <4 x i32> %vec, %b
+  store <4 x i32> %mul, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_i64'
+; CHECK: estimated cost of 16 for {{.*}} mul i64
+define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %mul = mul i64 %vec, %b
+  store i64 %mul, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v2i64'
+; CHECK: estimated cost of 32 for {{.*}} mul <2 x i64>
+define void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+  %mul = mul <2 x i64> %vec, %b
+  store <2 x i64> %mul, <2 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v3i64'
+; CHECK: estimated cost of 48 for {{.*}} mul <3 x i64>
+define void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
+  %mul = mul <3 x i64> %vec, %b
+  store <3 x i64> %mul, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v4i64'
+; CHECK: estimated cost of 64 for {{.*}} mul <4 x i64>
+define void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+  %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
+  %mul = mul <4 x i64> %vec, %b
+  store <4 x i64> %mul, <4 x i64> addrspace(1)* %out
+  ret void
+}
+
+
+; CHECK: 'mul_v8i64'
+; CHECK: estimated cost of 128 for {{.*}} mul <8 x i64>
+define void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 {
+  %vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
+  %mul = mul <8 x i64> %vec, %b
+  store <8 x i64> %mul, <8 x i64> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/Analysis/CostModel/AMDGPU/shifts.ll b/test/Analysis/CostModel/AMDGPU/shifts.ll
new file mode 100644
index 000000000000..003aed7b2fc8
--- /dev/null
+++ b/test/Analysis/CostModel/AMDGPU/shifts.ll
@@ -0,0 +1,61 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=FAST64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SLOW64 %s
+
+; ALL: 'shl_i32'
+; ALL: estimated cost of 1 for {{.*}} shl i32
+define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %or = shl i32 %vec, %b
+  store i32 %or, i32 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'shl_i64'
+; FAST64: estimated cost of 2 for {{.*}} shl i64
+; SLOW64: estimated cost of 3 for {{.*}} shl i64
+define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %or = shl i64 %vec, %b
+  store i64 %or, i64 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'lshr_i32'
+; ALL: estimated cost of 1 for {{.*}} lshr i32
+define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %or = lshr i32 %vec, %b
+  store i32 %or, i32 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'lshr_i64'
+; FAST64: estimated cost of 2 for {{.*}} lshr i64
+; SLOW64: estimated cost of 3 for {{.*}} lshr i64
+define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %or = lshr i64 %vec, %b
+  store i64 %or, i64 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'ashr_i32'
+; ALL: estimated cost of 1 for {{.*}} ashr i32
+define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %or = ashr i32 %vec, %b
+  store i32 %or, i32 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'ashr_i64'
+; FAST64: estimated cost of 2 for {{.*}} ashr i64
+; SLOW64: estimated cost of 3 for {{.*}} ashr i64
+define void @ashr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %or = ashr i64 %vec, %b
+  store i64 %or, i64 addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
