; RUN: opt -S -codegenprepare < %s | FileCheck %s --check-prefix=SLOW
; RUN: opt -S -codegenprepare -mattr=+bmi < %s | FileCheck %s --check-prefix=FAST_TZ
; RUN: opt -S -codegenprepare -mattr=+lzcnt < %s | FileCheck %s --check-prefix=FAST_LZ

target triple = "x86_64-unknown-unknown"
target datalayout = "e-n32:64"

; If the intrinsic is cheap, nothing should change.
; If the intrinsic is expensive, check if the input is zero to avoid the call.
; This undoes speculation that may have been created by SimplifyCFG + InstCombine.
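;
; At the source level, the guarded expansion checked below corresponds to a
; zero test around the count-zeros operation. A minimal sketch in C (the
; helper name cttz_guarded is hypothetical; __builtin_ctzll is undefined for
; a zero argument, which is why the expanded call can set is_zero_undef to
; true):
;
;   #include <stdint.h>
;   uint64_t cttz_guarded(uint64_t A) {
;     // Branch on zero so the expensive count is only done when defined.
;     return (A == 0) ? 64 : (uint64_t)__builtin_ctzll(A);
;   }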

define i64 @cttz(i64 %A) {
entry:
  %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
  ret i64 %z

; SLOW-LABEL: @cttz(
; SLOW: entry:
; SLOW:   %cmpz = icmp eq i64 %A, 0
; SLOW:   br i1 %cmpz, label %cond.end, label %cond.false
; SLOW: cond.false:
; SLOW:   %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
; SLOW:   br label %cond.end
; SLOW: cond.end:
; SLOW:   %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
; SLOW:   ret i64 %ctz

; FAST_TZ-LABEL: @cttz(
; FAST_TZ:  %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
; FAST_TZ:  ret i64 %z
}

define i64 @ctlz(i64 %A) {
entry:
  %z = call i64 @llvm.ctlz.i64(i64 %A, i1 false)
  ret i64 %z

; SLOW-LABEL: @ctlz(
; SLOW: entry:
; SLOW:   %cmpz = icmp eq i64 %A, 0
; SLOW:   br i1 %cmpz, label %cond.end, label %cond.false
; SLOW: cond.false:
; SLOW:   %z = call i64 @llvm.ctlz.i64(i64 %A, i1 true)
; SLOW:   br label %cond.end
; SLOW: cond.end:
; SLOW:   %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
; SLOW:   ret i64 %ctz

; FAST_LZ-LABEL: @ctlz(
; FAST_LZ:  %z = call i64 @llvm.ctlz.i64(i64 %A, i1 false)
; FAST_LZ:  ret i64 %z
}

declare i64 @llvm.cttz.i64(i64, i1)
declare i64 @llvm.ctlz.i64(i64, i1)