Diffstat (limited to 'lib/Transforms/Scalar/LoadCombine.cpp')
-rw-r--r--  lib/Transforms/Scalar/LoadCombine.cpp  73
1 file changed, 37 insertions, 36 deletions
diff --git a/lib/Transforms/Scalar/LoadCombine.cpp b/lib/Transforms/Scalar/LoadCombine.cpp
index 1648878b0628..dfe51a4ce44c 100644
--- a/lib/Transforms/Scalar/LoadCombine.cpp
+++ b/lib/Transforms/Scalar/LoadCombine.cpp
@@ -35,10 +35,12 @@ using namespace llvm;
STATISTIC(NumLoadsAnalyzed, "Number of loads analyzed for combining");
STATISTIC(NumLoadsCombined, "Number of loads combined");
+#define LDCOMBINE_NAME "Combine Adjacent Loads"
+
namespace {
struct PointerOffsetPair {
Value *Pointer;
- uint64_t Offset;
+ APInt Offset;
};
struct LoadPOPPair {
@@ -63,12 +65,16 @@ public:
using llvm::Pass::doInitialization;
bool doInitialization(Function &) override;
bool runOnBasicBlock(BasicBlock &BB) override;
- void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<AAResultsWrapperPass>();
+ AU.addPreserved<GlobalsAAWrapperPass>();
+ }
- const char *getPassName() const override { return "LoadCombine"; }
+ const char *getPassName() const override { return LDCOMBINE_NAME; }
static char ID;
- typedef IRBuilder<true, TargetFolder> BuilderTy;
+ typedef IRBuilder<TargetFolder> BuilderTy;
private:
BuilderTy *Builder;
@@ -87,22 +93,25 @@ bool LoadCombine::doInitialization(Function &F) {
}
PointerOffsetPair LoadCombine::getPointerOffsetPair(LoadInst &LI) {
+ auto &DL = LI.getModule()->getDataLayout();
+
PointerOffsetPair POP;
POP.Pointer = LI.getPointerOperand();
- POP.Offset = 0;
+ unsigned BitWidth = DL.getPointerSizeInBits(LI.getPointerAddressSpace());
+ POP.Offset = APInt(BitWidth, 0);
+
while (isa<BitCastInst>(POP.Pointer) || isa<GetElementPtrInst>(POP.Pointer)) {
if (auto *GEP = dyn_cast<GetElementPtrInst>(POP.Pointer)) {
- auto &DL = LI.getModule()->getDataLayout();
- unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
- APInt Offset(BitWidth, 0);
- if (GEP->accumulateConstantOffset(DL, Offset))
- POP.Offset += Offset.getZExtValue();
- else
+ APInt LastOffset = POP.Offset;
+ if (!GEP->accumulateConstantOffset(DL, POP.Offset)) {
// Can't handle GEPs with variable indices.
+ POP.Offset = LastOffset;
return POP;
+ }
POP.Pointer = GEP->getPointerOperand();
- } else if (auto *BC = dyn_cast<BitCastInst>(POP.Pointer))
+ } else if (auto *BC = dyn_cast<BitCastInst>(POP.Pointer)) {
POP.Pointer = BC->getOperand(0);
+ }
}
return POP;
}
@@ -115,8 +124,8 @@ bool LoadCombine::combineLoads(
continue;
std::sort(Loads.second.begin(), Loads.second.end(),
[](const LoadPOPPair &A, const LoadPOPPair &B) {
- return A.POP.Offset < B.POP.Offset;
- });
+ return A.POP.Offset.slt(B.POP.Offset);
+ });
if (aggregateLoads(Loads.second))
Combined = true;
}
@@ -132,28 +141,31 @@ bool LoadCombine::aggregateLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
LoadInst *BaseLoad = nullptr;
SmallVector<LoadPOPPair, 8> AggregateLoads;
bool Combined = false;
- uint64_t PrevOffset = -1ull;
+ bool ValidPrevOffset = false;
+ APInt PrevOffset;
uint64_t PrevSize = 0;
for (auto &L : Loads) {
- if (PrevOffset == -1ull) {
+ if (ValidPrevOffset == false) {
BaseLoad = L.Load;
PrevOffset = L.POP.Offset;
PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize(
L.Load->getType());
AggregateLoads.push_back(L);
+ ValidPrevOffset = true;
continue;
}
if (L.Load->getAlignment() > BaseLoad->getAlignment())
continue;
- if (L.POP.Offset > PrevOffset + PrevSize) {
+ APInt PrevEnd = PrevOffset + PrevSize;
+ if (L.POP.Offset.sgt(PrevEnd)) {
// No other load will be combinable
if (combineLoads(AggregateLoads))
Combined = true;
AggregateLoads.clear();
- PrevOffset = -1;
+ ValidPrevOffset = false;
continue;
}
- if (L.POP.Offset != PrevOffset + PrevSize)
+ if (L.POP.Offset != PrevEnd)
// This load is offset less than the size of the last load.
// FIXME: We may want to handle this case.
continue;
@@ -199,7 +211,7 @@ bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
Value *Ptr = Builder->CreateConstGEP1_64(
Builder->CreatePointerCast(Loads[0].POP.Pointer,
Builder->getInt8PtrTy(AddressSpace)),
- Loads[0].POP.Offset);
+ Loads[0].POP.Offset.getSExtValue());
LoadInst *NewLoad = new LoadInst(
Builder->CreatePointerCast(
Ptr, PointerType::get(IntegerType::get(Ptr->getContext(), TotalSize),
@@ -212,7 +224,7 @@ bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
Value *V = Builder->CreateExtractInteger(
L.Load->getModule()->getDataLayout(), NewLoad,
cast<IntegerType>(L.Load->getType()),
- L.POP.Offset - Loads[0].POP.Offset, "combine.extract");
+ (L.POP.Offset - Loads[0].POP.Offset).getZExtValue(), "combine.extract");
L.Load->replaceAllUsesWith(V);
}
@@ -221,12 +233,12 @@ bool LoadCombine::combineLoads(SmallVectorImpl<LoadPOPPair> &Loads) {
}
bool LoadCombine::runOnBasicBlock(BasicBlock &BB) {
- if (skipOptnoneFunction(BB))
+ if (skipBasicBlock(BB))
return false;
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- IRBuilder<true, TargetFolder> TheBuilder(
+ IRBuilder<TargetFolder> TheBuilder(
BB.getContext(), TargetFolder(BB.getModule()->getDataLayout()));
Builder = &TheBuilder;
@@ -260,23 +272,12 @@ bool LoadCombine::runOnBasicBlock(BasicBlock &BB) {
return Combined;
}
-void LoadCombine::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
-
- AU.addRequired<AAResultsWrapperPass>();
- AU.addPreserved<GlobalsAAWrapperPass>();
-}
-
char LoadCombine::ID = 0;
BasicBlockPass *llvm::createLoadCombinePass() {
return new LoadCombine();
}
-INITIALIZE_PASS_BEGIN(LoadCombine, "load-combine", "Combine Adjacent Loads",
- false, false)
+INITIALIZE_PASS_BEGIN(LoadCombine, "load-combine", LDCOMBINE_NAME, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
-INITIALIZE_PASS_END(LoadCombine, "load-combine", "Combine Adjacent Loads",
- false, false)
-
+INITIALIZE_PASS_END(LoadCombine, "load-combine", LDCOMBINE_NAME, false, false)
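
For reference, below is a minimal, self-contained sketch of the pointer/offset walk that the rewritten getPointerOffsetPair performs: the byte offset is kept in an APInt sized to the pointer width of the load's address space (so it no longer truncates through getZExtValue), accumulateConstantOffset adds into that APInt and returns false on variable indices, and the offset is restored when accumulation fails. This assumes the LLVM C++ API of this era; the helper name stripConstantOffsets is illustrative and not part of the tree.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include <utility>

using namespace llvm;

// Illustrative helper (not part of the patch): strip bitcasts and
// constant-index GEPs from a load's pointer operand, accumulating the
// byte offset in an APInt sized to the pointer width of the load's
// address space, mirroring what getPointerOffsetPair now does.
static std::pair<Value *, APInt> stripConstantOffsets(LoadInst &LI) {
  const DataLayout &DL = LI.getModule()->getDataLayout();
  unsigned BitWidth = DL.getPointerSizeInBits(LI.getPointerAddressSpace());

  Value *Ptr = LI.getPointerOperand();
  APInt Offset(BitWidth, 0);

  while (isa<BitCastInst>(Ptr) || isa<GetElementPtrInst>(Ptr)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      APInt Saved = Offset;
      // accumulateConstantOffset adds into Offset as it walks the indices
      // and returns false on a variable index, possibly leaving Offset
      // partially updated; restore the previous value in that case.
      if (!GEP->accumulateConstantOffset(DL, Offset)) {
        Offset = Saved;
        break;
      }
      Ptr = GEP->getPointerOperand();
    } else {
      // Bitcasts do not change the address; just look through them.
      Ptr = cast<BitCastInst>(Ptr)->getOperand(0);
    }
  }
  return {Ptr, Offset};
}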