Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/AliasAnalysis.cpp                 |  80
-rw-r--r--  lib/Analysis/AliasAnalysisCounter.cpp          |  15
-rw-r--r--  lib/Analysis/AliasAnalysisEvaluator.cpp        | 128
-rw-r--r--  lib/Analysis/AliasDebugger.cpp                 |  10
-rw-r--r--  lib/Analysis/AliasSetTracker.cpp               |  46
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp            | 155
-rw-r--r--  lib/Analysis/BlockFrequencyInfoImpl.cpp        |  81
-rw-r--r--  lib/Analysis/CFGPrinter.cpp                    |   8
-rw-r--r--  lib/Analysis/CFLAliasAnalysis.cpp              |  68
-rw-r--r--  lib/Analysis/CaptureTracking.cpp               |   2
-rw-r--r--  lib/Analysis/DivergenceAnalysis.cpp            |   2
-rw-r--r--  lib/Analysis/DomPrinter.cpp                    |   2
-rw-r--r--  lib/Analysis/IPA/CallGraph.cpp                 |  40
-rw-r--r--  lib/Analysis/IPA/CallGraphSCCPass.cpp          |   6
-rw-r--r--  lib/Analysis/IPA/CallPrinter.cpp               |   2
-rw-r--r--  lib/Analysis/IPA/GlobalsModRef.cpp             |  15
-rw-r--r--  lib/Analysis/InstCount.cpp                     |   2
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp           |  19
-rw-r--r--  lib/Analysis/LazyValueInfo.cpp                 |  40
-rw-r--r--  lib/Analysis/LibCallAliasAnalysis.cpp          |   4
-rw-r--r--  lib/Analysis/LibCallSemantics.cpp              |   5
-rw-r--r--  lib/Analysis/Lint.cpp                          |  61
-rw-r--r--  lib/Analysis/LoopAccessAnalysis.cpp            |  12
-rw-r--r--  lib/Analysis/LoopPass.cpp                      |   2
-rw-r--r--  lib/Analysis/MemDepPrinter.cpp                 |   2
-rw-r--r--  lib/Analysis/MemDerefPrinter.cpp               |   2
-rw-r--r--  lib/Analysis/MemoryDependenceAnalysis.cpp      |  72
-rw-r--r--  lib/Analysis/MemoryLocation.cpp                |  84
-rw-r--r--  lib/Analysis/ModuleDebugInfoPrinter.cpp        |   2
-rw-r--r--  lib/Analysis/NoAliasAnalysis.cpp               |  17
-rw-r--r--  lib/Analysis/PHITransAddr.cpp                  |  15
-rw-r--r--  lib/Analysis/RegionPrinter.cpp                 |   2
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp               |  18
-rw-r--r--  lib/Analysis/ScalarEvolutionAliasAnalysis.cpp  |  19
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp       |   6
-rw-r--r--  lib/Analysis/ScopedNoAliasAA.cpp               |  16
-rw-r--r--  lib/Analysis/StratifiedSets.h                  |   2
-rw-r--r--  lib/Analysis/TypeBasedAliasAnalysis.cpp        |  18
-rw-r--r--  lib/Analysis/ValueTracking.cpp                 | 306
39 files changed, 790 insertions, 596 deletions
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index ce46d5300517..d44653e8c9c1 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -48,23 +48,22 @@ char AliasAnalysis::ID = 0;
// Default chaining methods
//===----------------------------------------------------------------------===//
-AliasAnalysis::AliasResult
-AliasAnalysis::alias(const Location &LocA, const Location &LocB) {
+AliasAnalysis::AliasResult AliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->alias(LocA, LocB);
}
-bool AliasAnalysis::pointsToConstantMemory(const Location &Loc,
+bool AliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->pointsToConstantMemory(Loc, OrLocal);
}
-AliasAnalysis::Location
-AliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- AliasAnalysis::ModRefResult &Mask) {
+AliasAnalysis::ModRefResult
+AliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getArgLocation(CS, ArgIdx, Mask);
+ return AA->getArgModRefInfo(CS, ArgIdx);
}
void AliasAnalysis::deleteValue(Value *V) {
@@ -93,7 +92,7 @@ AliasAnalysis::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
// location this memory access defines. The best we can say
// is that if the call references what this instruction
// defines, it must be clobbered by this location.
- const AliasAnalysis::Location DefLoc = MemoryLocation::get(I);
+ const MemoryLocation DefLoc = MemoryLocation::get(I);
if (getModRefInfo(Call, DefLoc) != AliasAnalysis::NoModRef)
return AliasAnalysis::ModRef;
}
@@ -101,8 +100,7 @@ AliasAnalysis::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
ModRefBehavior MRB = getModRefBehavior(CS);
@@ -122,11 +120,11 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
const Value *Arg = *AI;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CSLoc =
- getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
- ArgMask);
- if (!isNoAlias(CSLoc, Loc)) {
+ unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
+ MemoryLocation ArgLoc =
+ MemoryLocation::getForArgument(CS, ArgIdx, *TLI);
+ if (!isNoAlias(ArgLoc, Loc)) {
+ ModRefResult ArgMask = getArgModRefInfo(CS, ArgIdx);
doesAlias = true;
AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
}
@@ -183,18 +181,18 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS2Loc =
- getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
- ArgMask);
- // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
+ unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
+ auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, *TLI);
+
+ // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence of
// CS1 on that location is the inverse.
+ ModRefResult ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
if (ArgMask == Mod)
ArgMask = ModRef;
else if (ArgMask == Ref)
ArgMask = Mod;
- R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
+ R = ModRefResult((R | (getModRefInfo(CS1, CS2ArgLoc) & ArgMask)) & Mask);
if (R == Mask)
break;
}
@@ -212,13 +210,14 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS1Loc = getArgLocation(
- CS1, (unsigned)std::distance(CS1.arg_begin(), I), ArgMask);
- // ArgMask indicates what CS1 might do to CS1Loc; if CS1 might Mod
- // CS1Loc, then we care about either a Mod or a Ref by CS2. If CS1
+ unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
+ auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, *TLI);
+
+ // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
+ // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
// might Ref, then we care only about a Mod by CS2.
- ModRefResult ArgR = getModRefInfo(CS2, CS1Loc);
+ ModRefResult ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
+ ModRefResult ArgR = getModRefInfo(CS2, CS1ArgLoc);
if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
R = ModRefResult((R | ArgMask) & Mask);
@@ -268,7 +267,7 @@ AliasAnalysis::getModRefBehavior(const Function *F) {
//===----------------------------------------------------------------------===//
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const LoadInst *L, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const LoadInst *L, const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!L->isUnordered())
return ModRef;
@@ -283,7 +282,7 @@ AliasAnalysis::getModRefInfo(const LoadInst *L, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const StoreInst *S, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const StoreInst *S, const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!S->isUnordered())
return ModRef;
@@ -306,7 +305,7 @@ AliasAnalysis::getModRefInfo(const StoreInst *S, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc) {
if (Loc.Ptr) {
// If the va_arg address cannot alias the pointer in question, then the
@@ -325,7 +324,8 @@ AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX,
+ const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (CX->getSuccessOrdering() > Monotonic)
return ModRef;
@@ -338,7 +338,8 @@ AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW,
+ const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (RMW->getOrdering() > Monotonic)
return ModRef;
@@ -354,10 +355,8 @@ AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW, const Location &Loc) {
// BasicAA isn't willing to spend linear time determining whether an alloca
// was captured before or after this particular call, while we are. However,
// with a smarter AA in place, this test is just wasting compile time.
-AliasAnalysis::ModRefResult
-AliasAnalysis::callCapturesBefore(const Instruction *I,
- const AliasAnalysis::Location &MemLoc,
- DominatorTree *DT) {
+AliasAnalysis::ModRefResult AliasAnalysis::callCapturesBefore(
+ const Instruction *I, const MemoryLocation &MemLoc, DominatorTree *DT) {
if (!DT)
return AliasAnalysis::ModRef;
@@ -390,8 +389,7 @@ AliasAnalysis::callCapturesBefore(const Instruction *I,
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (isNoAlias(AliasAnalysis::Location(*CI),
- AliasAnalysis::Location(Object)))
+ if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
continue;
if (CS.doesNotAccessMemory(ArgNo))
continue;
@@ -431,14 +429,14 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
- return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
+ return DL ? DL->getTypeStoreSize(Ty) : MemoryLocation::UnknownSize;
}
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
return canInstructionRangeModRef(BB.front(), BB.back(), Loc, Mod);
}
@@ -449,7 +447,7 @@ bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
/// I1 and I2 must be in the same basic block.
bool AliasAnalysis::canInstructionRangeModRef(const Instruction &I1,
const Instruction &I2,
- const Location &Loc,
+ const MemoryLocation &Loc,
const ModRefResult Mode) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
diff --git a/lib/Analysis/AliasAnalysisCounter.cpp b/lib/Analysis/AliasAnalysisCounter.cpp
index a1bfba1f0026..0112186720bd 100644
--- a/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/lib/Analysis/AliasAnalysisCounter.cpp
@@ -98,22 +98,24 @@ namespace {
}
// FIXME: We could count these too...
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override {
return getAnalysis<AliasAnalysis>().pointsToConstantMemory(Loc, OrLocal);
}
// Forwarding functions: just delegate to a real AA implementation, counting
// the number of responses...
- AliasResult alias(const Location &LocA, const Location &LocB) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
};
-}
+} // namespace
char AliasAnalysisCounter::ID = 0;
INITIALIZE_AG_PASS(AliasAnalysisCounter, AliasAnalysis, "count-aa",
@@ -124,7 +126,8 @@ ModulePass *llvm::createAliasAnalysisCounterPass() {
}
AliasAnalysis::AliasResult
-AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
+AliasAnalysisCounter::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
const char *AliasString = nullptr;
@@ -150,7 +153,7 @@ AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
AliasAnalysis::ModRefResult
AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
const char *MRString = nullptr;
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index dd6a3a0715e1..1501b5f64aa6 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -47,8 +47,8 @@ static cl::opt<bool> EvalAAMD("evaluate-aa-metadata", cl::ReallyHidden);
namespace {
class AAEval : public FunctionPass {
- unsigned NoAlias, MayAlias, PartialAlias, MustAlias;
- unsigned NoModRef, Mod, Ref, ModRef;
+ unsigned NoAliasCount, MayAliasCount, PartialAliasCount, MustAliasCount;
+ unsigned NoModRefCount, ModCount, RefCount, ModRefCount;
public:
static char ID; // Pass identification, replacement for typeid
@@ -62,8 +62,8 @@ namespace {
}
bool doInitialization(Module &M) override {
- NoAlias = MayAlias = PartialAlias = MustAlias = 0;
- NoModRef = Mod = Ref = ModRef = 0;
+ NoAliasCount = MayAliasCount = PartialAliasCount = MustAliasCount = 0;
+ NoModRefCount = ModCount = RefCount = ModRefCount = 0;
if (PrintAll) {
PrintNoAlias = PrintMayAlias = true;
@@ -76,7 +76,7 @@ namespace {
bool runOnFunction(Function &F) override;
bool doFinalization(Module &M) override;
};
-}
+} // namespace
char AAEval::ID = 0;
INITIALIZE_PASS_BEGIN(AAEval, "aa-eval",
@@ -186,29 +186,33 @@ bool AAEval::runOnFunction(Function &F) {
// iterate over the worklist, and run the full (n^2)/2 disambiguations
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
- uint64_t I1Size = AliasAnalysis::UnknownSize;
+ uint64_t I1Size = MemoryLocation::UnknownSize;
Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);
for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
- uint64_t I2Size = AliasAnalysis::UnknownSize;
+ uint64_t I2Size = MemoryLocation::UnknownSize;
Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);
switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
case AliasAnalysis::NoAlias:
PrintResults("NoAlias", PrintNoAlias, *I1, *I2, F.getParent());
- ++NoAlias; break;
+ ++NoAliasCount;
+ break;
case AliasAnalysis::MayAlias:
PrintResults("MayAlias", PrintMayAlias, *I1, *I2, F.getParent());
- ++MayAlias; break;
+ ++MayAliasCount;
+ break;
case AliasAnalysis::PartialAlias:
PrintResults("PartialAlias", PrintPartialAlias, *I1, *I2,
F.getParent());
- ++PartialAlias; break;
+ ++PartialAliasCount;
+ break;
case AliasAnalysis::MustAlias:
PrintResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent());
- ++MustAlias; break;
+ ++MustAliasCount;
+ break;
}
}
}
@@ -224,19 +228,23 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::NoAlias:
PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2,
F.getParent());
- ++NoAlias; break;
+ ++NoAliasCount;
+ break;
case AliasAnalysis::MayAlias:
PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2,
F.getParent());
- ++MayAlias; break;
+ ++MayAliasCount;
+ break;
case AliasAnalysis::PartialAlias:
PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2,
F.getParent());
- ++PartialAlias; break;
+ ++PartialAliasCount;
+ break;
case AliasAnalysis::MustAlias:
PrintLoadStoreResults("MustAlias", PrintMustAlias, *I1, *I2,
F.getParent());
- ++MustAlias; break;
+ ++MustAliasCount;
+ break;
}
}
}
@@ -250,19 +258,23 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::NoAlias:
PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2,
F.getParent());
- ++NoAlias; break;
+ ++NoAliasCount;
+ break;
case AliasAnalysis::MayAlias:
PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2,
F.getParent());
- ++MayAlias; break;
+ ++MayAliasCount;
+ break;
case AliasAnalysis::PartialAlias:
PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2,
F.getParent());
- ++PartialAlias; break;
+ ++PartialAliasCount;
+ break;
case AliasAnalysis::MustAlias:
PrintLoadStoreResults("MustAlias", PrintMustAlias, *I1, *I2,
F.getParent());
- ++MustAlias; break;
+ ++MustAliasCount;
+ break;
}
}
}
@@ -275,23 +287,27 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end();
V != Ve; ++V) {
- uint64_t Size = AliasAnalysis::UnknownSize;
+ uint64_t Size = MemoryLocation::UnknownSize;
Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(*C, *V, Size)) {
case AliasAnalysis::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, I, *V, F.getParent());
- ++NoModRef; break;
+ ++NoModRefCount;
+ break;
case AliasAnalysis::Mod:
PrintModRefResults("Just Mod", PrintMod, I, *V, F.getParent());
- ++Mod; break;
+ ++ModCount;
+ break;
case AliasAnalysis::Ref:
PrintModRefResults("Just Ref", PrintRef, I, *V, F.getParent());
- ++Ref; break;
+ ++RefCount;
+ break;
case AliasAnalysis::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent());
- ++ModRef; break;
+ ++ModRefCount;
+ break;
}
}
}
@@ -305,16 +321,20 @@ bool AAEval::runOnFunction(Function &F) {
switch (AA.getModRefInfo(*C, *D)) {
case AliasAnalysis::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
- ++NoModRef; break;
+ ++NoModRefCount;
+ break;
case AliasAnalysis::Mod:
PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
- ++Mod; break;
+ ++ModCount;
+ break;
case AliasAnalysis::Ref:
PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
- ++Ref; break;
+ ++RefCount;
+ break;
case AliasAnalysis::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
- ++ModRef; break;
+ ++ModRefCount;
+ break;
}
}
}
@@ -328,43 +348,47 @@ static void PrintPercent(unsigned Num, unsigned Sum) {
}
bool AAEval::doFinalization(Module &M) {
- unsigned AliasSum = NoAlias + MayAlias + PartialAlias + MustAlias;
+ unsigned AliasSum =
+ NoAliasCount + MayAliasCount + PartialAliasCount + MustAliasCount;
errs() << "===== Alias Analysis Evaluator Report =====\n";
if (AliasSum == 0) {
errs() << " Alias Analysis Evaluator Summary: No pointers!\n";
} else {
errs() << " " << AliasSum << " Total Alias Queries Performed\n";
- errs() << " " << NoAlias << " no alias responses ";
- PrintPercent(NoAlias, AliasSum);
- errs() << " " << MayAlias << " may alias responses ";
- PrintPercent(MayAlias, AliasSum);
- errs() << " " << PartialAlias << " partial alias responses ";
- PrintPercent(PartialAlias, AliasSum);
- errs() << " " << MustAlias << " must alias responses ";
- PrintPercent(MustAlias, AliasSum);
+ errs() << " " << NoAliasCount << " no alias responses ";
+ PrintPercent(NoAliasCount, AliasSum);
+ errs() << " " << MayAliasCount << " may alias responses ";
+ PrintPercent(MayAliasCount, AliasSum);
+ errs() << " " << PartialAliasCount << " partial alias responses ";
+ PrintPercent(PartialAliasCount, AliasSum);
+ errs() << " " << MustAliasCount << " must alias responses ";
+ PrintPercent(MustAliasCount, AliasSum);
errs() << " Alias Analysis Evaluator Pointer Alias Summary: "
- << NoAlias*100/AliasSum << "%/" << MayAlias*100/AliasSum << "%/"
- << PartialAlias*100/AliasSum << "%/"
- << MustAlias*100/AliasSum << "%\n";
+ << NoAliasCount * 100 / AliasSum << "%/"
+ << MayAliasCount * 100 / AliasSum << "%/"
+ << PartialAliasCount * 100 / AliasSum << "%/"
+ << MustAliasCount * 100 / AliasSum << "%\n";
}
// Display the summary for mod/ref analysis
- unsigned ModRefSum = NoModRef + Mod + Ref + ModRef;
+ unsigned ModRefSum = NoModRefCount + ModCount + RefCount + ModRefCount;
if (ModRefSum == 0) {
- errs() << " Alias Analysis Mod/Ref Evaluator Summary: no mod/ref!\n";
+ errs() << " Alias Analysis Mod/Ref Evaluator Summary: no "
+ "mod/ref!\n";
} else {
errs() << " " << ModRefSum << " Total ModRef Queries Performed\n";
- errs() << " " << NoModRef << " no mod/ref responses ";
- PrintPercent(NoModRef, ModRefSum);
- errs() << " " << Mod << " mod responses ";
- PrintPercent(Mod, ModRefSum);
- errs() << " " << Ref << " ref responses ";
- PrintPercent(Ref, ModRefSum);
- errs() << " " << ModRef << " mod & ref responses ";
- PrintPercent(ModRef, ModRefSum);
+ errs() << " " << NoModRefCount << " no mod/ref responses ";
+ PrintPercent(NoModRefCount, ModRefSum);
+ errs() << " " << ModCount << " mod responses ";
+ PrintPercent(ModCount, ModRefSum);
+ errs() << " " << RefCount << " ref responses ";
+ PrintPercent(RefCount, ModRefSum);
+ errs() << " " << ModRefCount << " mod & ref responses ";
+ PrintPercent(ModRefCount, ModRefSum);
errs() << " Alias Analysis Evaluator Mod/Ref Summary: "
- << NoModRef*100/ModRefSum << "%/" << Mod*100/ModRefSum << "%/"
- << Ref*100/ModRefSum << "%/" << ModRef*100/ModRefSum << "%\n";
+ << NoModRefCount * 100 / ModRefSum << "%/"
+ << ModCount * 100 / ModRefSum << "%/" << RefCount * 100 / ModRefSum
+ << "%/" << ModRefCount * 100 / ModRefSum << "%\n";
}
return false;
diff --git a/lib/Analysis/AliasDebugger.cpp b/lib/Analysis/AliasDebugger.cpp
index f98b57819609..fde0eeb43d48 100644
--- a/lib/Analysis/AliasDebugger.cpp
+++ b/lib/Analysis/AliasDebugger.cpp
@@ -94,7 +94,8 @@ namespace {
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
assert(Vals.find(LocA.Ptr) != Vals.end() &&
"Never seen value in AA before");
assert(Vals.find(LocB.Ptr) != Vals.end() &&
@@ -103,7 +104,7 @@ namespace {
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override {
+ const MemoryLocation &Loc) override {
assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::getModRefInfo(CS, Loc);
}
@@ -113,7 +114,8 @@ namespace {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override {
assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
@@ -128,7 +130,7 @@ namespace {
}
};
-}
+} // namespace
char AliasDebugger::ID = 0;
INITIALIZE_AG_PASS(AliasDebugger, AliasAnalysis, "debug-aa",
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index 12c1c7d4af90..f7a803c5f4ce 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -45,13 +45,9 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
PointerRec *R = AS.getSomePointer();
// If the pointers are not a must-alias pair, this set becomes a may alias.
- if (AA.alias(AliasAnalysis::Location(L->getValue(),
- L->getSize(),
- L->getAAInfo()),
- AliasAnalysis::Location(R->getValue(),
- R->getSize(),
- R->getAAInfo()))
- != AliasAnalysis::MustAlias)
+ if (AA.alias(MemoryLocation(L->getValue(), L->getSize(), L->getAAInfo()),
+ MemoryLocation(R->getValue(), R->getSize(), R->getAAInfo())) !=
+ AliasAnalysis::MustAlias)
AliasTy = MayAlias;
}
@@ -106,9 +102,8 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
if (PointerRec *P = getSomePointer()) {
AliasAnalysis &AA = AST.getAliasAnalysis();
AliasAnalysis::AliasResult Result =
- AA.alias(AliasAnalysis::Location(P->getValue(), P->getSize(),
- P->getAAInfo()),
- AliasAnalysis::Location(Entry.getValue(), Size, AAInfo));
+ AA.alias(MemoryLocation(P->getValue(), P->getSize(), P->getAAInfo()),
+ MemoryLocation(Entry.getValue(), Size, AAInfo));
if (Result != AliasAnalysis::MustAlias)
AliasTy = MayAlias;
else // First entry of must alias must have maximum size!
@@ -156,26 +151,24 @@ bool AliasSet::aliasesPointer(const Value *Ptr, uint64_t Size,
// SOME value in the set.
PointerRec *SomePtr = getSomePointer();
assert(SomePtr && "Empty must-alias set??");
- return AA.alias(AliasAnalysis::Location(SomePtr->getValue(),
- SomePtr->getSize(),
- SomePtr->getAAInfo()),
- AliasAnalysis::Location(Ptr, Size, AAInfo));
+ return AA.alias(MemoryLocation(SomePtr->getValue(), SomePtr->getSize(),
+ SomePtr->getAAInfo()),
+ MemoryLocation(Ptr, Size, AAInfo));
}
// If this is a may-alias set, we have to check all of the pointers in the set
// to be sure it doesn't alias the set...
for (iterator I = begin(), E = end(); I != E; ++I)
- if (AA.alias(AliasAnalysis::Location(Ptr, Size, AAInfo),
- AliasAnalysis::Location(I.getPointer(), I.getSize(),
- I.getAAInfo())))
+ if (AA.alias(MemoryLocation(Ptr, Size, AAInfo),
+ MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo())))
return true;
// Check the unknown instructions...
if (!UnknownInsts.empty()) {
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i)
if (AA.getModRefInfo(UnknownInsts[i],
- AliasAnalysis::Location(Ptr, Size, AAInfo)) !=
- AliasAnalysis::NoModRef)
+ MemoryLocation(Ptr, Size, AAInfo)) !=
+ AliasAnalysis::NoModRef)
return true;
}
@@ -196,10 +189,9 @@ bool AliasSet::aliasesUnknownInst(const Instruction *Inst,
}
for (iterator I = begin(), E = end(); I != E; ++I)
- if (AA.getModRefInfo(Inst, AliasAnalysis::Location(I.getPointer(),
- I.getSize(),
- I.getAAInfo())) !=
- AliasAnalysis::NoModRef)
+ if (AA.getModRefInfo(
+ Inst, MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo())) !=
+ AliasAnalysis::NoModRef)
return true;
return false;
@@ -345,8 +337,8 @@ bool AliasSetTracker::add(VAArgInst *VAAI) {
VAAI->getAAMetadata(AAInfo);
bool NewPtr;
- addPointer(VAAI->getOperand(0), AliasAnalysis::UnknownSize,
- AAInfo, AliasSet::ModRef, NewPtr);
+ addPointer(VAAI->getOperand(0), MemoryLocation::UnknownSize, AAInfo,
+ AliasSet::ModRef, NewPtr);
return NewPtr;
}
@@ -479,7 +471,7 @@ bool AliasSetTracker::remove(VAArgInst *VAAI) {
VAAI->getAAMetadata(AAInfo);
AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0),
- AliasAnalysis::UnknownSize, AAInfo);
+ MemoryLocation::UnknownSize, AAInfo);
if (!AS) return false;
remove(*AS);
return true;
@@ -674,7 +666,7 @@ namespace {
return false;
}
};
-}
+} // namespace
char AliasSetPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(AliasSetPrinter, "print-alias-sets",
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index a61faca2e54e..d11a748e4bf9 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -105,7 +105,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
uint64_t Size;
if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
return Size;
- return AliasAnalysis::UnknownSize;
+ return MemoryLocation::UnknownSize;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
@@ -146,7 +146,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
// reads a bit past the end given sufficient alignment.
uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);
- return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
+ return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
/// isObjectSize - Return true if we can prove that the object specified
@@ -154,7 +154,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
static bool isObjectSize(const Value *V, uint64_t Size,
const DataLayout &DL, const TargetLibraryInfo &TLI) {
uint64_t ObjectSize = getObjectSize(V, DL, TLI);
- return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
+ return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}
//===----------------------------------------------------------------------===//
@@ -182,7 +182,7 @@ namespace {
return !operator==(Other);
}
};
-}
+} // namespace
/// GetLinearExpression - Analyze the specified value as a linear expression:
@@ -459,7 +459,8 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
assert(AliasCache.empty() && "AliasCache must be cleared after use!");
assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
"BasicAliasAnalysis doesn't support interprocedural queries.");
@@ -475,18 +476,19 @@ namespace {
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override;
/// Get the location associated with a pointer argument of a callsite.
- Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) override;
+ ModRefResult getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) override;
/// getModRefBehavior - Return the behavior when calling the given
/// call site.
@@ -508,7 +510,7 @@ namespace {
private:
// AliasCache - Track alias queries to guard against recursion.
- typedef std::pair<Location, Location> LocPair;
+ typedef std::pair<MemoryLocation, MemoryLocation> LocPair;
typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
AliasCacheTy AliasCache;
@@ -592,8 +594,8 @@ ImmutablePass *llvm::createBasicAliasAnalysisPass() {
/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
-bool
-BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
+bool BasicAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) {
assert(Visited.empty() && "Visited must be cleared after use!");
unsigned MaxLookup = 8;
@@ -652,6 +654,8 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
return Worklist.empty();
}
+// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
+// some common utility location.
static bool isMemsetPattern16(const Function *MS,
const TargetLibraryInfo &TLI) {
if (TLI.has(LibFunc::memset_pattern16) &&
@@ -715,84 +719,33 @@ BasicAliasAnalysis::getModRefBehavior(const Function *F) {
return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
-AliasAnalysis::Location
-BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) {
- Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
- const TargetLibraryInfo &TLI =
- getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- if (II != nullptr)
+AliasAnalysis::ModRefResult
+BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
switch (II->getIntrinsicID()) {
- default: break;
+ default:
+ break;
case Intrinsic::memset:
case Intrinsic::memcpy:
- case Intrinsic::memmove: {
+ case Intrinsic::memmove:
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memory intrinsic");
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
- Loc.Size = LenCI->getZExtValue();
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Memory intrinsic location pointer not argument?");
- Mask = ArgIdx ? Ref : Mod;
- break;
- }
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start: {
- assert(ArgIdx == 1 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- break;
- }
- case Intrinsic::invariant_end: {
- assert(ArgIdx == 2 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
- break;
- }
- case Intrinsic::arm_neon_vld1: {
- assert(ArgIdx == 0 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- // LLVM's vld1 and vst1 intrinsics currently only support a single
- // vector register.
- if (DL)
- Loc.Size = DL->getTypeStoreSize(II->getType());
- break;
- }
- case Intrinsic::arm_neon_vst1: {
- assert(ArgIdx == 0 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- if (DL)
- Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
- break;
- }
+ return ArgIdx ? Ref : Mod;
}
// We can bound the aliasing properties of memset_pattern16 just as we can
// for memcpy/memset. This is particularly important because the
// LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
// whenever possible.
- else if (CS.getCalledFunction() &&
- isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+ if (CS.getCalledFunction() &&
+ isMemsetPattern16(CS.getCalledFunction(), *TLI)) {
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memset_pattern16");
- if (ArgIdx == 1)
- Loc.Size = 16;
- else if (const ConstantInt *LenCI =
- dyn_cast<ConstantInt>(CS.getArgument(2)))
- Loc.Size = LenCI->getZExtValue();
- assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
- "memset_pattern16 location pointer not argument?");
- Mask = ArgIdx ? Ref : Mod;
+ return ArgIdx ? Ref : Mod;
}
// FIXME: Handle memset_pattern4 and memset_pattern8 also.
- return Loc;
+ return AliasAnalysis::getArgModRefInfo(CS, ArgIdx);
}
static bool isAssumeIntrinsic(ImmutableCallSite CS) {
@@ -814,7 +767,7 @@ bool BasicAliasAnalysis::doInitialization(Module &M) {
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
@@ -850,7 +803,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (!isNoAlias(Location(*CI), Location(Object))) {
+ if (!isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))) {
PassedAsArg = true;
break;
}
@@ -902,8 +855,8 @@ aliasSameBasePointerGEPs(const GEPOperator *GEP1, uint64_t V1Size,
// If we don't know the size of the accesses through both GEPs, we can't
// determine whether the struct fields accessed can't alias.
- if (V1Size == AliasAnalysis::UnknownSize ||
- V2Size == AliasAnalysis::UnknownSize)
+ if (V1Size == MemoryLocation::UnknownSize ||
+ V2Size == MemoryLocation::UnknownSize)
return AliasAnalysis::MayAlias;
ConstantInt *C1 =
@@ -1017,8 +970,9 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
// Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
- UnderlyingV2, UnknownSize, AAMDNodes());
+ AliasResult BaseAlias =
+ aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
+ UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());
// Check for geps of non-aliasing underlying pointers where the offsets are
// identical.
@@ -1109,11 +1063,12 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// pointer, we know they cannot alias.
// If both accesses are unknown size, we can't do anything useful here.
- if (V1Size == UnknownSize && V2Size == UnknownSize)
+ if (V1Size == MemoryLocation::UnknownSize &&
+ V2Size == MemoryLocation::UnknownSize)
return MayAlias;
- AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
- V2, V2Size, V2AAInfo);
+ AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
+ AAMDNodes(), V2, V2Size, V2AAInfo);
if (R != MustAlias)
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
// If V2 is known not to alias GEP base pointer, then the two values
@@ -1153,7 +1108,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// greater, we know they do not overlap.
if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
if (GEP1BaseOffset >= 0) {
- if (V2Size != UnknownSize) {
+ if (V2Size != MemoryLocation::UnknownSize) {
if ((uint64_t)GEP1BaseOffset < V2Size)
return PartialAlias;
return NoAlias;
@@ -1167,7 +1122,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// GEP1 V2
// We need to know that V2Size is not unknown, otherwise we might have
// stripped a gep with negative index ('gep <ptr>, -1, ...).
- if (V1Size != UnknownSize && V2Size != UnknownSize) {
+ if (V1Size != MemoryLocation::UnknownSize &&
+ V2Size != MemoryLocation::UnknownSize) {
if (-(uint64_t)GEP1BaseOffset < V1Size)
return PartialAlias;
return NoAlias;
@@ -1218,8 +1174,9 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// mod Modulo. Check whether that difference guarantees that the
// two locations do not alias.
uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
- if (V1Size != UnknownSize && V2Size != UnknownSize &&
- ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
+ if (V1Size != MemoryLocation::UnknownSize &&
+ V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
+ V1Size <= Modulo - ModOffset)
return NoAlias;
// If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
@@ -1302,8 +1259,8 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
// on corresponding edges.
if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
if (PN2->getParent() == PN->getParent()) {
- LocPair Locs(Location(PN, PNSize, PNAAInfo),
- Location(V2, V2Size, V2AAInfo));
+ LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
if (PN > V2)
std::swap(Locs.first, Locs.second);
// Analyse the PHIs' inputs under the assumption that the PHIs are
@@ -1457,14 +1414,16 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (DL)
- if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
- (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
+ if ((V1Size != MemoryLocation::UnknownSize &&
+ isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
+ (V2Size != MemoryLocation::UnknownSize &&
+ isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
// otherwise infinitely recursive queries.
- LocPair Locs(Location(V1, V1Size, V1AAInfo),
- Location(V2, V2Size, V2AAInfo));
+ LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
if (V1 > V2)
std::swap(Locs.first, Locs.second);
std::pair<AliasCacheTy::iterator, bool> Pair =
@@ -1511,13 +1470,15 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
if (DL && O1 == O2)
- if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
- (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
+ if ((V1Size != MemoryLocation::UnknownSize &&
+ isObjectSize(O1, V1Size, *DL, *TLI)) ||
+ (V2Size != MemoryLocation::UnknownSize &&
+ isObjectSize(O2, V2Size, *DL, *TLI)))
return AliasCache[Locs] = PartialAlias;
AliasResult Result =
- AliasAnalysis::alias(Location(V1, V1Size, V1AAInfo),
- Location(V2, V2Size, V2AAInfo));
+ AliasAnalysis::alias(MemoryLocation(V1, V1Size, V1AAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
return AliasCache[Locs] = Result;
}
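
For the intrinsics the rewritten BasicAliasAnalysis::getArgModRefInfo handles, the result is now purely positional: the destination argument (index 0) of memset/memcpy/memmove is written and the source argument (index 1) is only read, with everything else deferring to the generic implementation. A sketch of what that means for a memcpy call site, assuming BasicAA is the implementation that ends up answering the query:

#include <cassert>
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/CallSite.h"
using namespace llvm;

// Hypothetical check: MemcpyCS must be a call to llvm.memcpy.
static void checkMemcpyArgModRef(AliasAnalysis &AA,
                                 ImmutableCallSite MemcpyCS) {
  assert(AA.getArgModRefInfo(MemcpyCS, 0) == AliasAnalysis::Mod); // dest: written
  assert(AA.getArgModRefInfo(MemcpyCS, 1) == AliasAnalysis::Ref); // src: read only
}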
diff --git a/lib/Analysis/BlockFrequencyInfoImpl.cpp b/lib/Analysis/BlockFrequencyInfoImpl.cpp
index 456cee179f0b..daa77b81d6b3 100644
--- a/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -286,7 +286,7 @@ bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
if (isLoopHeader(Resolved)) {
DEBUG(debugSuccessor("backedge"));
- Dist.addBackedge(OuterLoop->getHeader(), Weight);
+ Dist.addBackedge(Resolved, Weight);
return true;
}
@@ -349,7 +349,10 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
// LoopScale == 1 / ExitMass
// ExitMass == HeadMass - BackedgeMass
- BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;
+ BlockMass TotalBackedgeMass;
+ for (auto &Mass : Loop.BackedgeMass)
+ TotalBackedgeMass += Mass;
+ BlockMass ExitMass = BlockMass::getFull() - TotalBackedgeMass;
// Block scale stores the inverse of the scale. If this is an infinite loop,
// its exit mass will be zero. In this case, use an arbitrary scale for the
@@ -358,7 +361,7 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
ExitMass.isEmpty() ? InifiniteLoopScale : ExitMass.toScaled().inverse();
DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
- << " - " << Loop.BackedgeMass << ")\n"
+ << " - " << TotalBackedgeMass << ")\n"
<< " - scale = " << Loop.Scale << "\n");
}
@@ -375,6 +378,19 @@ void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
Loop.IsPackaged = true;
}
+#ifndef NDEBUG
+static void debugAssign(const BlockFrequencyInfoImplBase &BFI,
+ const DitheringDistributer &D, const BlockNode &T,
+ const BlockMass &M, const char *Desc) {
+ dbgs() << " => assign " << M << " (" << D.RemMass << ")";
+ if (Desc)
+ dbgs() << " [" << Desc << "]";
+ if (T.isValid())
+ dbgs() << " to " << BFI.getBlockName(T);
+ dbgs() << "\n";
+}
+#endif
+
void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
LoopData *OuterLoop,
Distribution &Dist) {
@@ -384,25 +400,12 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
// Distribute mass to successors as laid out in Dist.
DitheringDistributer D(Dist, Mass);
-#ifndef NDEBUG
- auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
- const char *Desc) {
- dbgs() << " => assign " << M << " (" << D.RemMass << ")";
- if (Desc)
- dbgs() << " [" << Desc << "]";
- if (T.isValid())
- dbgs() << " to " << getBlockName(T);
- dbgs() << "\n";
- };
- (void)debugAssign;
-#endif
-
for (const Weight &W : Dist.Weights) {
// Check for a local edge (non-backedge and non-exit).
BlockMass Taken = D.takeMass(W.Amount);
if (W.Type == Weight::Local) {
Working[W.TargetNode.Index].getMass() += Taken;
- DEBUG(debugAssign(W.TargetNode, Taken, nullptr));
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
continue;
}
@@ -411,15 +414,15 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
// Check for a backedge.
if (W.Type == Weight::Backedge) {
- OuterLoop->BackedgeMass += Taken;
- DEBUG(debugAssign(BlockNode(), Taken, "back"));
+ OuterLoop->BackedgeMass[OuterLoop->getHeaderIndex(W.TargetNode)] += Taken;
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "back"));
continue;
}
// This must be an exit.
assert(W.Type == Weight::Exit);
OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
- DEBUG(debugAssign(W.TargetNode, Taken, "exit"));
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "exit"));
}
}
@@ -595,7 +598,7 @@ template <> struct GraphTraits<IrreducibleGraph> {
static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); }
static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); }
};
-}
+} // namespace llvm
/// \brief Find extra irreducible headers.
///
@@ -713,10 +716,44 @@ BlockFrequencyInfoImplBase::analyzeIrreducible(
void
BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
OuterLoop.Exits.clear();
- OuterLoop.BackedgeMass = BlockMass::getEmpty();
+ for (auto &Mass : OuterLoop.BackedgeMass)
+ Mass = BlockMass::getEmpty();
auto O = OuterLoop.Nodes.begin() + 1;
for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
if (!Working[I->Index].isPackaged())
*O++ = *I;
OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
}
+
+void BlockFrequencyInfoImplBase::adjustLoopHeaderMass(LoopData &Loop) {
+ assert(Loop.isIrreducible() && "this only makes sense on irreducible loops");
+
+ // Since the loop has more than one header block, the mass flowing back into
+ // each header will be different. Adjust the mass in each header loop to
+ // reflect the masses flowing through back edges.
+ //
+ // To do this, we distribute the initial mass using the backedge masses
+ // as weights for the distribution.
+ BlockMass LoopMass = BlockMass::getFull();
+ Distribution Dist;
+
+ DEBUG(dbgs() << "adjust-loop-header-mass:\n");
+ for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
+ auto &HeaderNode = Loop.Nodes[H];
+ auto &BackedgeMass = Loop.BackedgeMass[Loop.getHeaderIndex(HeaderNode)];
+ DEBUG(dbgs() << " - Add back edge mass for node "
+ << getBlockName(HeaderNode) << ": " << BackedgeMass << "\n");
+ Dist.addLocal(HeaderNode, BackedgeMass.getMass());
+ }
+
+ DitheringDistributer D(Dist, LoopMass);
+
+ DEBUG(dbgs() << " Distribute loop mass " << LoopMass
+ << " to headers using above weights\n");
+ for (const Weight &W : Dist.Weights) {
+ BlockMass Taken = D.takeMass(W.Amount);
+ assert(W.Type == Weight::Local && "all weights should be local");
+ Working[W.TargetNode.Index].getMass() = Taken;
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
+ }
+}
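
To make the new bookkeeping concrete, here is the arithmetic computeLoopScale now performs, as a standalone sketch with made-up masses (plain doubles standing in for BlockMass; an irreducible loop with two headers):

#include <cassert>

int main() {
  double FullMass = 1.0;
  double BackedgeMass[] = {0.25, 0.25};           // one entry per loop header
  double TotalBackedgeMass =
      BackedgeMass[0] + BackedgeMass[1];          // 0.5, summed before use
  double ExitMass = FullMass - TotalBackedgeMass; // ExitMass = Head - Backedge
  double LoopScale = 1.0 / ExitMass;              // LoopScale = 1 / ExitMass
  assert(LoopScale == 2.0);
  return 0;
}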
diff --git a/lib/Analysis/CFGPrinter.cpp b/lib/Analysis/CFGPrinter.cpp
index c86f1f55954b..edd02c2fa0b2 100644
--- a/lib/Analysis/CFGPrinter.cpp
+++ b/lib/Analysis/CFGPrinter.cpp
@@ -40,7 +40,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGViewer::ID = 0;
INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true)
@@ -63,7 +63,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGOnlyViewer::ID = 0;
INITIALIZE_PASS(CFGOnlyViewer, "view-cfg-only",
@@ -97,7 +97,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGPrinter::ID = 0;
INITIALIZE_PASS(CFGPrinter, "dot-cfg", "Print CFG of function to 'dot' file",
@@ -130,7 +130,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGOnlyPrinter::ID = 0;
INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
diff --git a/lib/Analysis/CFLAliasAnalysis.cpp b/lib/Analysis/CFLAliasAnalysis.cpp
index 84b31dff055a..d937c0b2198a 100644
--- a/lib/Analysis/CFLAliasAnalysis.cpp
+++ b/lib/Analysis/CFLAliasAnalysis.cpp
@@ -14,8 +14,7 @@
// Alias Analysis" by Zhang Q, Lyu M R, Yuan H, and Su Z. -- to summarize the
// papers, we build a graph of the uses of a variable, where each node is a
// memory location, and each edge is an action that happened on that memory
-// location. The "actions" can be one of Dereference, Reference, Assign, or
-// Assign.
+// location. The "actions" can be one of Dereference, Reference, or Assign.
//
// Two variables are considered as aliasing iff you can reach one value's node
// from the other value's node and the language formed by concatenating all of
@@ -219,9 +218,10 @@ public:
return Iter->second;
}
- AliasResult query(const Location &LocA, const Location &LocB);
+ AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB);
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
if (LocA.Ptr == LocB.Ptr) {
if (LocA.Size == LocB.Size) {
return MustAlias;
@@ -539,6 +539,19 @@ public:
Output.push_back(Edge(&Inst, From1, EdgeType::Assign, AttrNone));
Output.push_back(Edge(&Inst, From2, EdgeType::Assign, AttrNone));
}
+
+ void visitConstantExpr(ConstantExpr *CE) {
+ switch (CE->getOpcode()) {
+ default:
+ llvm_unreachable("Unknown instruction type encountered!");
+// Build the switch statement using the Instruction.def file.
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+ case Instruction::OPCODE: \
+ visit##OPCODE(*(CLASS *)CE); \
+ break;
+#include "llvm/IR/Instruction.def"
+ }
+ }
};
// For a given instruction, we need to know which Value* to get the
@@ -712,7 +725,7 @@ public:
typedef WeightedBidirectionalGraph<std::pair<EdgeType, StratifiedAttrs>> GraphT;
typedef DenseMap<Value *, GraphT::Node> NodeMapT;
-}
+} // namespace
// -- Setting up/registering CFLAA pass -- //
char CFLAliasAnalysis::ID = 0;
@@ -741,6 +754,10 @@ static EdgeType flipWeight(EdgeType);
static void argsToEdges(CFLAliasAnalysis &, Instruction *,
SmallVectorImpl<Edge> &);
+// Gets edges of the given ConstantExpr*, writing them to the SmallVector*.
+static void argsToEdges(CFLAliasAnalysis &, ConstantExpr *,
+ SmallVectorImpl<Edge> &);
+
// Gets the "Level" that one should travel in StratifiedSets
// given an EdgeType.
static Level directionOfEdgeType(EdgeType);
@@ -807,6 +824,13 @@ static bool hasUsefulEdges(Instruction *Inst) {
return !isa<CmpInst>(Inst) && !isa<FenceInst>(Inst) && !IsNonInvokeTerminator;
}
+static bool hasUsefulEdges(ConstantExpr *CE) {
+ // ConstantExpr doesn't have terminators, invokes, or fences, so we only
+ // need to check for compares.
+ return CE->getOpcode() != Instruction::ICmp &&
+ CE->getOpcode() != Instruction::FCmp;
+}
+
static Optional<StratifiedAttr> valueToAttrIndex(Value *Val) {
if (isa<GlobalValue>(Val))
return AttrGlobalIndex;
@@ -846,6 +870,13 @@ static void argsToEdges(CFLAliasAnalysis &Analysis, Instruction *Inst,
v.visit(Inst);
}
+static void argsToEdges(CFLAliasAnalysis &Analysis, ConstantExpr *CE,
+ SmallVectorImpl<Edge> &Output) {
+ assert(hasUsefulEdges(CE) && "Expected constant expr to have 'useful' edges");
+ GetEdgesVisitor v(Analysis, Output);
+ v.visitConstantExpr(CE);
+}
+
static Level directionOfEdgeType(EdgeType Weight) {
switch (Weight) {
case EdgeType::Reference:
@@ -865,25 +896,23 @@ static void constexprToEdges(CFLAliasAnalysis &Analysis,
Worklist.push_back(&CExprToCollapse);
SmallVector<Edge, 8> ConstexprEdges;
+ SmallPtrSet<ConstantExpr *, 4> Visited;
while (!Worklist.empty()) {
auto *CExpr = Worklist.pop_back_val();
- std::unique_ptr<Instruction> Inst(CExpr->getAsInstruction());
- if (!hasUsefulEdges(Inst.get()))
+ if (!hasUsefulEdges(CExpr))
continue;
ConstexprEdges.clear();
- argsToEdges(Analysis, Inst.get(), ConstexprEdges);
+ argsToEdges(Analysis, CExpr, ConstexprEdges);
for (auto &Edge : ConstexprEdges) {
- if (Edge.From == Inst.get())
- Edge.From = CExpr;
- else if (auto *Nested = dyn_cast<ConstantExpr>(Edge.From))
- Worklist.push_back(Nested);
-
- if (Edge.To == Inst.get())
- Edge.To = CExpr;
- else if (auto *Nested = dyn_cast<ConstantExpr>(Edge.To))
- Worklist.push_back(Nested);
+ if (auto *Nested = dyn_cast<ConstantExpr>(Edge.From))
+ if (Visited.insert(Nested).second)
+ Worklist.push_back(Nested);
+
+ if (auto *Nested = dyn_cast<ConstantExpr>(Edge.To))
+ if (Visited.insert(Nested).second)
+ Worklist.push_back(Nested);
}
Results.append(ConstexprEdges.begin(), ConstexprEdges.end());
@@ -1080,9 +1109,8 @@ void CFLAliasAnalysis::scan(Function *Fn) {
Handles.push_front(FunctionHandle(Fn, this));
}
-AliasAnalysis::AliasResult
-CFLAliasAnalysis::query(const AliasAnalysis::Location &LocA,
- const AliasAnalysis::Location &LocB) {
+AliasAnalysis::AliasResult CFLAliasAnalysis::query(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
auto *ValA = const_cast<Value *>(LocA.Ptr);
auto *ValB = const_cast<Value *>(LocB.Ptr);
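
The constexprToEdges rewrite above drops the getAsInstruction round-trip and adds a Visited set: constant expressions are uniqued, so the same nested ConstantExpr can be reached through many edges, and without the set it would be pushed onto the worklist repeatedly. A plain-C++ sketch of the pattern (generic types, not LLVM's):

#include <set>
#include <vector>

// Walk a graph from Root, enqueueing each node at most once. NextNodes(N)
// returns the neighbors of N (e.g. the nested operands of a ConstantExpr).
template <typename Node, typename NextFn>
void walkOnce(Node *Root, NextFn NextNodes) {
  std::vector<Node *> Worklist{Root};
  std::set<Node *> Visited{Root};
  while (!Worklist.empty()) {
    Node *N = Worklist.back();
    Worklist.pop_back();
    for (Node *Next : NextNodes(N))
      if (Visited.insert(Next).second) // newly seen -> enqueue exactly once
        Worklist.push_back(Next);
  }
}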
diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp
index 5a5475417951..92f6932bf8b9 100644
--- a/lib/Analysis/CaptureTracking.cpp
+++ b/lib/Analysis/CaptureTracking.cpp
@@ -110,7 +110,7 @@ namespace {
bool Captured;
};
-}
+} // namespace
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
diff --git a/lib/Analysis/DivergenceAnalysis.cpp b/lib/Analysis/DivergenceAnalysis.cpp
index e5ee2959c15d..3765adf4d98c 100644
--- a/lib/Analysis/DivergenceAnalysis.cpp
+++ b/lib/Analysis/DivergenceAnalysis.cpp
@@ -284,7 +284,7 @@ void DivergencePropagator::propagate() {
}
}
-} /// end namespace anonymous
+} // namespace
FunctionPass *llvm::createDivergenceAnalysisPass() {
return new DivergenceAnalysis();
diff --git a/lib/Analysis/DomPrinter.cpp b/lib/Analysis/DomPrinter.cpp
index 0c880df54f8e..0e0d174c2a48 100644
--- a/lib/Analysis/DomPrinter.cpp
+++ b/lib/Analysis/DomPrinter.cpp
@@ -78,7 +78,7 @@ struct DOTGraphTraits<PostDominatorTree*>
return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node, G->getRootNode());
}
};
-}
+} // namespace llvm
namespace {
struct DominatorTreeWrapperPassAnalysisGraphTraits {
diff --git a/lib/Analysis/IPA/CallGraph.cpp b/lib/Analysis/IPA/CallGraph.cpp
index 67cf7f86e072..e2799d965a7d 100644
--- a/lib/Analysis/IPA/CallGraph.cpp
+++ b/lib/Analysis/IPA/CallGraph.cpp
@@ -24,8 +24,8 @@ CallGraph::CallGraph(Module &M)
: M(M), Root(nullptr), ExternalCallingNode(getOrInsertFunction(nullptr)),
CallsExternalNode(new CallGraphNode(nullptr)) {
// Add every function to the call graph.
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
- addToCallGraph(I);
+ for (Function &F : M)
+ addToCallGraph(&F);
// If we didn't find a main function, use the external call graph node
if (!Root)
@@ -40,13 +40,11 @@ CallGraph::~CallGraph() {
// Reset all node's use counts to zero before deleting them to prevent an
// assertion from firing.
#ifndef NDEBUG
- for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
- I != E; ++I)
- I->second->allReferencesDropped();
+ for (auto &I : FunctionMap)
+ I.second->allReferencesDropped();
#endif
- for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
- I != E; ++I)
- delete I->second;
+ for (auto &I : FunctionMap)
+ delete I.second;
}
void CallGraph::addToCallGraph(Function *F) {
@@ -81,8 +79,10 @@ void CallGraph::addToCallGraph(Function *F) {
CallSite CS(cast<Value>(II));
if (CS) {
const Function *Callee = CS.getCalledFunction();
- if (!Callee)
+ if (!Callee || !Intrinsic::isLeaf(Callee->getIntrinsicID()))
// Indirect calls of intrinsics are not allowed so no need to check.
+ // We can be more precise here by using TargetArg returned by
+ // Intrinsic::isLeaf.
Node->addCalledFunction(CS, CallsExternalNode);
else if (!Callee->isIntrinsic())
Node->addCalledFunction(CS, getOrInsertFunction(Callee));
@@ -98,8 +98,26 @@ void CallGraph::print(raw_ostream &OS) const {
OS << "<<null function: 0x" << Root << ">>\n";
}
- for (CallGraph::const_iterator I = begin(), E = end(); I != E; ++I)
- I->second->print(OS);
+ // Print in a deterministic order by sorting CallGraphNodes by name. We do
+ // this here to avoid slowing down the non-printing fast path.
+
+ SmallVector<CallGraphNode *, 16> Nodes;
+ Nodes.reserve(FunctionMap.size());
+
+ for (auto I = begin(), E = end(); I != E; ++I)
+ Nodes.push_back(I->second);
+
+ std::sort(Nodes.begin(), Nodes.end(),
+ [](CallGraphNode *LHS, CallGraphNode *RHS) {
+ if (Function *LF = LHS->getFunction())
+ if (Function *RF = RHS->getFunction())
+ return LF->getName() < RF->getName();
+
+ return RHS->getFunction() != nullptr;
+ });
+
+ for (CallGraphNode *CN : Nodes)
+ CN->print(OS);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
diff --git a/lib/Analysis/IPA/CallGraphSCCPass.cpp b/lib/Analysis/IPA/CallGraphSCCPass.cpp
index 65ba1c7c6c47..6b3e06346269 100644
--- a/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -217,8 +217,10 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
// another value. This can happen when constant folding happens
// of well known functions etc.
!CallSite(I->first) ||
- (CallSite(I->first).getCalledFunction() &&
- CallSite(I->first).getCalledFunction()->isIntrinsic())) {
+ (CallSite(I->first).getCalledFunction() &&
+ CallSite(I->first).getCalledFunction()->isIntrinsic() &&
+ Intrinsic::isLeaf(
+ CallSite(I->first).getCalledFunction()->getIntrinsicID()))) {
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
diff --git a/lib/Analysis/IPA/CallPrinter.cpp b/lib/Analysis/IPA/CallPrinter.cpp
index 68dcd3c06427..f183625dd776 100644
--- a/lib/Analysis/IPA/CallPrinter.cpp
+++ b/lib/Analysis/IPA/CallPrinter.cpp
@@ -41,7 +41,7 @@ struct AnalysisCallGraphWrapperPassTraits {
}
};
-} // end llvm namespace
+} // namespace llvm
namespace {
diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp
index 018ae99d6618..a32631d0c3b2 100644
--- a/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -115,9 +115,10 @@ namespace {
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Location &LocA, const Location &LocB) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1, CS2);
@@ -188,7 +189,7 @@ namespace {
GlobalValue *OkayStoreDest = nullptr);
bool AnalyzeIndirectGlobalMemory(GlobalValue *GV);
};
-}
+} // namespace
char GlobalsModRef::ID = 0;
INITIALIZE_AG_PASS_BEGIN(GlobalsModRef, AliasAnalysis,
@@ -478,9 +479,8 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
/// alias - If one of the pointers is to a global that we are tracking, and the
/// other is some random pointer, we know there cannot be an alias, because the
/// address of the global isn't taken.
-AliasAnalysis::AliasResult
-GlobalsModRef::alias(const Location &LocA,
- const Location &LocB) {
+AliasAnalysis::AliasResult GlobalsModRef::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
// Get the base object these pointers point to.
const Value *UV1 = GetUnderlyingObject(LocA.Ptr, *DL);
const Value *UV2 = GetUnderlyingObject(LocB.Ptr, *DL);
@@ -535,8 +535,7 @@ GlobalsModRef::alias(const Location &LocA,
}
AliasAnalysis::ModRefResult
-GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+GlobalsModRef::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
unsigned Known = ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
diff --git a/lib/Analysis/InstCount.cpp b/lib/Analysis/InstCount.cpp
index de2b9c0c56db..e76d26e8530b 100644
--- a/lib/Analysis/InstCount.cpp
+++ b/lib/Analysis/InstCount.cpp
@@ -64,7 +64,7 @@ namespace {
void print(raw_ostream &O, const Module *M) const override {}
};
-}
+} // namespace
char InstCount::ID = 0;
INITIALIZE_PASS(InstCount, "instcount",
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index ec56d888dc2f..12e406bb1a2d 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -854,8 +854,8 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
return X;
}
- // fsub nnan ninf x, x ==> 0.0
- if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1)
+ // fsub nnan x, x ==> 0.0
+ if (FMF.noNaNs() && Op0 == Op1)
return Constant::getNullValue(Op0->getType());
return nullptr;
@@ -1126,6 +1126,21 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
return Op0;
+ if (FMF.noNaNs()) {
+ // X / X -> 1.0 is legal when NaNs are ignored.
+ if (Op0 == Op1)
+ return ConstantFP::get(Op0->getType(), 1.0);
+
+ // -X / X -> -1.0 and
+ // X / -X -> -1.0 are legal when NaNs are ignored.
+ // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
+ if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
+ BinaryOperator::getFNegArgument(Op0) == Op1) ||
+ (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
+ BinaryOperator::getFNegArgument(Op1) == Op0))
+ return ConstantFP::get(Op0->getType(), -1.0);
+ }
+
return nullptr;
}
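
Both folds rest on the same IEEE-754 corner cases: x - x, x / x, and -x / x
only stray from 0.0, 1.0, and -1.0 when a NaN appears, because the problematic
inputs (infinities, and 0.0/0.0 for the division) all produce NaN, which nnan
rules out. That is also why the fsub fold above no longer needs ninf. A
standalone sanity check of those cases:

    #include <cmath>
    #include <cstdio>

    int main() {
      double Inf = INFINITY, Zero = 0.0;
      std::printf("inf - inf  = %f\n", Inf - Inf);   // nan: why fsub wants nnan
      std::printf("inf / inf  = %f\n", Inf / Inf);   // nan: covered by nnan
      std::printf("0.0 / 0.0  = %f\n", Zero / Zero); // nan: signed zeros moot
      std::printf("-3.5 / 3.5 = %f\n", -3.5 / 3.5);  // -1.0: the -X / X case
      return 0;
    }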
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index e6f586ac7029..f421d286e842 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -286,7 +286,7 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
<< Val.getConstantRange().getUpper() << '>';
return OS << "constant<" << *Val.getConstant() << '>';
}
-}
+} // namespace llvm
//===----------------------------------------------------------------------===//
// LazyValueInfoCache Decl
@@ -306,7 +306,7 @@ namespace {
deleted();
}
};
-}
+} // namespace
namespace {
/// This is the cache kept by LazyValueInfo which
@@ -1262,8 +1262,40 @@ LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI) {
const DataLayout &DL = CxtI->getModule()->getDataLayout();
LVILatticeVal Result = getCache(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
-
- return getPredicateResult(Pred, C, Result, DL, TLI);
+ Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
+ if (Ret != Unknown)
+ return Ret;
+
+ // TODO: Move this logic inside getValueAt so that it can be cached rather
+ // than re-queried on each call. This would also allow us to merge the
+  // underlying lattice values to get more information.
+ if (CxtI) {
+    // For a comparison where V is defined outside this block, it's possible
+    // that we've branched on it before. Check whether the value is known on
+    // all incoming edges.
+ BasicBlock *BB = CxtI->getParent();
+ pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
+ if (PI != PE &&
+ (!isa<Instruction>(V) ||
+ cast<Instruction>(V)->getParent() != BB)) {
+      // For each predecessor edge, determine whether the comparison is true
+      // or false on that edge. If they're all true or all false, we can
+      // conclude the value of the comparison in this block.
+ Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
+ if (Baseline != Unknown) {
+ // Check that all remaining incoming values match the first one.
+ while (++PI != PE) {
+ Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
+ if (Ret != Baseline) break;
+ }
+        // If the scan finished without an early break (PI == PE), every
+        // edge matched the baseline, so it is the answer in this block.
+ if (PI == PE) {
+ return Baseline;
+ }
+ }
+ }
+ }
+ return Unknown;
}
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
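
The fallback added to getPredicateAt reduces to one pattern: take the
predicate's answer on the first incoming edge as a baseline, then confirm
every other edge yields the same non-Unknown answer. A self-contained
distillation of that loop, with EdgeResults standing in for the per-edge
getPredicateOnEdge queries (hypothetical names):

    #include <vector>

    enum Tristate { False, True, Unknown };

    Tristate resultOverAllEdges(const std::vector<Tristate> &EdgeResults) {
      if (EdgeResults.empty())
        return Unknown;
      Tristate Baseline = EdgeResults.front();
      if (Baseline == Unknown)
        return Unknown;
      for (Tristate R : EdgeResults)
        if (R != Baseline)
          return Unknown; // one edge disagrees: no block-wide conclusion
      return Baseline;    // every edge agrees
    }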
diff --git a/lib/Analysis/LibCallAliasAnalysis.cpp b/lib/Analysis/LibCallAliasAnalysis.cpp
index f6025e3252e3..991a0e3e2752 100644
--- a/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -48,7 +48,7 @@ bool LibCallAliasAnalysis::runOnFunction(Function &F) {
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
// If we have a function, check to see what kind of mod/ref effects it
// has. Start by including any info globally known about the function.
AliasAnalysis::ModRefResult MRInfo = FI->UniversalBehavior;
@@ -122,7 +122,7 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
//
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
ModRefResult MRInfo = ModRef;
// If this is a direct call to a function that LCI knows about, get the
diff --git a/lib/Analysis/LibCallSemantics.cpp b/lib/Analysis/LibCallSemantics.cpp
index e98540ba7e90..003c81e87b60 100644
--- a/lib/Analysis/LibCallSemantics.cpp
+++ b/lib/Analysis/LibCallSemantics.cpp
@@ -80,9 +80,8 @@ EHPersonality llvm::classifyEHPersonality(const Value *Pers) {
.Default(EHPersonality::Unknown);
}
-bool llvm::canSimplifyInvokeNoUnwind(const InvokeInst *II) {
- const LandingPadInst *LP = II->getLandingPadInst();
- EHPersonality Personality = classifyEHPersonality(LP->getPersonalityFn());
+bool llvm::canSimplifyInvokeNoUnwind(const Function *F) {
+ EHPersonality Personality = classifyEHPersonality(F->getPersonalityFn());
// We can't simplify any invokes to nounwind functions if the personality
// function wants to catch asynch exceptions. The nounwind attribute only
// implies that the function does not throw synchronous exceptions.
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 65a90d7bcd87..6ea6ccbfbe99 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -157,7 +157,7 @@ namespace {
WriteValues({V1, Vs...});
}
};
-}
+} // namespace
char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
@@ -202,8 +202,8 @@ void Lint::visitCallSite(CallSite CS) {
Value *Callee = CS.getCalledValue();
const DataLayout &DL = CS->getModule()->getDataLayout();
- visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Callee);
+ visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr,
+ MemRef::Callee);
if (Function *F = dyn_cast<Function>(findValue(Callee, DL,
/*OffsetOk=*/false))) {
@@ -282,12 +282,10 @@ void Lint::visitCallSite(CallSite CS) {
case Intrinsic::memcpy: {
MemCpyInst *MCI = cast<MemCpyInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), nullptr,
- MemRef::Write);
- visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), nullptr,
- MemRef::Read);
+ visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize,
+ MCI->getAlignment(), nullptr, MemRef::Write);
+ visitMemoryReference(I, MCI->getSource(), MemoryLocation::UnknownSize,
+ MCI->getAlignment(), nullptr, MemRef::Read);
// Check that the memcpy arguments don't overlap. The AliasAnalysis API
// isn't expressive enough for what we really want to do. Known partial
@@ -306,20 +304,17 @@ void Lint::visitCallSite(CallSite CS) {
case Intrinsic::memmove: {
MemMoveInst *MMI = cast<MemMoveInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), nullptr,
- MemRef::Write);
- visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), nullptr,
- MemRef::Read);
+ visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize,
+ MMI->getAlignment(), nullptr, MemRef::Write);
+ visitMemoryReference(I, MMI->getSource(), MemoryLocation::UnknownSize,
+ MMI->getAlignment(), nullptr, MemRef::Read);
break;
}
case Intrinsic::memset: {
MemSetInst *MSI = cast<MemSetInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
- MSI->getAlignment(), nullptr,
- MemRef::Write);
+ visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize,
+ MSI->getAlignment(), nullptr, MemRef::Write);
break;
}
@@ -328,26 +323,26 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: va_start called in a non-varargs function",
&I);
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::vacopy:
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Write);
- visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(1), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read);
break;
case Intrinsic::vaend:
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::stackrestore:
// Stackrestore doesn't read or write memory, but it sets the
// stack pointer, which the compiler may read from or write to
// at any time, so check it for both readability and writeability.
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::eh_begincatch:
@@ -435,7 +430,7 @@ void Lint::visitMemoryReference(Instruction &I,
// OK, so the access is to a constant offset from Ptr. Check that Ptr is
// something we can handle and if so extract the size of this base object
// along with its alignment.
- uint64_t BaseSize = AliasAnalysis::UnknownSize;
+ uint64_t BaseSize = MemoryLocation::UnknownSize;
unsigned BaseAlign = 0;
if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
@@ -460,8 +455,8 @@ void Lint::visitMemoryReference(Instruction &I,
// Accesses from before the start or after the end of the object are not
// defined.
- Assert(Size == AliasAnalysis::UnknownSize ||
- BaseSize == AliasAnalysis::UnknownSize ||
+ Assert(Size == MemoryLocation::UnknownSize ||
+ BaseSize == MemoryLocation::UnknownSize ||
(Offset >= 0 && Offset + Size <= BaseSize),
"Undefined behavior: Buffer overflow", &I);
@@ -770,12 +765,12 @@ void Lint::visitAllocaInst(AllocaInst &I) {
}
void Lint::visitVAArgInst(VAArgInst &I) {
- visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
+ visitMemoryReference(I, I.getOperand(0), MemoryLocation::UnknownSize, 0,
nullptr, MemRef::Read | MemRef::Write);
}
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
- visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
+ visitMemoryReference(I, I.getAddress(), MemoryLocation::UnknownSize, 0,
nullptr, MemRef::Branchee);
Assert(I.getNumDestinations() != 0,
diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index c661c7b87dcb..8425b75f3ff9 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -210,18 +210,18 @@ public:
: DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckNeeded(false) {}
/// \brief Register a load and whether it is only read from.
- void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
+ void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
Value *Ptr = const_cast<Value*>(Loc.Ptr);
- AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
+ AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
Accesses.insert(MemAccessInfo(Ptr, false));
if (IsReadOnly)
ReadOnlyPtr.insert(Ptr);
}
/// \brief Register a store.
- void addStore(AliasAnalysis::Location &Loc) {
+ void addStore(MemoryLocation &Loc) {
Value *Ptr = const_cast<Value*>(Loc.Ptr);
- AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
+ AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
Accesses.insert(MemAccessInfo(Ptr, true));
}
@@ -1150,7 +1150,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
if (Seen.insert(Ptr).second) {
++NumReadWrites;
- AliasAnalysis::Location Loc = MemoryLocation::get(ST);
+ MemoryLocation Loc = MemoryLocation::get(ST);
// The TBAA metadata could have a control dependency on the predication
// condition, so we cannot rely on it when determining whether or not we
// need runtime pointer checks.
@@ -1186,7 +1186,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
IsReadOnlyPtr = true;
}
- AliasAnalysis::Location Loc = MemoryLocation::get(LD);
+ MemoryLocation Loc = MemoryLocation::get(LD);
// The TBAA metadata could have a control dependency on the predication
// condition, so we cannot rely on it when determining whether or not we
// need runtime pointer checks.
diff --git a/lib/Analysis/LoopPass.cpp b/lib/Analysis/LoopPass.cpp
index e9fcf02118b9..81b7ecd480bf 100644
--- a/lib/Analysis/LoopPass.cpp
+++ b/lib/Analysis/LoopPass.cpp
@@ -56,7 +56,7 @@ public:
};
char PrintLoopPass::ID = 0;
-}
+} // namespace
//===----------------------------------------------------------------------===//
// LPPassManager
diff --git a/lib/Analysis/MemDepPrinter.cpp b/lib/Analysis/MemDepPrinter.cpp
index da3b829b6d31..54a04d9856b7 100644
--- a/lib/Analysis/MemDepPrinter.cpp
+++ b/lib/Analysis/MemDepPrinter.cpp
@@ -74,7 +74,7 @@ namespace {
return InstTypePair(inst, type);
}
};
-}
+} // namespace
char MemDepPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDepPrinter, "print-memdeps",
diff --git a/lib/Analysis/MemDerefPrinter.cpp b/lib/Analysis/MemDerefPrinter.cpp
index fa292a28ec87..b0194d33d0e8 100644
--- a/lib/Analysis/MemDerefPrinter.cpp
+++ b/lib/Analysis/MemDerefPrinter.cpp
@@ -37,7 +37,7 @@ namespace {
Vec.clear();
}
};
-}
+} // namespace
char MemDerefPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDerefPrinter, "print-memderefs",
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 255bae61eb2f..cf8ba5ccb725 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -118,10 +118,8 @@ static void RemoveFromReverseMap(DenseMap<Instruction*,
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
-static
-AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
- AliasAnalysis::Location &Loc,
- AliasAnalysis *AA) {
+static AliasAnalysis::ModRefResult
+GetLocation(const Instruction *Inst, MemoryLocation &Loc, AliasAnalysis *AA) {
if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isUnordered()) {
Loc = MemoryLocation::get(LI);
@@ -131,7 +129,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
Loc = MemoryLocation::get(LI);
return AliasAnalysis::ModRef;
}
- Loc = AliasAnalysis::Location();
+ Loc = MemoryLocation();
return AliasAnalysis::ModRef;
}
@@ -144,7 +142,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
Loc = MemoryLocation::get(SI);
return AliasAnalysis::ModRef;
}
- Loc = AliasAnalysis::Location();
+ Loc = MemoryLocation();
return AliasAnalysis::ModRef;
}
@@ -155,7 +153,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
// calls to free() deallocate the entire structure
- Loc = AliasAnalysis::Location(CI->getArgOperand(0));
+ Loc = MemoryLocation(CI->getArgOperand(0));
return AliasAnalysis::Mod;
}
@@ -167,17 +165,17 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
II->getAAMetadata(AAInfo);
- Loc = AliasAnalysis::Location(II->getArgOperand(1),
- cast<ConstantInt>(II->getArgOperand(0))
- ->getZExtValue(), AAInfo);
+ Loc = MemoryLocation(
+ II->getArgOperand(1),
+ cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return AliasAnalysis::Mod;
case Intrinsic::invariant_end:
II->getAAMetadata(AAInfo);
- Loc = AliasAnalysis::Location(II->getArgOperand(2),
- cast<ConstantInt>(II->getArgOperand(1))
- ->getZExtValue(), AAInfo);
+ Loc = MemoryLocation(
+ II->getArgOperand(2),
+ cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return AliasAnalysis::Mod;
@@ -212,7 +210,7 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
Instruction *Inst = --ScanIt;
// If this inst is a memory op, get the pointer it accessed
- AliasAnalysis::Location Loc;
+ MemoryLocation Loc;
AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
if (Loc.Ptr) {
// A simple instruction.
@@ -259,9 +257,10 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
-static bool isLoadLoadClobberIfExtendedToFullWidth(
- const AliasAnalysis::Location &MemLoc, const Value *&MemLocBase,
- int64_t &MemLocOffs, const LoadInst *LI) {
+static bool isLoadLoadClobberIfExtendedToFullWidth(const MemoryLocation &MemLoc,
+ const Value *&MemLocBase,
+ int64_t &MemLocOffs,
+ const LoadInst *LI) {
const DataLayout &DL = LI->getModule()->getDataLayout();
// If we haven't already computed the base/offset of MemLoc, do so now.
@@ -368,10 +367,9 @@ static bool isVolatile(Instruction *Inst) {
/// with reads from read-only locations. If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
-MemDepResult MemoryDependenceAnalysis::
-getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
- BasicBlock::iterator ScanIt, BasicBlock *BB,
- Instruction *QueryInst) {
+MemDepResult MemoryDependenceAnalysis::getPointerDependencyFrom(
+ const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
+ BasicBlock *BB, Instruction *QueryInst) {
const Value *MemLocBase = nullptr;
int64_t MemLocOffset = 0;
@@ -440,8 +438,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point (the right approach is to use
// GetPointerBaseWithConstantOffset).
- if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
- MemLoc))
+ if (AA->isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
return MemDepResult::getDef(II);
continue;
}
@@ -486,7 +483,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
}
}
- AliasAnalysis::Location LoadLoc = MemoryLocation::get(LI);
+ MemoryLocation LoadLoc = MemoryLocation::get(LI);
// If we found a pointer, check if it could be the same as our pointer.
AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);
@@ -575,7 +572,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// Ok, this store might clobber the query pointer. Check to see if it is
// a must alias: in this case, we want to return this as a def.
- AliasAnalysis::Location StoreLoc = MemoryLocation::get(SI);
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
// If we found a pointer, check if it could be the same as our pointer.
AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);
@@ -679,7 +676,7 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
else
LocalCache = MemDepResult::getNonFuncLocal();
} else {
- AliasAnalysis::Location MemLoc;
+ MemoryLocation MemLoc;
AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
if (MemLoc.Ptr) {
// If we can do a pointer scan, make it happen.
@@ -872,7 +869,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(Instruction *QueryInst,
SmallVectorImpl<NonLocalDepResult> &Result) {
- const AliasAnalysis::Location Loc = MemoryLocation::get(QueryInst);
+ const MemoryLocation Loc = MemoryLocation::get(QueryInst);
bool isLoad = isa<LoadInst>(QueryInst);
BasicBlock *FromBB = QueryInst->getParent();
assert(FromBB);
@@ -924,11 +921,9 @@ getNonLocalPointerDependency(Instruction *QueryInst,
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available). If we do a lookup,
/// add the result to the cache.
-MemDepResult MemoryDependenceAnalysis::
-GetNonLocalInfoForBlock(Instruction *QueryInst,
- const AliasAnalysis::Location &Loc,
- bool isLoad, BasicBlock *BB,
- NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
+MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
+ Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
+ BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
// Do a binary search to see if we already have an entry for this block in
// the cache set. If so, find it.
@@ -1040,14 +1035,11 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
-bool MemoryDependenceAnalysis::
-getNonLocalPointerDepFromBB(Instruction *QueryInst,
- const PHITransAddr &Pointer,
- const AliasAnalysis::Location &Loc,
- bool isLoad, BasicBlock *StartBB,
- SmallVectorImpl<NonLocalDepResult> &Result,
- DenseMap<BasicBlock*, Value*> &Visited,
- bool SkipFirstBlock) {
+bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
+ Instruction *QueryInst, const PHITransAddr &Pointer,
+ const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
+ SmallVectorImpl<NonLocalDepResult> &Result,
+ DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
// Look up the cached info for Pointer.
ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
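
The contract of GetLocation, which the Location-to-MemoryLocation swap
preserves: a non-null Loc.Ptr means the instruction's memory effect is
describable as a single location, while a default-constructed MemoryLocation
(null Ptr) tells callers to rely on the ModRef summary alone. A hedged
restatement of the caller-side pattern visible in getCallSiteDependencyFrom:

    MemoryLocation Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A precise pointer-based dependence scan is possible.
    } else if (MR != AliasAnalysis::NoModRef) {
      // No single location: treat Inst conservatively, like an opaque call.
    }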
diff --git a/lib/Analysis/MemoryLocation.cpp b/lib/Analysis/MemoryLocation.cpp
index f87a017b9211..e4491261e055 100644
--- a/lib/Analysis/MemoryLocation.cpp
+++ b/lib/Analysis/MemoryLocation.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
@@ -88,3 +89,86 @@ MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MTI) {
return MemoryLocation(MTI->getRawDest(), Size, AATags);
}
+
+// FIXME: This code is duplicated with BasicAliasAnalysis and should be hoisted
+// to some common utility location.
+static bool isMemsetPattern16(const Function *MS,
+ const TargetLibraryInfo &TLI) {
+ if (TLI.has(LibFunc::memset_pattern16) &&
+ MS->getName() == "memset_pattern16") {
+ FunctionType *MemsetType = MS->getFunctionType();
+ if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
+ isa<PointerType>(MemsetType->getParamType(0)) &&
+ isa<PointerType>(MemsetType->getParamType(1)) &&
+ isa<IntegerType>(MemsetType->getParamType(2)))
+ return true;
+ }
+
+ return false;
+}
+
+MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS,
+ unsigned ArgIdx,
+ const TargetLibraryInfo &TLI) {
+ AAMDNodes AATags;
+ CS->getAAMetadata(AATags);
+ const Value *Arg = CS.getArgument(ArgIdx);
+
+ // We may be able to produce an exact size for known intrinsics.
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+ const DataLayout &DL = II->getModule()->getDataLayout();
+
+ switch (II->getIntrinsicID()) {
+ default:
+ break;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memory intrinsic");
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+ return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+ break;
+
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ assert(ArgIdx == 1 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AATags);
+
+ case Intrinsic::invariant_end:
+ assert(ArgIdx == 2 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AATags);
+
+ case Intrinsic::arm_neon_vld1:
+ assert(ArgIdx == 0 && "Invalid argument index");
+ // LLVM's vld1 and vst1 intrinsics currently only support a single
+ // vector register.
+ return MemoryLocation(Arg, DL.getTypeStoreSize(II->getType()), AATags);
+
+ case Intrinsic::arm_neon_vst1:
+ assert(ArgIdx == 0 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, DL.getTypeStoreSize(II->getArgOperand(1)->getType()), AATags);
+ }
+ }
+
+ // We can bound the aliasing properties of memset_pattern16 just as we can
+ // for memcpy/memset. This is particularly important because the
+ // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
+ // whenever possible.
+ if (CS.getCalledFunction() &&
+ isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memset_pattern16");
+ if (ArgIdx == 1)
+ return MemoryLocation(Arg, 16, AATags);
+ if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
+ return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+ }
+ // FIXME: Handle memset_pattern4 and memset_pattern8 also.
+
+ return MemoryLocation(CS.getArgument(ArgIdx), UnknownSize, AATags);
+}
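
A small usage sketch for the new entry point; describeDest is a hypothetical
helper. For a call such as memcpy(Dst, Src, 64) the destination argument comes
back as an exactly-64-byte MemoryLocation, while a non-constant length or an
unrecognized callee degrades to UnknownSize:

    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/CallSite.h"
    using namespace llvm;

    MemoryLocation describeDest(ImmutableCallSite CS,
                                const TargetLibraryInfo &TLI) {
      // Argument 0 of memcpy/memmove/memset is the destination pointer.
      return MemoryLocation::getForArgument(CS, /*ArgIdx=*/0, TLI);
    }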
diff --git a/lib/Analysis/ModuleDebugInfoPrinter.cpp b/lib/Analysis/ModuleDebugInfoPrinter.cpp
index 36c47141a45f..45ae818c35bf 100644
--- a/lib/Analysis/ModuleDebugInfoPrinter.cpp
+++ b/lib/Analysis/ModuleDebugInfoPrinter.cpp
@@ -40,7 +40,7 @@ namespace {
}
void print(raw_ostream &O, const Module *M) const override;
};
-}
+} // namespace
char ModuleDebugInfoPrinter::ID = 0;
INITIALIZE_PASS(ModuleDebugInfoPrinter, "module-debuginfo",
diff --git a/lib/Analysis/NoAliasAnalysis.cpp b/lib/Analysis/NoAliasAnalysis.cpp
index 203e1daf7a09..7617622b9ab6 100644
--- a/lib/Analysis/NoAliasAnalysis.cpp
+++ b/lib/Analysis/NoAliasAnalysis.cpp
@@ -41,7 +41,8 @@ namespace {
return true;
}
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
return MayAlias;
}
@@ -52,19 +53,17 @@ namespace {
return UnknownModRefBehavior;
}
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override {
return false;
}
- Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) override {
- Mask = ModRef;
- AAMDNodes AATags;
- CS->getAAMetadata(AATags);
- return Location(CS.getArgument(ArgIdx), UnknownSize, AATags);
+ ModRefResult getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) override {
+ return ModRef;
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override {
+ const MemoryLocation &Loc) override {
return ModRef;
}
ModRefResult getModRefInfo(ImmutableCallSite CS1,
diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp
index 633d6aaad35e..8d80c6028ba3 100644
--- a/lib/Analysis/PHITransAddr.cpp
+++ b/lib/Analysis/PHITransAddr.cpp
@@ -244,13 +244,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
GEPI->getNumOperands() == GEPOps.size() &&
GEPI->getParent()->getParent() == CurBB->getParent() &&
(!DT || DT->dominates(GEPI->getParent(), PredBB))) {
- bool Mismatch = false;
- for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
- if (GEPI->getOperand(i) != GEPOps[i]) {
- Mismatch = true;
- break;
- }
- if (!Mismatch)
+ if (std::equal(GEPOps.begin(), GEPOps.end(), GEPI->op_begin()))
return GEPI;
}
}
@@ -392,10 +386,10 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
if (!OpVal) return nullptr;
// Otherwise insert a cast at the end of PredBB.
- CastInst *New = CastInst::Create(Cast->getOpcode(),
- OpVal, InVal->getType(),
- InVal->getName()+".phi.trans.insert",
+ CastInst *New = CastInst::Create(Cast->getOpcode(), OpVal, InVal->getType(),
+ InVal->getName() + ".phi.trans.insert",
PredBB->getTerminator());
+ New->setDebugLoc(Inst->getDebugLoc());
NewInsts.push_back(New);
return New;
}
@@ -414,6 +408,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
GetElementPtrInst *Result = GetElementPtrInst::Create(
GEP->getSourceElementType(), GEPOps[0], makeArrayRef(GEPOps).slice(1),
InVal->getName() + ".phi.trans.insert", PredBB->getTerminator());
+ Result->setDebugLoc(Inst->getDebugLoc());
Result->setIsInBounds(GEP->isInBounds());
NewInsts.push_back(Result);
return Result;
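
The std::equal rewrite leans on the length check a few lines earlier
(GEPI->getNumOperands() == GEPOps.size()): the three-iterator form of
std::equal assumes the second sequence is at least as long as the first. A
standalone illustration of the replaced mismatch loop:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> GEPOps = {1, 2, 3};
      int Operands[] = {1, 2, 3}; // stands in for GEPI's operand list
      // Walks both sequences in lockstep, stopping at the first mismatch,
      // exactly like the hand-written Mismatch flag loop it replaces.
      assert(std::equal(GEPOps.begin(), GEPOps.end(), Operands));
      return 0;
    }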
diff --git a/lib/Analysis/RegionPrinter.cpp b/lib/Analysis/RegionPrinter.cpp
index d7f510984881..2b09becaac38 100644
--- a/lib/Analysis/RegionPrinter.cpp
+++ b/lib/Analysis/RegionPrinter.cpp
@@ -194,7 +194,7 @@ struct RegionOnlyPrinter
}
};
-}
+} // namespace
char RegionOnlyPrinter::ID = 0;
INITIALIZE_PASS(RegionOnlyPrinter, "dot-regions-only",
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 0e9f812c05e2..81e07e99dca1 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -627,7 +627,7 @@ namespace {
llvm_unreachable("Unknown SCEV kind!");
}
};
-}
+} // namespace
/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
@@ -689,7 +689,7 @@ struct FindSCEVSize {
return false;
}
};
-}
+} // namespace
// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
@@ -937,7 +937,7 @@ private:
const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
@@ -1248,7 +1248,7 @@ struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
-}
+} // namespace
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
@@ -3300,7 +3300,7 @@ namespace {
}
bool isDone() const { return FindOne; }
};
-}
+} // namespace
bool ScalarEvolution::checkValidity(const SCEV *S) const {
FindInvalidSCEVUnknown F;
@@ -7594,7 +7594,7 @@ struct FindUndefs {
return Found;
}
};
-}
+} // namespace
// Return true when S contains at least an undef value.
static inline bool
@@ -7644,7 +7644,7 @@ struct SCEVCollectTerms {
}
bool isDone() const { return false; }
};
-}
+} // namespace
/// Find parametric terms in this SCEVAddRecExpr.
void SCEVAddRecExpr::collectParametricTerms(
@@ -7737,7 +7737,7 @@ struct FindParameter {
return FoundParameter;
}
};
-}
+} // namespace
// Returns true when S contains at least a SCEVUnknown parameter.
static inline bool
@@ -8418,7 +8418,7 @@ struct SCEVSearch {
}
bool isDone() const { return IsFound; }
};
-}
+} // namespace
bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
SCEVSearch Search(Op);
diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index ccec0a877f5a..2d45c59a500c 100644
--- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -53,7 +53,8 @@ namespace {
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
- AliasResult alias(const Location &LocA, const Location &LocB) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
Value *GetBaseValue(const SCEV *S);
};
@@ -107,8 +108,8 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
}
AliasAnalysis::AliasResult
-ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
- const Location &LocB) {
+ScalarEvolutionAliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are. This allows the code below to ignore this special
// case.
@@ -161,12 +162,12 @@ ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
Value *AO = GetBaseValue(AS);
Value *BO = GetBaseValue(BS);
if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
- if (alias(Location(AO ? AO : LocA.Ptr,
- AO ? +UnknownSize : LocA.Size,
- AO ? AAMDNodes() : LocA.AATags),
- Location(BO ? BO : LocB.Ptr,
- BO ? +UnknownSize : LocB.Size,
- BO ? AAMDNodes() : LocB.AATags)) == NoAlias)
+ if (alias(MemoryLocation(AO ? AO : LocA.Ptr,
+ AO ? +MemoryLocation::UnknownSize : LocA.Size,
+ AO ? AAMDNodes() : LocA.AATags),
+ MemoryLocation(BO ? BO : LocB.Ptr,
+ BO ? +MemoryLocation::UnknownSize : LocB.Size,
+ BO ? AAMDNodes() : LocB.AATags)) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
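
The unary plus in +MemoryLocation::UnknownSize is deliberate: when the other
arm of the conditional is an lvalue, naming the in-class static constant
directly would (pre-C++17) odr-use it and demand an out-of-line definition at
link time, whereas '+' forces a prvalue copy of its value. A standalone
illustration of the idiom, under that reading:

    #include <cstdint>

    struct Loc {
      // In-class initialized static const, mirroring MemoryLocation's
      // UnknownSize; deliberately given no out-of-line definition.
      static const uint64_t UnknownSize = ~UINT64_C(0);
      uint64_t Size;
    };

    uint64_t pick(bool Widen, const Loc &L) {
      // Without '+' this conditional yields an lvalue referring to
      // UnknownSize, odr-using it; with '+' only its value is read.
      return Widen ? +Loc::UnknownSize : L.Size;
    }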
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index f82235d0c26e..0264ad143f49 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -661,7 +661,7 @@ public:
}
};
-}
+} // namespace
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
@@ -1702,7 +1702,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
unsigned NumElim = 0;
DenseMap<const SCEV *, PHINode *> ExprToIVMap;
- // Process phis from wide to narrow. Mapping wide phis to the their truncation
+ // Process phis from wide to narrow. Map wide phis to their truncation
// so narrow phis can reuse them.
for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
PEnd = Phis.end(); PIter != PEnd; ++PIter) {
@@ -1933,7 +1933,7 @@ struct SCEVFindUnsafe {
}
bool isDone() const { return IsUnsafe; }
};
-}
+} // namespace
namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
diff --git a/lib/Analysis/ScopedNoAliasAA.cpp b/lib/Analysis/ScopedNoAliasAA.cpp
index 02f8b0b1384f..a8cfeb67ef94 100644
--- a/lib/Analysis/ScopedNoAliasAA.cpp
+++ b/lib/Analysis/ScopedNoAliasAA.cpp
@@ -99,12 +99,13 @@ protected:
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
- AliasResult alias(const Location &LocA, const Location &LocB) override;
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) override;
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
ModRefBehavior getModRefBehavior(const Function *F) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
};
@@ -176,8 +177,8 @@ ScopedNoAliasAA::mayAliasInScopes(const MDNode *Scopes,
return true;
}
-AliasAnalysis::AliasResult
-ScopedNoAliasAA::alias(const Location &LocA, const Location &LocB) {
+AliasAnalysis::AliasResult ScopedNoAliasAA::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
if (!EnableScopedNoAlias)
return AliasAnalysis::alias(LocA, LocB);
@@ -198,7 +199,7 @@ ScopedNoAliasAA::alias(const Location &LocA, const Location &LocB) {
return AliasAnalysis::alias(LocA, LocB);
}
-bool ScopedNoAliasAA::pointsToConstantMemory(const Location &Loc,
+bool ScopedNoAliasAA::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
@@ -214,7 +215,8 @@ ScopedNoAliasAA::getModRefBehavior(const Function *F) {
}
AliasAnalysis::ModRefResult
-ScopedNoAliasAA::getModRefInfo(ImmutableCallSite CS, const Location &Loc) {
+ScopedNoAliasAA::getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) {
if (!EnableScopedNoAlias)
return AliasAnalysis::getModRefInfo(CS, Loc);
diff --git a/lib/Analysis/StratifiedSets.h b/lib/Analysis/StratifiedSets.h
index fd3fbc0d86ad..878ca3d4c70b 100644
--- a/lib/Analysis/StratifiedSets.h
+++ b/lib/Analysis/StratifiedSets.h
@@ -688,5 +688,5 @@ private:
bool inbounds(StratifiedIndex N) const { return N < Links.size(); }
};
-}
+} // namespace llvm
#endif // LLVM_ADT_STRATIFIEDSETS_H
diff --git a/lib/Analysis/TypeBasedAliasAnalysis.cpp b/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 115872584cb2..82d29e0dc3fb 100644
--- a/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -270,7 +270,7 @@ namespace {
return TBAAStructTypeNode(P);
}
};
-}
+} // namespace
namespace {
/// TypeBasedAliasAnalysis - This is a simple alias analysis
@@ -300,12 +300,14 @@ namespace {
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
- AliasResult alias(const Location &LocA, const Location &LocB) override;
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override;
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
ModRefBehavior getModRefBehavior(const Function *F) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
};
@@ -453,8 +455,8 @@ TypeBasedAliasAnalysis::PathAliases(const MDNode *A,
}
AliasAnalysis::AliasResult
-TypeBasedAliasAnalysis::alias(const Location &LocA,
- const Location &LocB) {
+TypeBasedAliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
if (!EnableTBAA)
return AliasAnalysis::alias(LocA, LocB);
@@ -473,7 +475,7 @@ TypeBasedAliasAnalysis::alias(const Location &LocA,
return NoAlias;
}
-bool TypeBasedAliasAnalysis::pointsToConstantMemory(const Location &Loc,
+bool TypeBasedAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
if (!EnableTBAA)
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -515,7 +517,7 @@ TypeBasedAliasAnalysis::getModRefBehavior(const Function *F) {
AliasAnalysis::ModRefResult
TypeBasedAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
if (!EnableTBAA)
return AliasAnalysis::getModRefInfo(CS, Loc);
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index c4f046340fce..c45005f343d3 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -551,12 +551,17 @@ static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp,
}
break;
case ICmpInst::ICMP_EQ:
- if (LHS == V)
- computeKnownBits(RHS, KnownZero, KnownOne, DL, Depth + 1, Q);
- else if (RHS == V)
- computeKnownBits(LHS, KnownZero, KnownOne, DL, Depth + 1, Q);
- else
- llvm_unreachable("missing use?");
+ {
+ APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
+ if (LHS == V)
+ computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
+ else if (RHS == V)
+ computeKnownBits(LHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
+ else
+ llvm_unreachable("missing use?");
+ KnownZero |= KnownZeroTemp;
+ KnownOne |= KnownOneTemp;
+ }
break;
case ICmpInst::ICMP_ULE:
if (LHS == V) {
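
The temporaries are the substance of this fix: computeKnownBits overwrites its
KnownZero/KnownOne arguments, so querying the other side of the equality
directly into them would discard bits already proven about V. Computing into
fresh APInts and OR-ing the results in merges the two sources of facts. A
standalone miniature with 8-bit masks in place of APInt:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t KnownZero = 0x0F, KnownOne = 0x30; // already proven for V
      uint8_t TempZero = 0xC0, TempOne = 0x00;   // proven for RHS, and RHS == V
      KnownZero |= TempZero; // merge rather than overwrite
      KnownOne |= TempOne;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      return 0;
    }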
@@ -936,147 +941,11 @@ static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
}
}
-/// Determine which bits of V are known to be either zero or one and return
-/// them in the KnownZero/KnownOne bit sets.
-///
-/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
-/// we cannot optimize based on the assumption that it is zero without changing
-/// it to be an explicit zero. If we don't change it to zero, other code could
-/// optimized based on the contradictory assumption that it is non-zero.
-/// Because instcombine aggressively folds operations with undef args anyway,
-/// this won't lose us code quality.
-///
-/// This function is defined on values with integer type, values with pointer
-/// type, and vectors of integers. In the case
-/// where V is a vector, known zero, and known one values are the
-/// same width as the vector element, and the bit is set only if it is true
-/// for all of the elements in the vector.
-void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const DataLayout &DL, unsigned Depth, const Query &Q) {
- assert(V && "No Value?");
- assert(Depth <= MaxDepth && "Limit Search Depth");
+static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
+ APInt &KnownOne, const DataLayout &DL,
+ unsigned Depth, const Query &Q) {
unsigned BitWidth = KnownZero.getBitWidth();
- assert((V->getType()->isIntOrIntVectorTy() ||
- V->getType()->getScalarType()->isPointerTy()) &&
- "Not integer or pointer type!");
- assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
- (!V->getType()->isIntOrIntVectorTy() ||
- V->getType()->getScalarSizeInBits() == BitWidth) &&
- KnownZero.getBitWidth() == BitWidth &&
- KnownOne.getBitWidth() == BitWidth &&
- "V, KnownOne and KnownZero should have same BitWidth");
-
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- // We know all of the bits for a constant!
- KnownOne = CI->getValue();
- KnownZero = ~KnownOne;
- return;
- }
- // Null and aggregate-zero are all-zeros.
- if (isa<ConstantPointerNull>(V) ||
- isa<ConstantAggregateZero>(V)) {
- KnownOne.clearAllBits();
- KnownZero = APInt::getAllOnesValue(BitWidth);
- return;
- }
- // Handle a constant vector by taking the intersection of the known bits of
- // each element. There is no real need to handle ConstantVector here, because
- // we don't handle undef in any particularly useful way.
- if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
- // We know that CDS must be a vector of integers. Take the intersection of
- // each element.
- KnownZero.setAllBits(); KnownOne.setAllBits();
- APInt Elt(KnownZero.getBitWidth(), 0);
- for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
- Elt = CDS->getElementAsInteger(i);
- KnownZero &= ~Elt;
- KnownOne &= Elt;
- }
- return;
- }
-
- // The address of an aligned GlobalValue has trailing zeros.
- if (auto *GO = dyn_cast<GlobalObject>(V)) {
- unsigned Align = GO->getAlignment();
- if (Align == 0) {
- if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
- Type *ObjectType = GVar->getType()->getElementType();
- if (ObjectType->isSized()) {
- // If the object is defined in the current Module, we'll be giving
- // it the preferred alignment. Otherwise, we have to assume that it
- // may only have the minimum ABI alignment.
- if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
- Align = DL.getPreferredAlignment(GVar);
- else
- Align = DL.getABITypeAlignment(ObjectType);
- }
- }
- }
- if (Align > 0)
- KnownZero = APInt::getLowBitsSet(BitWidth,
- countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
- return;
- }
-
- if (Argument *A = dyn_cast<Argument>(V)) {
- unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
-
- if (!Align && A->hasStructRetAttr()) {
- // An sret parameter has at least the ABI alignment of the return type.
- Type *EltTy = cast<PointerType>(A->getType())->getElementType();
- if (EltTy->isSized())
- Align = DL.getABITypeAlignment(EltTy);
- }
-
- if (Align)
- KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
-
- // Don't give up yet... there might be an assumption that provides more
- // information...
- computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
-
- // Or a dominating condition for that matter
- if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
- computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
- Depth, Q);
- return;
- }
-
- // Start out not knowing anything.
- KnownZero.clearAllBits(); KnownOne.clearAllBits();
-
- // Limit search depth.
- // All recursive calls that increase depth must come after this.
- if (Depth == MaxDepth)
- return;
-
- // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
- // the bits of its aliasee.
- if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (!GA->mayBeOverridden())
- computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
- return;
- }
-
- // Check whether a nearby assume intrinsic can determine some known bits.
- computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
-
- // Check whether there's a dominating condition which implies something about
- // this value at the given context.
- if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
- computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth,
- Q);
-
- Operator *I = dyn_cast<Operator>(V);
- if (!I) return;
-
APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
switch (I->getOpcode()) {
default: break;
@@ -1328,7 +1197,7 @@ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
case Instruction::Alloca: {
- AllocaInst *AI = cast<AllocaInst>(V);
+ AllocaInst *AI = cast<AllocaInst>(I);
unsigned Align = AI->getAlignment();
if (Align == 0)
Align = DL.getABITypeAlignment(AI->getType()->getElementType());
@@ -1523,6 +1392,151 @@ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
}
}
+}
+
+/// Determine which bits of V are known to be either zero or one and return
+/// them in the KnownZero/KnownOne bit sets.
+///
+/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
+/// we cannot optimize based on the assumption that it is zero without changing
+/// it to be an explicit zero. If we don't change it to zero, other code could
+/// be optimized based on the contradictory assumption that it is non-zero.
+/// Because instcombine aggressively folds operations with undef args anyway,
+/// this won't lose us code quality.
+///
+/// This function is defined on values with integer type, values with pointer
+/// type, and vectors of integers. In the case where V is a vector, the known
+/// zero and known one values are the same width as the vector element, and
+/// the bit is set only if it is true for all of the elements in the vector.
+void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ const DataLayout &DL, unsigned Depth, const Query &Q) {
+ assert(V && "No Value?");
+ assert(Depth <= MaxDepth && "Limit Search Depth");
+ unsigned BitWidth = KnownZero.getBitWidth();
+
+ assert((V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->getScalarType()->isPointerTy()) &&
+ "Not integer or pointer type!");
+ assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
+ (!V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->getScalarSizeInBits() == BitWidth) &&
+ KnownZero.getBitWidth() == BitWidth &&
+ KnownOne.getBitWidth() == BitWidth &&
+ "V, KnownOne and KnownZero should have same BitWidth");
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ // We know all of the bits for a constant!
+ KnownOne = CI->getValue();
+ KnownZero = ~KnownOne;
+ return;
+ }
+ // Null and aggregate-zero are all-zeros.
+ if (isa<ConstantPointerNull>(V) ||
+ isa<ConstantAggregateZero>(V)) {
+ KnownOne.clearAllBits();
+ KnownZero = APInt::getAllOnesValue(BitWidth);
+ return;
+ }
+ // Handle a constant vector by taking the intersection of the known bits of
+ // each element. There is no real need to handle ConstantVector here, because
+ // we don't handle undef in any particularly useful way.
+ if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
+ // We know that CDS must be a vector of integers. Take the intersection of
+ // each element.
+ KnownZero.setAllBits(); KnownOne.setAllBits();
+ APInt Elt(KnownZero.getBitWidth(), 0);
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ Elt = CDS->getElementAsInteger(i);
+ KnownZero &= ~Elt;
+ KnownOne &= Elt;
+ }
+ return;
+ }
+
+ // The address of an aligned GlobalValue has trailing zeros.
+ if (auto *GO = dyn_cast<GlobalObject>(V)) {
+ unsigned Align = GO->getAlignment();
+ if (Align == 0) {
+ if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
+ Type *ObjectType = GVar->getType()->getElementType();
+ if (ObjectType->isSized()) {
+ // If the object is defined in the current Module, we'll be giving
+ // it the preferred alignment. Otherwise, we have to assume that it
+ // may only have the minimum ABI alignment.
+ if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
+ Align = DL.getPreferredAlignment(GVar);
+ else
+ Align = DL.getABITypeAlignment(ObjectType);
+ }
+ }
+ }
+ if (Align > 0)
+ KnownZero = APInt::getLowBitsSet(BitWidth,
+ countTrailingZeros(Align));
+ else
+ KnownZero.clearAllBits();
+ KnownOne.clearAllBits();
+ return;
+ }
+
+ if (Argument *A = dyn_cast<Argument>(V)) {
+ unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
+
+ if (!Align && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = cast<PointerType>(A->getType())->getElementType();
+ if (EltTy->isSized())
+ Align = DL.getABITypeAlignment(EltTy);
+ }
+
+ if (Align)
+ KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
+ else
+ KnownZero.clearAllBits();
+ KnownOne.clearAllBits();
+
+ // Don't give up yet... there might be an assumption that provides more
+ // information...
+ computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
+
+ // Or a dominating condition for that matter
+ if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
+ computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
+ Depth, Q);
+ return;
+ }
+
+ // Start out not knowing anything.
+ KnownZero.clearAllBits(); KnownOne.clearAllBits();
+
+ // Limit search depth.
+ // All recursive calls that increase depth must come after this.
+ if (Depth == MaxDepth)
+ return;
+
+ // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
+ // the bits of its aliasee.
+ if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (!GA->mayBeOverridden())
+ computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
+ return;
+ }
+
+ if (Operator *I = dyn_cast<Operator>(V))
+ computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q);
+  // computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition
+  // strictly refine KnownZero and KnownOne. Therefore, we run them after
+  // computeKnownBitsFromOperator.
+
+ // Check whether a nearby assume intrinsic can determine some known bits.
+ computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
+
+ // Check whether there's a dominating condition which implies something about
+ // this value at the given context.
+ if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
+ computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth,
+ Q);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}