Diffstat (limited to 'lib/Analysis/AliasAnalysis.cpp')
-rw-r--r--  lib/Analysis/AliasAnalysis.cpp  615
1 file changed, 348 insertions(+), 267 deletions(-)
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 44d137dffd22..35f2e97622fa 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -25,9 +25,16 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/CFLAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
@@ -40,44 +47,72 @@
#include "llvm/Pass.h"
using namespace llvm;
-// Register the AliasAnalysis interface, providing a nice name to refer to.
-INITIALIZE_ANALYSIS_GROUP(AliasAnalysis, "Alias Analysis", NoAA)
-char AliasAnalysis::ID = 0;
+/// Allow disabling BasicAA from the AA results. This is particularly useful
+/// when testing to isolate a single AA implementation.
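+/// For example, a hypothetical invocation such as
+///   opt -disable-basicaa -aa-eval <ir-file>
+/// isolates whichever other AA implementations are scheduled.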
+static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden,
+ cl::init(false));
+
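+// Each AA result keeps a back-pointer to this aggregation so that it can
+// issue recursive queries through the full stack; after a move, those
+// back-pointers must be re-pointed at the new object, as done below.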
+AAResults::AAResults(AAResults &&Arg) : AAs(std::move(Arg.AAs)) {
+ for (auto &AA : AAs)
+ AA->setAAResults(this);
+}
+
+AAResults &AAResults::operator=(AAResults &&Arg) {
+ AAs = std::move(Arg.AAs);
+ for (auto &AA : AAs)
+ AA->setAAResults(this);
+ return *this;
+}
+
+AAResults::~AAResults() {
+// FIXME: It would be nice to at least clear out the pointers back to this
+// aggregation here, but we end up with non-nesting lifetimes in the legacy
+// pass manager that prevent this from working. As a consequence, we can be
+// left with dangling references here in some cases.
+#if 0
+ for (auto &AA : AAs)
+ AA->setAAResults(nullptr);
+#endif
+}
//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//
-AliasResult AliasAnalysis::alias(const MemoryLocation &LocA,
- const MemoryLocation &LocB) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->alias(LocA, LocB);
+AliasResult AAResults::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
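+  // Ask each result in turn; the first answer more precise than MayAlias is
+  // taken as definitive, since MayAlias simply means "no opinion".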
+ for (const auto &AA : AAs) {
+ auto Result = AA->alias(LocA, LocB);
+ if (Result != MayAlias)
+ return Result;
+ }
+ return MayAlias;
}
-bool AliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
- bool OrLocal) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->pointsToConstantMemory(Loc, OrLocal);
-}
+bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) {
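+  // Any single AA that can prove the location constant (or local, when
+  // OrLocal is set) is sufficient; this is a disjunction over the results.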
+ for (const auto &AA : AAs)
+ if (AA->pointsToConstantMemory(Loc, OrLocal))
+ return true;
-AliasAnalysis::ModRefResult
-AliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getArgModRefInfo(CS, ArgIdx);
+ return false;
}
-void AliasAnalysis::deleteValue(Value *V) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- AA->deleteValue(V);
-}
+ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+ ModRefInfo Result = MRI_ModRef;
+
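+  // Intersect the ModRef bits from each AA: a behavior survives only if
+  // every result admits it is possible, so each additional AA can only
+  // refine the answer toward MRI_NoModRef.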
+ for (const auto &AA : AAs) {
+ Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));
-void AliasAnalysis::addEscapingUse(Use &U) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- AA->addEscapingUse(U);
+ // Early-exit the moment we reach the bottom of the lattice.
+ if (Result == MRI_NoModRef)
+ return Result;
+ }
+
+ return Result;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
+ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
// We may have two calls
if (auto CS = ImmutableCallSite(I)) {
// Check if the two calls modify the same memory
@@ -88,289 +123,215 @@ AliasAnalysis::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
// is that if the call references what this instruction
// defines, it must be clobbered by this location.
const MemoryLocation DefLoc = MemoryLocation::get(I);
- if (getModRefInfo(Call, DefLoc) != AliasAnalysis::NoModRef)
- return AliasAnalysis::ModRef;
- }
- return AliasAnalysis::NoModRef;
-}
-
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
-
- ModRefBehavior MRB = getModRefBehavior(CS);
- if (MRB == DoesNotAccessMemory)
- return NoModRef;
-
- ModRefResult Mask = ModRef;
- if (onlyReadsMemory(MRB))
- Mask = Ref;
-
- if (onlyAccessesArgPointees(MRB)) {
- bool doesAlias = false;
- ModRefResult AllArgsMask = NoModRef;
- if (doesAccessArgPointees(MRB)) {
- for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
- AI != AE; ++AI) {
- const Value *Arg = *AI;
- if (!Arg->getType()->isPointerTy())
- continue;
- unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
- MemoryLocation ArgLoc =
- MemoryLocation::getForArgument(CS, ArgIdx, *TLI);
- if (!isNoAlias(ArgLoc, Loc)) {
- ModRefResult ArgMask = getArgModRefInfo(CS, ArgIdx);
- doesAlias = true;
- AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
- }
- }
- }
- if (!doesAlias)
- return NoModRef;
- Mask = ModRefResult(Mask & AllArgsMask);
+ if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
+ return MRI_ModRef;
}
+ return MRI_NoModRef;
+}
- // If Loc is a constant memory location, the call definitely could not
- // modify the memory location.
- if ((Mask & Mod) && pointsToConstantMemory(Loc))
- Mask = ModRefResult(Mask & ~Mod);
-
- // If this is the end of the chain, don't forward.
- if (!AA) return Mask;
-
- // Otherwise, fall back to the next AA in the chain. But we can merge
- // in any mask we've managed to compute.
- return ModRefResult(AA->getModRefInfo(CS, Loc) & Mask);
-}
-
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
-
- // If CS1 or CS2 are readnone, they don't interact.
- ModRefBehavior CS1B = getModRefBehavior(CS1);
- if (CS1B == DoesNotAccessMemory) return NoModRef;
-
- ModRefBehavior CS2B = getModRefBehavior(CS2);
- if (CS2B == DoesNotAccessMemory) return NoModRef;
-
- // If they both only read from memory, there is no dependence.
- if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
- return NoModRef;
-
- AliasAnalysis::ModRefResult Mask = ModRef;
-
- // If CS1 only reads memory, the only dependence on CS2 can be
- // from CS1 reading memory written by CS2.
- if (onlyReadsMemory(CS1B))
- Mask = ModRefResult(Mask & Ref);
-
- // If CS2 only access memory through arguments, accumulate the mod/ref
- // information from CS1's references to the memory referenced by
- // CS2's arguments.
- if (onlyAccessesArgPointees(CS2B)) {
- AliasAnalysis::ModRefResult R = NoModRef;
- if (doesAccessArgPointees(CS2B)) {
- for (ImmutableCallSite::arg_iterator
- I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
- const Value *Arg = *I;
- if (!Arg->getType()->isPointerTy())
- continue;
- unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
- auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, *TLI);
-
- // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence of
- // CS1 on that location is the inverse.
- ModRefResult ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
- if (ArgMask == Mod)
- ArgMask = ModRef;
- else if (ArgMask == Ref)
- ArgMask = Mod;
-
- R = ModRefResult((R | (getModRefInfo(CS1, CS2ArgLoc) & ArgMask)) & Mask);
- if (R == Mask)
- break;
- }
- }
- return R;
- }
+ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) {
+ ModRefInfo Result = MRI_ModRef;
- // If CS1 only accesses memory through arguments, check if CS2 references
- // any of the memory referenced by CS1's arguments. If not, return NoModRef.
- if (onlyAccessesArgPointees(CS1B)) {
- AliasAnalysis::ModRefResult R = NoModRef;
- if (doesAccessArgPointees(CS1B)) {
- for (ImmutableCallSite::arg_iterator
- I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
- const Value *Arg = *I;
- if (!Arg->getType()->isPointerTy())
- continue;
- unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
- auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, *TLI);
-
- // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
- // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
- // might Ref, then we care only about a Mod by CS2.
- ModRefResult ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
- ModRefResult ArgR = getModRefInfo(CS2, CS1ArgLoc);
- if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
- ((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
- R = ModRefResult((R | ArgMask) & Mask);
-
- if (R == Mask)
- break;
- }
- }
- return R;
- }
+ for (const auto &AA : AAs) {
+ Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));
- // If this is the end of the chain, don't forward.
- if (!AA) return Mask;
+ // Early-exit the moment we reach the bottom of the lattice.
+ if (Result == MRI_NoModRef)
+ return Result;
+ }
- // Otherwise, fall back to the next AA in the chain. But we can merge
- // in any mask we've managed to compute.
- return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
+ return Result;
}
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
+ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ ModRefInfo Result = MRI_ModRef;
+
+ for (const auto &AA : AAs) {
+ Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));
+
+ // Early-exit the moment we reach the bottom of the lattice.
+ if (Result == MRI_NoModRef)
+ return Result;
+ }
+
+ return Result;
+}
- ModRefBehavior Min = UnknownModRefBehavior;
+FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
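+  // FunctionModRefBehavior values form the same kind of bit lattice;
+  // FMRB_DoesNotAccessMemory (no bits set) is the bottom element.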
+ FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
- // Call back into the alias analysis with the other form of getModRefBehavior
- // to see if it can give a better response.
- if (const Function *F = CS.getCalledFunction())
- Min = getModRefBehavior(F);
+ for (const auto &AA : AAs) {
+ Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));
- // If this is the end of the chain, don't forward.
- if (!AA) return Min;
+ // Early-exit the moment we reach the bottom of the lattice.
+ if (Result == FMRB_DoesNotAccessMemory)
+ return Result;
+ }
- // Otherwise, fall back to the next AA in the chain. But we can merge
- // in any result we've managed to compute.
- return ModRefBehavior(AA->getModRefBehavior(CS) & Min);
+ return Result;
}
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(const Function *F) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getModRefBehavior(F);
+FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
+ FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
+
+ for (const auto &AA : AAs) {
+ Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F));
+
+ // Early-exit the moment we reach the bottom of the lattice.
+ if (Result == FMRB_DoesNotAccessMemory)
+ return Result;
+ }
+
+ return Result;
}
//===----------------------------------------------------------------------===//
-// AliasAnalysis non-virtual helper method implementation
+// Helper method implementation
//===----------------------------------------------------------------------===//
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const LoadInst *L, const MemoryLocation &Loc) {
+ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
+ const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!L->isUnordered())
- return ModRef;
+ return MRI_ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
- return NoModRef;
+ return MRI_NoModRef;
// Otherwise, a load just reads.
- return Ref;
+ return MRI_Ref;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const StoreInst *S, const MemoryLocation &Loc) {
+ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
+ const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!S->isUnordered())
- return ModRef;
+ return MRI_ModRef;
if (Loc.Ptr) {
// If the store address cannot alias the pointer in question, then the
// specified memory cannot be modified by the store.
if (!alias(MemoryLocation::get(S), Loc))
- return NoModRef;
+ return MRI_NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this store.
if (pointsToConstantMemory(Loc))
- return NoModRef;
-
+ return MRI_NoModRef;
}
// Otherwise, a store just writes.
- return Mod;
+ return MRI_Mod;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc) {
+ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
+ const MemoryLocation &Loc) {
if (Loc.Ptr) {
// If the va_arg address cannot alias the pointer in question, then the
// specified memory cannot be accessed by the va_arg.
if (!alias(MemoryLocation::get(V), Loc))
- return NoModRef;
+ return MRI_NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this va_arg.
if (pointsToConstantMemory(Loc))
- return NoModRef;
+ return MRI_NoModRef;
}
// Otherwise, a va_arg reads and writes.
- return ModRef;
+ return MRI_ModRef;
+}
+
+ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
+ const MemoryLocation &Loc) {
+ if (Loc.Ptr) {
+ // If the pointer is a pointer to constant memory,
+ // then it could not have been modified by this catchpad.
+ if (pointsToConstantMemory(Loc))
+ return MRI_NoModRef;
+ }
+
+ // Otherwise, a catchpad reads and writes.
+ return MRI_ModRef;
+}
+
+ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
+ const MemoryLocation &Loc) {
+ if (Loc.Ptr) {
+ // If the pointer is a pointer to constant memory,
+    // then it could not have been modified by this catchret.
+ if (pointsToConstantMemory(Loc))
+ return MRI_NoModRef;
+ }
+
+ // Otherwise, a catchret reads and writes.
+ return MRI_ModRef;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX,
- const MemoryLocation &Loc) {
+ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
+ const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (CX->getSuccessOrdering() > Monotonic)
- return ModRef;
+ return MRI_ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
- return NoModRef;
+ return MRI_NoModRef;
- return ModRef;
+ return MRI_ModRef;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW,
- const MemoryLocation &Loc) {
+ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
+ const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (RMW->getOrdering() > Monotonic)
- return ModRef;
+ return MRI_ModRef;
// If the atomicrmw address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
- return NoModRef;
+ return MRI_NoModRef;
- return ModRef;
+ return MRI_ModRef;
}
-// FIXME: this is really just shoring-up a deficiency in alias analysis.
-// BasicAA isn't willing to spend linear time determining whether an alloca
-// was captured before or after this particular call, while we are. However,
-// with a smarter AA in place, this test is just wasting compile time.
-AliasAnalysis::ModRefResult AliasAnalysis::callCapturesBefore(
- const Instruction *I, const MemoryLocation &MemLoc, DominatorTree *DT) {
+/// \brief Return information about whether a particular call site modifies
+/// or reads the specified memory location \p MemLoc before instruction \p I
+/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
+/// instruction-ordering queries inside the BasicBlock containing \p I.
+/// FIXME: this is really just shoring-up a deficiency in alias analysis.
+/// BasicAA isn't willing to spend linear time determining whether an alloca
+/// was captured before or after this particular call, while we are. However,
+/// with a smarter AA in place, this test is just wasting compile time.
+ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
+ const MemoryLocation &MemLoc,
+ DominatorTree *DT,
+ OrderedBasicBlock *OBB) {
if (!DT)
- return AliasAnalysis::ModRef;
+ return MRI_ModRef;
- const Value *Object = GetUnderlyingObject(MemLoc.Ptr, *DL);
+ const Value *Object =
+ GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
- return AliasAnalysis::ModRef;
+ return MRI_ModRef;
ImmutableCallSite CS(I);
if (!CS.getInstruction() || CS.getInstruction() == Object)
- return AliasAnalysis::ModRef;
+ return MRI_ModRef;
if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
/* StoreCaptures */ true, I, DT,
- /* include Object */ true))
- return AliasAnalysis::ModRef;
+ /* include Object */ true,
+ /* OrderedBasicBlock */ OBB))
+ return MRI_ModRef;
unsigned ArgNo = 0;
- AliasAnalysis::ModRefResult R = AliasAnalysis::NoModRef;
+ ModRefInfo R = MRI_NoModRef;
for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture or byval pointer arguments. If this
@@ -389,50 +350,20 @@ AliasAnalysis::ModRefResult AliasAnalysis::callCapturesBefore(
if (CS.doesNotAccessMemory(ArgNo))
continue;
if (CS.onlyReadsMemory(ArgNo)) {
- R = AliasAnalysis::Ref;
+ R = MRI_Ref;
continue;
}
- return AliasAnalysis::ModRef;
+ return MRI_ModRef;
}
return R;
}
-// AliasAnalysis destructor: DO NOT move this to the header file for
-// AliasAnalysis or else clients of the AliasAnalysis class may not depend on
-// the AliasAnalysis.o file in the current .a file, causing alias analysis
-// support to not be included in the tool correctly!
-//
-AliasAnalysis::~AliasAnalysis() {}
-
-/// InitializeAliasAnalysis - Subclasses must call this method to initialize the
-/// AliasAnalysis interface before any other methods are called.
-///
-void AliasAnalysis::InitializeAliasAnalysis(Pass *P, const DataLayout *NewDL) {
- DL = NewDL;
- auto *TLIP = P->getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
- TLI = TLIP ? &TLIP->getTLI() : nullptr;
- AA = &P->getAnalysis<AliasAnalysis>();
-}
-
-// getAnalysisUsage - All alias analysis implementations should invoke this
-// directly (using AliasAnalysis::getAnalysisUsage(AU)).
-void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<AliasAnalysis>(); // All AA's chain
-}
-
-/// getTypeStoreSize - Return the DataLayout store size for the given type,
-/// if known, or a conservative value otherwise.
-///
-uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
- return DL ? DL->getTypeStoreSize(Ty) : MemoryLocation::UnknownSize;
-}
-
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
-bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
- const MemoryLocation &Loc) {
- return canInstructionRangeModRef(BB.front(), BB.back(), Loc, Mod);
+bool AAResults::canBasicBlockModify(const BasicBlock &BB,
+ const MemoryLocation &Loc) {
+ return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
}
/// canInstructionRangeModRef - Return true if it is possible for the
@@ -440,28 +371,178 @@ bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
-bool AliasAnalysis::canInstructionRangeModRef(const Instruction &I1,
- const Instruction &I2,
- const MemoryLocation &Loc,
- const ModRefResult Mode) {
+bool AAResults::canInstructionRangeModRef(const Instruction &I1,
+ const Instruction &I2,
+ const MemoryLocation &Loc,
+ const ModRefInfo Mode) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
- BasicBlock::const_iterator I = &I1;
- BasicBlock::const_iterator E = &I2;
+ BasicBlock::const_iterator I = I1.getIterator();
+ BasicBlock::const_iterator E = I2.getIterator();
++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
- if (getModRefInfo(I, Loc) & Mode)
+ if (getModRefInfo(&*I, Loc) & Mode)
return true;
return false;
}
+// Provide a definition for the root virtual destructor.
+AAResults::Concept::~Concept() {}
+
+namespace {
+/// A wrapper pass for external alias analyses. This just squirrels away the
+/// callback used to run any analyses and register their results.
+struct ExternalAAWrapperPass : ImmutablePass {
+ typedef std::function<void(Pass &, Function &, AAResults &)> CallbackT;
+
+ CallbackT CB;
+
+ static char ID;
+
+ ExternalAAWrapperPass() : ImmutablePass(ID) {
+ initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+ explicit ExternalAAWrapperPass(CallbackT CB)
+ : ImmutablePass(ID), CB(std::move(CB)) {
+ initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+}
+
+char ExternalAAWrapperPass::ID = 0;
+INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
+ false, true)
+
+ImmutablePass *
+llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
+ return new ExternalAAWrapperPass(std::move(Callback));
+}
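+
+// A minimal usage sketch (hypothetical client code, not part of this file):
+// a frontend injects its own results into the aggregation by scheduling this
+// pass with a callback, e.g.:
+//
+//   PM.add(createExternalAAWrapperPass(
+//       [](Pass &P, Function &F, AAResults &AAR) {
+//         AAR.addAAResult(MyCustomResult); // MyCustomResult is hypothetical.
+//       }));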
+
+AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
+ initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
+}
+
+char AAResultsWrapperPass::ID = 0;
+
+INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
+ "Function Alias Analysis Results", false, true)
+INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(CFLAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
+INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
+ "Function Alias Analysis Results", false, true)
+
+FunctionPass *llvm::createAAResultsWrapperPass() {
+ return new AAResultsWrapperPass();
+}
+
+/// Run the wrapper pass to rebuild an aggregation over known AA passes.
+///
+/// This is the legacy pass manager's interface to the new-style AA results
+/// aggregation object. Because this is somewhat shoe-horned into the legacy
+/// pass manager, we hard code all the specific alias analyses available into
+/// it. While the particular set enabled is configured via commandline flags,
+/// adding a new alias analysis to LLVM will require adding support for it to
+/// this list.
+bool AAResultsWrapperPass::runOnFunction(Function &F) {
+ // NB! This *must* be reset before adding new AA results to the new
+ // AAResults object because in the legacy pass manager, each instance
+ // of these will refer to the *same* immutable analyses, registering and
+ // unregistering themselves with them. We need to carefully tear down the
+ // previous object first, in this case replacing it with an empty one, before
+ // registering new results.
+ AAR.reset(new AAResults());
+
+ // BasicAA is always available for function analyses. Also, we add it first
+ // so that it can trump TBAA results when it proves MustAlias.
+ // FIXME: TBAA should have an explicit mode to support this and then we
+ // should reconsider the ordering here.
+ if (!DisableBasicAA)
+ AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());
+
+ // Populate the results with the currently available AAs.
+ if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
+ AAR->addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
+ AAR->addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass =
+ getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
+ AAR->addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
+ AAR->addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
+ AAR->addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = getAnalysisIfAvailable<CFLAAWrapperPass>())
+ AAR->addAAResult(WrapperPass->getResult());
+
+ // If available, run an external AA providing callback over the results as
+ // well.
+ if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
+ if (WrapperPass->CB)
+ WrapperPass->CB(*this, F, *AAR);
+
+ // Analyses don't mutate the IR, so return false.
+ return false;
+}
+
+void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<BasicAAWrapperPass>();
+
+ // We also need to mark all the alias analysis passes we will potentially
+ // probe in runOnFunction as used here to ensure the legacy pass manager
+ // preserves them. This hard coding of lists of alias analyses is specific to
+ // the legacy pass manager.
+ AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
+ AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
+ AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
+ AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
+ AU.addUsedIfAvailable<SCEVAAWrapperPass>();
+ AU.addUsedIfAvailable<CFLAAWrapperPass>();
+}
+
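+/// Build an AAResults aggregation directly from an arbitrary legacy pass.
+/// This mirrors AAResultsWrapperPass::runOnFunction for passes that
+/// (presumably) cannot depend on the wrapper and must assemble the
+/// aggregation themselves.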
+AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
+ BasicAAResult &BAR) {
+ AAResults AAR;
+
+ // Add in our explicitly constructed BasicAA results.
+ if (!DisableBasicAA)
+ AAR.addAAResult(BAR);
+
+ // Populate the results with the other currently available AAs.
+ if (auto *WrapperPass =
+ P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
+ AAR.addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
+ AAR.addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass =
+ P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
+ AAR.addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
+ AAR.addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = P.getAnalysisIfAvailable<SCEVAAWrapperPass>())
+ AAR.addAAResult(WrapperPass->getResult());
+ if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAAWrapperPass>())
+ AAR.addAAResult(WrapperPass->getResult());
+
+ return AAR;
+}
+
/// isNoAliasCall - Return true if this pointer is returned by a noalias
/// function.
bool llvm::isNoAliasCall(const Value *V) {
- if (isa<CallInst>(V) || isa<InvokeInst>(V))
- return ImmutableCallSite(cast<Instruction>(V))
- .paramHasAttr(0, Attribute::NoAlias);
+ if (auto CS = ImmutableCallSite(V))
+ return CS.paramHasAttr(0, Attribute::NoAlias);
return false;
}