Diffstat (limited to 'contrib/llvm/lib/Transforms/Scalar/GVN.cpp')
 contrib/llvm/lib/Transforms/Scalar/GVN.cpp | 99 +++++++++++-----------
 1 file changed, 55 insertions(+), 44 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index 4822fd09448c..f003e0669966 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -41,7 +41,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PatternMatch.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -271,16 +271,16 @@ void ValueTable::add(Value *V, uint32_t num) {
valueNumbering.insert(std::make_pair(V, num));
}
-uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
+uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
if (AA->doesNotAccessMemory(C)) {
Expression exp = create_expression(C);
- uint32_t& e = expressionNumbering[exp];
+ uint32_t &e = expressionNumbering[exp];
if (!e) e = nextValueNumber++;
valueNumbering[C] = e;
return e;
} else if (AA->onlyReadsMemory(C)) {
Expression exp = create_expression(C);
- uint32_t& e = expressionNumbering[exp];
+ uint32_t &e = expressionNumbering[exp];
if (!e) {
e = nextValueNumber++;
valueNumbering[C] = e;
@@ -413,7 +413,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
- case Instruction::Or :
+ case Instruction::Or:
case Instruction::Xor:
case Instruction::ICmp:
case Instruction::FCmp:
@@ -503,7 +503,7 @@ namespace {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
ValueTable VN;
@@ -535,7 +535,7 @@ namespace {
InstrsToErase.push_back(I);
}
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@@ -632,6 +632,7 @@ INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
errs() << "{\n";
for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
@@ -641,6 +642,7 @@ void GVN::dump(DenseMap<uint32_t, Value*>& d) {
}
errs() << "}\n";
}
+#endif
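
The new guard follows LLVM's convention for debug-only helpers: dump() is compiled out of release (NDEBUG) builds unless the build explicitly opts in with LLVM_ENABLE_DUMP. A minimal sketch of the pattern, using a hypothetical helper name:

    // Sketch of the guard used above (dumpValueMap is a hypothetical
    // stand-in): the helper exists in assert-enabled builds, or in
    // release builds configured with -DLLVM_ENABLE_DUMP.
    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dumpValueMap() {
      // debug-only printing goes here
    }
    #endif
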
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
@@ -728,7 +730,7 @@ SpeculationFailure:
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
Type *LoadTy,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
@@ -744,7 +746,6 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
return true;
}
-
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace and
@@ -754,7 +755,7 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Type *LoadedTy,
Instruction *InsertPt,
- const TargetData &TD) {
+ const DataLayout &TD) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
return 0;
@@ -767,24 +768,25 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// If the store and reload are the same size, we can always reuse it.
if (StoreSize == LoadSize) {
// Pointer to Pointer -> use bitcast.
- if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy())
+ if (StoredValTy->getScalarType()->isPointerTy() &&
+ LoadedTy->getScalarType()->isPointerTy())
return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
// Convert source pointers to integers, which can be bitcast.
- if (StoredValTy->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
+ if (StoredValTy->getScalarType()->isPointerTy()) {
+ StoredValTy = TD.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
Type *TypeToCastTo = LoadedTy;
- if (TypeToCastTo->isPointerTy())
- TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
+ if (TypeToCastTo->getScalarType()->isPointerTy())
+ TypeToCastTo = TD.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
// Cast to pointer if the load needs a pointer type.
- if (LoadedTy->isPointerTy())
+ if (LoadedTy->getScalarType()->isPointerTy())
StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
return StoredVal;
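
The repeated isPointerTy() to getScalarType()->isPointerTy() change is what lets this coercion logic handle vectors of pointers: a type such as <2 x i8*> is not itself a PointerType, so the old check missed it. A minimal sketch against the 3.2-era Type API (header locations of that release assumed):

    #include "llvm/DerivedTypes.h"  // 3.2-era location (later llvm/IR/)
    using namespace llvm;

    // True for i8* and also for vector-of-pointer types such as <2 x i8*>.
    // A plain Ty->isPointerTy() returns false for the vector cases;
    // getScalarType() unwraps a vector type to its element type first.
    static bool isPointerOrPointerVector(Type *Ty) {
      return Ty->getScalarType()->isPointerTy();
    }
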
@@ -796,8 +798,8 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");
// Convert source pointers to integers, which can be manipulated.
- if (StoredValTy->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
+ if (StoredValTy->getScalarType()->isPointerTy()) {
+ StoredValTy = TD.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
@@ -822,7 +824,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
return StoredVal;
// If the result is a pointer, inttoptr.
- if (LoadedTy->isPointerTy())
+ if (LoadedTy->getScalarType()->isPointerTy())
return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
// Otherwise, bitcast.
@@ -840,7 +842,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
Value *WritePtr,
uint64_t WriteSizeInBits,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
@@ -913,7 +915,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
@@ -929,7 +931,7 @@ static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
- LoadInst *DepLI, const TargetData &TD){
+ LoadInst *DepLI, const DataLayout &TD){
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
return -1;
@@ -957,7 +959,7 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemIntrinsic *MI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the mem operation is a non-constant size, we can't handle it.
ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
if (SizeCst == 0) return -1;
@@ -1007,7 +1009,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
- Instruction *InsertPt, const TargetData &TD){
+ Instruction *InsertPt, const DataLayout &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
@@ -1017,8 +1019,9 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
- if (SrcVal->getType()->isPointerTy())
- SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx));
+ if (SrcVal->getType()->getScalarType()->isPointerTy())
+ SrcVal = Builder.CreatePtrToInt(SrcVal,
+ TD.getIntPtrType(SrcVal->getType()));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
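
These hunks also switch from getIntPtrType(LLVMContext&) to the new type-based getIntPtrType(Type*). The difference is address-space awareness: the old overload always answered for the default address space, while the new one sizes the integer to the pointer's own address space. A hedged sketch of roughly what the new overload computes, assuming a PointerType *PtrTy and a DataLayout TD in scope:

    // Roughly what TD.getIntPtrType(PtrTy) yields for a plain pointer
    // type (the real method also vectorizes for <N x T*>):
    unsigned AS = cast<PointerType>(PtrTy)->getAddressSpace();
    Type *IntPtrTy =
        IntegerType::get(PtrTy->getContext(), TD.getPointerSizeInBits(AS));
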
@@ -1046,7 +1049,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
- const TargetData &TD = *gvn.getTargetData();
+ const DataLayout &TD = *gvn.getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
// widen SrcVal out to a larger load.
unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
@@ -1105,7 +1108,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
- const TargetData &TD){
+ const DataLayout &TD){
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
@@ -1229,7 +1232,7 @@ struct AvailableValueInBlock {
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- const TargetData *TD = gvn.getTargetData();
+ const DataLayout *TD = gvn.getDataLayout();
assert(TD && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
*TD);
@@ -1251,7 +1254,7 @@ struct AvailableValueInBlock {
<< *Res << '\n' << "\n\n\n");
}
} else {
- const TargetData *TD = gvn.getTargetData();
+ const DataLayout *TD = gvn.getDataLayout();
assert(TD && "Need target data to handle type mismatch case");
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
LoadTy, BB->getTerminator(), *TD);
@@ -1299,7 +1302,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
// If new PHI nodes were created, notify alias analysis.
- if (V->getType()->isPointerTy()) {
+ if (V->getType()->getScalarType()->isPointerTy()) {
AliasAnalysis *AA = gvn.getAliasAnalysis();
for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
@@ -1436,7 +1439,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
- if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst) ||
+ if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
// Loading immediately after lifetime begin -> undef.
isLifetimeStart(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
@@ -1496,7 +1499,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
if (isa<PHINode>(V))
V->takeName(LI);
- if (V->getType()->isPointerTy())
+ if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
++NumGVNLoad;
@@ -1728,7 +1731,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
- if (V->getType()->isPointerTy())
+ if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
++NumPRELoad;
@@ -1855,7 +1858,7 @@ bool GVN::processLoad(LoadInst *L) {
// Replace the load!
L->replaceAllUsesWith(AvailVal);
- if (AvailVal->getType()->isPointerTy())
+ if (AvailVal->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(AvailVal);
markInstructionForDeletion(L);
++NumGVNLoad;
@@ -1912,7 +1915,7 @@ bool GVN::processLoad(LoadInst *L) {
// Remove it!
L->replaceAllUsesWith(StoredVal);
- if (StoredVal->getType()->isPointerTy())
+ if (StoredVal->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(StoredVal);
markInstructionForDeletion(L);
++NumGVNLoad;
@@ -1941,7 +1944,7 @@ bool GVN::processLoad(LoadInst *L) {
// Remove it!
patchAndReplaceAllUsesWith(AvailableVal, L);
- if (DepLI->getType()->isPointerTy())
+ if (DepLI->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(DepLI);
markInstructionForDeletion(L);
++NumGVNLoad;
@@ -1951,7 +1954,7 @@ bool GVN::processLoad(LoadInst *L) {
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
- if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
markInstructionForDeletion(L);
++NumGVNLoad;
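
Both load-elimination paths now pass TargetLibraryInfo into isMallocLikeFn. Malloc recognition is a library-call question: under -fno-builtin or on a freestanding target, a function merely named malloc must not be assumed to return fresh, uninitialized memory. A sketch of the query, assuming the 3.2-era MemoryBuiltins interface and a hypothetical wrapper name:

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Instructions.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    // A fresh allocation has no defined contents, so a load that depends
    // only on it can be folded to undef.
    static bool loadYieldsUndef(Instruction *Dep, const TargetLibraryInfo *TLI) {
      return isa<AllocaInst>(Dep) || isMallocLikeFn(Dep, TLI);
    }
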
@@ -2182,7 +2185,7 @@ bool GVN::processInstruction(Instruction *I) {
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
I->replaceAllUsesWith(V);
- if (MD && V->getType()->isPointerTy())
+ if (MD && V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(I);
++NumGVNSimpl;
@@ -2231,12 +2234,20 @@ bool GVN::processInstruction(Instruction *I) {
Value *SwitchCond = SI->getCondition();
BasicBlock *Parent = SI->getParent();
bool Changed = false;
+
+ // Remember how many outgoing edges there are to every successor.
+ SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
+ for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
+ ++SwitchEdges[SI->getSuccessor(i)];
+
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
BasicBlock *Dst = i.getCaseSuccessor();
- BasicBlockEdge E(Parent, Dst);
- if (E.isSingleEdge())
+ // If there is only a single edge, propagate the case value into it.
+ if (SwitchEdges.lookup(Dst) == 1) {
+ BasicBlockEdge E(Parent, Dst);
Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E);
+ }
}
return Changed;
}
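
The switch hunk is a correctness-preserving speedup: propagateEquality may only assert SwitchCond == CaseValue in a successor when exactly one edge from the switch reaches it. Previously each case queried BasicBlockEdge::isSingleEdge(), which rescans all successors per case; counting successor multiplicities once up front answers the same question in a single pass. The hazard being guarded against, sketched as a comment:

    // Hypothetical IR where propagation would be unsound without the check:
    //
    //   switch i32 %x, label %default [ i32 0, label %shared
    //                                   i32 1, label %shared ]
    //
    // Two case edges enter %shared, so %x may be 0 or 1 there; equating
    // %x with either constant along the Parent->%shared edge is wrong.
    // SwitchEdges.lookup(%shared) == 2, so both cases are skipped.
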
@@ -2274,7 +2285,7 @@ bool GVN::processInstruction(Instruction *I) {
// Remove it!
patchAndReplaceAllUsesWith(repl, I);
- if (MD && repl->getType()->isPointerTy())
+ if (MD && repl->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(repl);
markInstructionForDeletion(I);
return true;
@@ -2285,7 +2296,7 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
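
In this release DataLayout is still an ImmutablePass fetched with getAnalysisIfAvailable, so TD may legitimately be null when no target description was supplied; that is why the coercion helpers above assert on TD rather than assume it. A minimal sketch of the resulting guard, as it might appear inside a pass's runOnFunction:

    // Sketch: type-based load coercion is only attempted when a
    // DataLayout is actually present (the pointer may be null here).
    if (const DataLayout *TD = getAnalysisIfAvailable<DataLayout>()) {
      (void)TD; // size/offset-dependent rewrites enabled
    } else {
      // no target data: skip transformations that need type sizes
    }
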
@@ -2522,7 +2533,7 @@ bool GVN::performPRE(Function &F) {
addToLeaderTable(ValNo, Phi, CurrentBlock);
Phi->setDebugLoc(CurInst->getDebugLoc());
CurInst->replaceAllUsesWith(Phi);
- if (Phi->getType()->isPointerTy()) {
+ if (Phi->getType()->getScalarType()->isPointerTy()) {
// Because we have added a PHI-use of the pointer value, it has now
// "escaped" from alias analysis' perspective. We need to inform
// AA of this.