Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/ADCE.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/AlignmentFromAssumptions.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/BDCE.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/ConstantHoisting.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/ConstantProp.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/CorrelatedValuePropagation.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/DCE.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp | 60
-rw-r--r--  lib/Transforms/Scalar/EarlyCSE.cpp | 12
-rw-r--r--  lib/Transforms/Scalar/FlattenCFGPass.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/Float2Int.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp | 55
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp | 15
-rw-r--r--  lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp | 70
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/LoadCombine.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopDeletion.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopDistribute.cpp | 40
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 6
-rw-r--r--  lib/Transforms/Scalar/LoopInstSimplify.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopInterchange.cpp | 8
-rw-r--r--  lib/Transforms/Scalar/LoopRerollPass.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopRotation.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp | 18
-rw-r--r--  lib/Transforms/Scalar/LoopUnrollPass.cpp | 18
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LowerAtomic.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LowerExpectIntrinsic.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp | 19
-rw-r--r--  lib/Transforms/Scalar/MergedLoadStoreMotion.cpp | 24
-rw-r--r--  lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/PlaceSafepoints.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/Reg2Mem.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | 19
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp | 12
-rw-r--r--  lib/Transforms/Scalar/SampleProfile.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/Sink.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/StraightLineStrengthReduce.cpp | 13
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp | 7
43 files changed, 218 insertions, 246 deletions
diff --git a/lib/Transforms/Scalar/ADCE.cpp b/lib/Transforms/Scalar/ADCE.cpp
index d6fc91641588..fe0224bb56c7 100644
--- a/lib/Transforms/Scalar/ADCE.cpp
+++ b/lib/Transforms/Scalar/ADCE.cpp
@@ -44,7 +44,7 @@ struct ADCE : public FunctionPass {
AU.setPreservesCFG();
}
};
-}
+} // namespace
char ADCE::ID = 0;
INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false)
diff --git a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index 8918909f484a..a4e5446a2b12 100644
--- a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -76,7 +76,7 @@ struct AlignmentFromAssumptions : public FunctionPass {
const SCEV *&OffSCEV);
bool processAssumption(CallInst *I);
};
-}
+} // namespace
char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
diff --git a/lib/Transforms/Scalar/BDCE.cpp b/lib/Transforms/Scalar/BDCE.cpp
index 09c605e76737..8ffbacddda68 100644
--- a/lib/Transforms/Scalar/BDCE.cpp
+++ b/lib/Transforms/Scalar/BDCE.cpp
@@ -66,7 +66,7 @@ struct BDCE : public FunctionPass {
AssumptionCache *AC;
DominatorTree *DT;
};
-}
+} // namespace
char BDCE::ID = 0;
INITIALIZE_PASS_BEGIN(BDCE, "bdce", "Bit-Tracking Dead Code Elimination",
diff --git a/lib/Transforms/Scalar/ConstantHoisting.cpp b/lib/Transforms/Scalar/ConstantHoisting.cpp
index 4288742dd3eb..cc1dc9435a05 100644
--- a/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -171,7 +171,7 @@ private:
void deleteDeadCastInst() const;
bool optimizeConstants(Function &Fn);
};
-}
+} // namespace
char ConstantHoisting::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantHoisting, "consthoist", "Constant Hoisting",
diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp
index c974ebb9456f..e3df86ecf169 100644
--- a/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/lib/Transforms/Scalar/ConstantProp.cpp
@@ -47,7 +47,7 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
-}
+} // namespace
char ConstantPropagation::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantPropagation, "constprop",
diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 79624b2e4c47..b1809b7fae08 100644
--- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -56,7 +56,7 @@ namespace {
AU.addRequired<LazyValueInfo>();
}
};
-}
+} // namespace
char CorrelatedValuePropagation::ID = 0;
INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation",
diff --git a/lib/Transforms/Scalar/DCE.cpp b/lib/Transforms/Scalar/DCE.cpp
index 3b262a23091f..aa628e5aca81 100644
--- a/lib/Transforms/Scalar/DCE.cpp
+++ b/lib/Transforms/Scalar/DCE.cpp
@@ -60,7 +60,7 @@ namespace {
AU.setPreservesCFG();
}
};
-}
+} // namespace
char DeadInstElimination::ID = 0;
INITIALIZE_PASS(DeadInstElimination, "die",
@@ -87,7 +87,7 @@ namespace {
AU.setPreservesCFG();
}
};
-}
+} // namespace
char DCE::ID = 0;
INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false)
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index eb48a766a2cf..c99dc5fc8445 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -78,7 +78,7 @@ namespace {
bool runOnBasicBlock(BasicBlock &BB);
bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
- void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+ void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL);
@@ -92,7 +92,7 @@ namespace {
AU.addPreserved<MemoryDependenceAnalysis>();
}
};
-}
+} // namespace
char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
@@ -194,37 +194,37 @@ static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
-static AliasAnalysis::Location
-getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
return MemoryLocation::get(SI);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
// memcpy/memmove/memset.
- AliasAnalysis::Location Loc = MemoryLocation::getForDest(MI);
+ MemoryLocation Loc = MemoryLocation::getForDest(MI);
return Loc;
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
- if (!II) return AliasAnalysis::Location();
+ if (!II)
+ return MemoryLocation();
switch (II->getIntrinsicID()) {
- default: return AliasAnalysis::Location(); // Unhandled intrinsic.
+ default:
+ return MemoryLocation(); // Unhandled intrinsic.
case Intrinsic::init_trampoline:
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
- return AliasAnalysis::Location(II->getArgOperand(0));
+ return MemoryLocation(II->getArgOperand(0));
case Intrinsic::lifetime_end: {
uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- return AliasAnalysis::Location(II->getArgOperand(1), Len);
+ return MemoryLocation(II->getArgOperand(1), Len);
}
}
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
-static AliasAnalysis::Location
-getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
"Unknown instruction case");
@@ -232,7 +232,7 @@ getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
// instructions (memcpy/memmove).
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
return MemoryLocation::getForSource(MTI);
- return AliasAnalysis::Location();
+ return MemoryLocation();
}
@@ -317,7 +317,7 @@ static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
uint64_t Size;
if (getObjectSize(V, Size, DL, TLI))
return Size;
- return AliasAnalysis::UnknownSize;
+ return MemoryLocation::UnknownSize;
}
namespace {
@@ -333,8 +333,8 @@ namespace {
/// completely overwrites a store to the 'Earlier' location.
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
-static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
- const AliasAnalysis::Location &Earlier,
+static OverwriteResult isOverwrite(const MemoryLocation &Later,
+ const MemoryLocation &Earlier,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
int64_t &EarlierOff, int64_t &LaterOff) {
@@ -346,8 +346,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
if (P1 == P2) {
// If we don't know the sizes of either access, then we can't do a
// comparison.
- if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize)
+ if (Later.Size == MemoryLocation::UnknownSize ||
+ Earlier.Size == MemoryLocation::UnknownSize)
return OverwriteUnknown;
// Make sure that the Later size is >= the Earlier size.
@@ -357,8 +357,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
- if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize)
+ if (Later.Size == MemoryLocation::UnknownSize ||
+ Earlier.Size == MemoryLocation::UnknownSize)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
@@ -374,7 +374,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// If the "Later" store is to a recognizable object, get its size.
uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
- if (ObjectSize != AliasAnalysis::UnknownSize)
+ if (ObjectSize != MemoryLocation::UnknownSize)
if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
return OverwriteComplete;
@@ -441,11 +441,11 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
- const AliasAnalysis::Location &InstStoreLoc,
+ const MemoryLocation &InstStoreLoc,
Instruction *DepWrite, AliasAnalysis &AA) {
// Self reads can only happen for instructions that read memory. Get the
// location read.
- AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
+ MemoryLocation InstReadLoc = getLocForRead(Inst, AA);
if (!InstReadLoc.Ptr) return false; // Not a reading instruction.
// If the read and written loc obviously don't alias, it isn't a read.
@@ -459,7 +459,7 @@ static bool isPossibleSelfRead(Instruction *Inst,
// Here we don't know if A/B may alias, but we do know that B/B are must
// aliases, so removing the first memcpy is safe (assuming it writes <= #
// bytes as the second one).
- AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
+ MemoryLocation DepReadLoc = getLocForRead(DepWrite, AA);
if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
return false;
@@ -525,7 +525,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
}
// Figure out what location is being stored to.
- AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
+ MemoryLocation Loc = getLocForWrite(Inst, *AA);
// If we didn't get a useful location, fail.
if (!Loc.Ptr)
@@ -540,7 +540,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
//
// Find out what memory location the dependent instruction stores.
Instruction *DepWrite = InstDep.getInst();
- AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
+ MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
// If we didn't get a useful location, or if it doesn't have a size, bail out.
if (!DepLoc.Ptr)
break;
@@ -645,7 +645,7 @@ static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
bool DSE::HandleFree(CallInst *F) {
bool MadeChange = false;
- AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+ MemoryLocation Loc = MemoryLocation(F->getOperand(0));
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
const DataLayout &DL = F->getModule()->getDataLayout();
@@ -809,7 +809,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
continue;
}
- AliasAnalysis::Location LoadedLoc;
+ MemoryLocation LoadedLoc;
// If we encounter a use of the pointer, it is no longer considered dead
if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
@@ -845,7 +845,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
-void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL) {
const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
@@ -864,8 +864,8 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
// Remove objects that could alias LoadedLoc.
DeadStackObjects.remove_if([&](Value *I) {
// See if the loaded location could alias the stack location.
- AliasAnalysis::Location StackLoc(
- I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
+ MemoryLocation StackLoc(I,
+ getPointerSize(I, DL, AA->getTargetLibraryInfo()));
return !AA->isNoAlias(StackLoc, LoadedLoc);
});
}
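
The bulk of this file's diff is a mechanical migration from the nested AliasAnalysis::Location type to the standalone MemoryLocation class (and likewise from AliasAnalysis::UnknownSize to MemoryLocation::UnknownSize). A minimal sketch of the replacement API against LLVM 3.7-era headers; the helper names below are illustrative, not part of the patch:

```cpp
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Preferred form: derive pointer, size, and AA metadata from the instruction.
static MemoryLocation locForStore(StoreInst *SI) {
  return MemoryLocation::get(SI);
}

// Explicit form, as used for free() and lifetime markers above. Size defaults
// to MemoryLocation::UnknownSize when only the pointer is known.
static MemoryLocation
locForPointer(const Value *Ptr, uint64_t Size = MemoryLocation::UnknownSize) {
  return MemoryLocation(Ptr, Size);
}
```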
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index d536a937dce1..8b629eaca9d4 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -72,7 +72,7 @@ struct SimpleValue {
isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
}
};
-}
+} // namespace
namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
@@ -85,7 +85,7 @@ template <> struct DenseMapInfo<SimpleValue> {
static unsigned getHashValue(SimpleValue Val);
static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
-}
+} // namespace llvm
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
Instruction *Inst = Val.Inst;
@@ -219,7 +219,7 @@ struct CallValue {
return true;
}
};
-}
+} // namespace
namespace llvm {
template <> struct DenseMapInfo<CallValue> {
@@ -232,7 +232,7 @@ template <> struct DenseMapInfo<CallValue> {
static unsigned getHashValue(CallValue Val);
static bool isEqual(CallValue LHS, CallValue RHS);
};
-}
+} // namespace llvm
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
Instruction *Inst = Val.Inst;
@@ -447,7 +447,7 @@ private:
ExpectedType);
}
};
-}
+} // namespace
bool EarlyCSE::processNode(DomTreeNode *Node) {
BasicBlock *BB = Node->getBlock();
@@ -764,7 +764,7 @@ public:
AU.setPreservesCFG();
}
};
-}
+} // namespace
char EarlyCSELegacyPass::ID = 0;
diff --git a/lib/Transforms/Scalar/FlattenCFGPass.cpp b/lib/Transforms/Scalar/FlattenCFGPass.cpp
index 0430c1898c8d..dd6ea8d455c5 100644
--- a/lib/Transforms/Scalar/FlattenCFGPass.cpp
+++ b/lib/Transforms/Scalar/FlattenCFGPass.cpp
@@ -36,7 +36,7 @@ public:
private:
AliasAnalysis *AA;
};
-}
+} // namespace
char FlattenCFGPass::ID = 0;
INITIALIZE_PASS_BEGIN(FlattenCFGPass, "flattencfg", "Flatten the CFG", false,
diff --git a/lib/Transforms/Scalar/Float2Int.cpp b/lib/Transforms/Scalar/Float2Int.cpp
index c9314229c38b..bb90c5f73239 100644
--- a/lib/Transforms/Scalar/Float2Int.cpp
+++ b/lib/Transforms/Scalar/Float2Int.cpp
@@ -79,7 +79,7 @@ namespace {
MapVector<Instruction*, Value*> ConvertedInsts;
LLVMContext *Ctx;
};
-}
+} // namespace
char Float2Int::ID = 0;
INITIALIZE_PASS(Float2Int, "float2int", "Float to int", false, false)
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 7770ddcb9d7a..d9308c4e3710 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -138,7 +138,7 @@ namespace {
uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
void verifyRemoved(const Value *) const;
};
-}
+} // namespace
namespace llvm {
template <> struct DenseMapInfo<Expression> {
@@ -159,7 +159,7 @@ template <> struct DenseMapInfo<Expression> {
}
};
-}
+} // namespace llvm
//===----------------------------------------------------------------------===//
// ValueTable Internal Functions
@@ -723,7 +723,7 @@ namespace {
};
char GVN::ID = 0;
-}
+} // namespace
// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
@@ -852,13 +852,12 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
/// If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
-/// the stored value. LoadedTy is the type of the load we want to replace and
-/// InsertPt is the place to insert new instructions.
+/// the stored value. LoadedTy is the type of the load we want to replace.
+/// IRB is the IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
-static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
- Type *LoadedTy,
- Instruction *InsertPt,
+static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
+ IRBuilder<> &IRB,
const DataLayout &DL) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
return nullptr;
@@ -874,12 +873,12 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Pointer to Pointer -> use bitcast.
if (StoredValTy->getScalarType()->isPointerTy() &&
LoadedTy->getScalarType()->isPointerTy())
- return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
+ return IRB.CreateBitCast(StoredVal, LoadedTy);
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->getScalarType()->isPointerTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
- StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
}
Type *TypeToCastTo = LoadedTy;
@@ -887,11 +886,11 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
- StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
+ StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo);
// Cast to pointer if the load needs a pointer type.
if (LoadedTy->getScalarType()->isPointerTy())
- StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
+ StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy);
return StoredVal;
}
@@ -904,35 +903,34 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->getScalarType()->isPointerTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
- StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
}
// Convert vectors and fp to integer, which can be manipulated.
if (!StoredValTy->isIntegerTy()) {
StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
- StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreateBitCast(StoredVal, StoredValTy);
}
// If this is a big-endian system, we need to shift the value down to the low
// bits so that a truncate will work.
if (DL.isBigEndian()) {
- Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
- StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
+ StoredVal = IRB.CreateLShr(StoredVal, StoreSize - LoadSize, "tmp");
}
// Truncate the integer to the right size now.
Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
- StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
+ StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc");
if (LoadedTy == NewIntTy)
return StoredVal;
// If the result is a pointer, inttoptr.
if (LoadedTy->getScalarType()->isPointerTy())
- return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
+ return IRB.CreateIntToPtr(StoredVal, LoadedTy, "inttoptr");
// Otherwise, bitcast.
- return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
+ return IRB.CreateBitCast(StoredVal, LoadedTy, "bitcast");
}
/// This function is called when we have a
@@ -1122,7 +1120,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
@@ -1145,7 +1143,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
if (LoadSize != StoreSize)
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
- return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
+ return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL);
}
/// This function is called when we have a
@@ -1219,7 +1217,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
// We know that this method is only called when the mem transfer fully
// provides the bits for the load.
@@ -1248,7 +1246,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
++NumBytesSet;
}
- return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);
+ return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL);
}
// Otherwise, this is a memcpy/memmove from a constant global.
@@ -1695,6 +1693,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
@@ -1761,6 +1761,8 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
if (isa<PHINode>(V))
V->takeName(LI);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
@@ -1928,8 +1930,9 @@ bool GVN::processLoad(LoadInst *L) {
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
if (StoredVal->getType() != L->getType()) {
+ IRBuilder<> Builder(L);
StoredVal =
- CoerceAvailableValueToLoadType(StoredVal, L->getType(), L, DL);
+ CoerceAvailableValueToLoadType(StoredVal, L->getType(), Builder, DL);
if (!StoredVal)
return false;
@@ -1953,7 +1956,9 @@ bool GVN::processLoad(LoadInst *L) {
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
if (DepLI->getType() != L->getType()) {
- AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L, DL);
+ IRBuilder<> Builder(L);
+ AvailableVal =
+ CoerceAvailableValueToLoadType(DepLI, L->getType(), Builder, DL);
if (!AvailableVal)
return false;
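
Two things happen in this file: PRE now copies the load's debug location onto the materialized value, and CoerceAvailableValueToLoadType builds its casts through an IRBuilder instead of `new CastInst(..., InsertPt)`, which centralizes the insertion point and lets the builder constant-fold. A hedged sketch of the builder pattern; the helper and its 64-bit-pointer assumption are mine, not the patch's:

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Reinterpret a stored pointer value as the type of load L via an integer.
static Value *coerceThroughInt(Value *StoredVal, LoadInst *L) {
  IRBuilder<> IRB(L); // new instructions are inserted immediately before L
  Type *IntPtrTy = IRB.getInt64Ty(); // assumption: 64-bit pointers
  Value *AsInt = IRB.CreatePtrToInt(StoredVal, IntPtrTy);
  return IRB.CreateIntToPtr(AsInt, L->getType());
}
```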
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 359a616c069d..e931382ea98f 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -136,7 +136,7 @@ namespace {
void SinkUnusedInvariants(Loop *L);
};
-}
+} // namespace
char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
@@ -494,7 +494,7 @@ struct RewritePhi {
RewritePhi(PHINode *P, unsigned I, Value *V, bool H, bool S)
: PN(P), Ith(I), Val(V), HighCost(H), SafePhi(S) {}
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// RewriteLoopExitValues - Optimize IV users outside the loop.
@@ -758,7 +758,7 @@ namespace {
WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr),
IsSigned(false) {}
};
-}
+} // namespace
/// visitCast - Update information about the induction variable that is
/// extended by this sign or zero extend operation. This is used to determine
@@ -1321,7 +1321,7 @@ namespace {
// Implement the interface used by simplifyUsersOfIV.
void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); }
};
-}
+} // namespace
/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV
/// users. Each successive simplification may push more users which may
@@ -2013,10 +2013,11 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Now that we're done iterating through lists, clean up any instructions
// which are now dead.
- while (!DeadInsts.empty())
- if (Instruction *Inst =
- dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
+ while (!DeadInsts.empty()) {
+ Value *V = static_cast<Value *>(DeadInsts.pop_back_val());
+ if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
+ }
// The Rewriter may not be used from this point on.
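
The rewritten drain loop avoids taking the address of the temporary WeakVH returned by pop_back_val(): the handle is first converted to a plain Value * and only then down-cast. A self-contained sketch of the same pattern, assuming 3.7-era headers:

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

static void drainDeadInsts(SmallVectorImpl<WeakVH> &DeadInsts) {
  while (!DeadInsts.empty()) {
    // A WeakVH may have been nulled while queued, hence dyn_cast_or_null.
    Value *V = static_cast<Value *>(DeadInsts.pop_back_val());
    if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
      RecursivelyDeleteTriviallyDeadInstructions(Inst);
  }
}
```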
diff --git a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index cbdacad8f28b..ce1a0ca8c7d9 100644
--- a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -222,7 +222,7 @@ public:
};
char InductiveRangeCheckElimination::ID = 0;
-}
+} // namespace
INITIALIZE_PASS(InductiveRangeCheckElimination, "irce",
"Inductive range check elimination", false, false)
@@ -618,7 +618,7 @@ public:
bool run();
};
-}
+} // namespace
void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
BasicBlock *ReplaceBy) {
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 711df417992b..7316db6ca02c 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -138,7 +138,7 @@ namespace {
bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
};
-}
+} // namespace
char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
@@ -758,67 +758,33 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
- // For a comparison where the LHS is outside this block, it's possible
- // that we've branched on it before. Used LVI to see if we can simplify
- // the branch based on that.
+ // If we're branching on a conditional, LVI might be able to determine
+ // its value at the branch instruction. We only handle comparisons
+ // against a constant at this time.
+ // TODO: This should be extended to handle switches as well.
BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
- pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- if (CondBr && CondConst && CondBr->isConditional() && PI != PE &&
- (!isa<Instruction>(CondCmp->getOperand(0)) ||
- cast<Instruction>(CondCmp->getOperand(0))->getParent() != BB)) {
- // For predecessor edge, determine if the comparison is true or false
- // on that edge. If they're all true or all false, we can simplify the
- // branch.
- // FIXME: We could handle mixed true/false by duplicating code.
- LazyValueInfo::Tristate Baseline =
- LVI->getPredicateOnEdge(CondCmp->getPredicate(), CondCmp->getOperand(0),
- CondConst, *PI, BB, CondCmp);
- if (Baseline != LazyValueInfo::Unknown) {
- // Check that all remaining incoming values match the first one.
- while (++PI != PE) {
- LazyValueInfo::Tristate Ret =
- LVI->getPredicateOnEdge(CondCmp->getPredicate(),
- CondCmp->getOperand(0), CondConst, *PI, BB,
- CondCmp);
- if (Ret != Baseline) break;
- }
-
- // If we terminated early, then one of the values didn't match.
- if (PI == PE) {
- unsigned ToRemove = Baseline == LazyValueInfo::True ? 1 : 0;
- unsigned ToKeep = Baseline == LazyValueInfo::True ? 0 : 1;
- CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
- BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
- CondBr->eraseFromParent();
- if (CondCmp->use_empty())
- CondCmp->eraseFromParent();
- else if (CondCmp->getParent() == BB) {
- // If the fact we just learned is true for all uses of the
- // condition, replace it with a constant value
- auto *CI = Baseline == LazyValueInfo::True ?
- ConstantInt::getTrue(CondCmp->getType()) :
- ConstantInt::getFalse(CondCmp->getType());
- CondCmp->replaceAllUsesWith(CI);
- CondCmp->eraseFromParent();
- }
- return true;
- }
- }
-
- } else if (CondBr && CondConst && CondBr->isConditional()) {
- // There might be an invariant in the same block with the conditional
- // that can determine the predicate.
-
+ if (CondBr && CondConst && CondBr->isConditional()) {
LazyValueInfo::Tristate Ret =
LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
- CondConst, CondCmp);
+ CondConst, CondBr);
if (Ret != LazyValueInfo::Unknown) {
unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
CondBr->eraseFromParent();
+ if (CondCmp->use_empty())
+ CondCmp->eraseFromParent();
+ else if (CondCmp->getParent() == BB) {
+ // If the fact we just learned is true for all uses of the
+ // condition, replace it with a constant value
+ auto *CI = Ret == LazyValueInfo::True ?
+ ConstantInt::getTrue(CondCmp->getType()) :
+ ConstantInt::getFalse(CondCmp->getType());
+ CondCmp->replaceAllUsesWith(CI);
+ CondCmp->eraseFromParent();
+ }
return true;
}
}
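
The net effect of this hunk: rather than querying getPredicateOnEdge once per predecessor edge and requiring a unanimous answer, the pass now makes a single query at the branch itself. A minimal sketch of that query; the wrapper function is illustrative:

```cpp
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Ask LVI whether `Cmp <pred> C` has a known value at the branch instruction.
static LazyValueInfo::Tristate queryAtBranch(LazyValueInfo *LVI, CmpInst *Cmp,
                                             Constant *C, BranchInst *CondBr) {
  return LVI->getPredicateAt(Cmp->getPredicate(), Cmp->getOperand(0), C,
                             CondBr);
}
```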
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index f0e6d641b180..e5019463bb5f 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -156,7 +156,7 @@ namespace {
/// Simple Analysis hook. Delete loop L from alias set map.
void deleteAnalysisLoop(Loop *L) override;
};
-}
+} // namespace
char LICM::ID = 0;
INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
@@ -777,7 +777,7 @@ namespace {
AST.deleteValue(I);
}
};
-} // end anon namespace
+} // namespace
/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
diff --git a/lib/Transforms/Scalar/LoadCombine.cpp b/lib/Transforms/Scalar/LoadCombine.cpp
index c19cd19059b2..3dbf6ac6ed08 100644
--- a/lib/Transforms/Scalar/LoadCombine.cpp
+++ b/lib/Transforms/Scalar/LoadCombine.cpp
@@ -77,7 +77,7 @@ private:
bool aggregateLoads(SmallVectorImpl<LoadPOPPair> &);
bool combineLoads(SmallVectorImpl<LoadPOPPair> &);
};
-}
+} // namespace
bool LoadCombine::doInitialization(Function &F) {
DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp
index 98b068edf582..02760ffe2c68 100644
--- a/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -57,7 +57,7 @@ namespace {
bool &Changed, BasicBlock *Preheader);
};
-}
+} // namespace
char LoopDeletion::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
diff --git a/lib/Transforms/Scalar/LoopDistribute.cpp b/lib/Transforms/Scalar/LoopDistribute.cpp
index a907d596e35b..d21a7db48c51 100644
--- a/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -630,26 +630,17 @@ private:
};
/// \brief Handles the loop versioning based on memchecks.
-class RuntimeCheckEmitter {
+class LoopVersioning {
public:
- RuntimeCheckEmitter(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
- DominatorTree *DT)
- : OrigLoop(L), NonDistributedLoop(nullptr), LAI(LAI), LI(LI), DT(DT) {}
-
- /// \brief Given the \p Partitions formed by Loop Distribution, it determines
- /// in which partition each pointer is used.
- void partitionPointers(InstPartitionContainer &Partitions) {
- // Set up partition id in PtrRtChecks. Ptr -> Access -> Intruction ->
- // Partition.
- PtrToPartition = Partitions.computePartitionSetForPointers(LAI);
-
- DEBUG(dbgs() << "\nPointers:\n");
- DEBUG(LAI.getRuntimePointerCheck()->print(dbgs(), 0, &PtrToPartition));
- }
+ LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
+ DominatorTree *DT,
+ const SmallVector<int, 8> *PtrToPartition = nullptr)
+ : OrigLoop(L), NonDistributedLoop(nullptr),
+ PtrToPartition(PtrToPartition), LAI(LAI), LI(LI), DT(DT) {}
/// \brief Returns true if we need memchecks to distribute the loop.
bool needsRuntimeChecks() const {
- return LAI.getRuntimePointerCheck()->needsAnyChecking(&PtrToPartition);
+ return LAI.getRuntimePointerCheck()->needsAnyChecking(PtrToPartition);
}
/// \brief Performs the CFG manipulation part of versioning the loop including
@@ -660,7 +651,7 @@ public:
// Add the memcheck in the original preheader (this is empty initially).
BasicBlock *MemCheckBB = OrigLoop->getLoopPreheader();
std::tie(FirstCheckInst, MemRuntimeCheck) =
- LAI.addRuntimeCheck(MemCheckBB->getTerminator(), &PtrToPartition);
+ LAI.addRuntimeCheck(MemCheckBB->getTerminator(), PtrToPartition);
assert(MemRuntimeCheck && "called even though needsAnyChecking = false");
// Rename the block to make the IR more readable.
@@ -733,10 +724,11 @@ private:
Loop *NonDistributedLoop;
/// \brief For each memory pointer it contains the partitionId it is used in.
+ /// If nullptr, no partitioning is used.
///
/// The I-th entry corresponds to I-th entry in LAI.getRuntimePointerCheck().
/// If the pointer is used in multiple partitions the entry is set to -1.
- SmallVector<int, 8> PtrToPartition;
+ const SmallVector<int, 8> *PtrToPartition;
/// \brief This maps the instructions from OrigLoop to their counterpart in
/// NonDistributedLoop.
@@ -929,11 +921,13 @@ private:
// If we need run-time checks to disambiguate pointers at run-time, version
// the loop now.
- RuntimeCheckEmitter RtCheckEmitter(LAI, L, LI, DT);
- RtCheckEmitter.partitionPointers(Partitions);
- if (RtCheckEmitter.needsRuntimeChecks()) {
- RtCheckEmitter.versionLoop(this);
- RtCheckEmitter.addPHINodes(DefsUsedOutside);
+ auto PtrToPartition = Partitions.computePartitionSetForPointers(LAI);
+ LoopVersioning LVer(LAI, L, LI, DT, &PtrToPartition);
+ if (LVer.needsRuntimeChecks()) {
+ DEBUG(dbgs() << "\nPointers:\n");
+ DEBUG(LAI.getRuntimePointerCheck()->print(dbgs(), 0, &PtrToPartition));
+ LVer.versionLoop(this);
+ LVer.addPHINodes(DefsUsedOutside);
}
// Create identical copies of the original loop for each partition and hook
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index f92ecd4efdae..3de1333a7c98 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -209,7 +209,7 @@ namespace {
bool runOnNoncountableLoop();
bool runOnCountableLoop();
};
-}
+} // namespace
char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
@@ -833,7 +833,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
// Get the location that may be stored across the loop. Since the access is
// strided positively through memory, we say that the modified location starts
// at the pointer and has infinite size.
- uint64_t AccessSize = AliasAnalysis::UnknownSize;
+ uint64_t AccessSize = MemoryLocation::UnknownSize;
// If the loop iterates a fixed number of times, we can refine the access size
// to be exactly the size of the memset, which is (BECount+1)*StoreSize
@@ -844,7 +844,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
// operand in the store. Store to &A[i] of 100 will always return may alias
// with store of &A[100], we need StoreLoc to be "A" with size of 100,
// which will then no-alias a store to &A[100].
- AliasAnalysis::Location StoreLoc(Ptr, AccessSize);
+ MemoryLocation StoreLoc(Ptr, AccessSize);
for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
++BI)
diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp
index e12502654751..4c40f249ce1d 100644
--- a/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -52,7 +52,7 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
-}
+} // namespace
char LoopInstSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp
index f584018299d1..25546553fd4d 100644
--- a/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -598,8 +598,8 @@ struct LoopInterchange : public FunctionPass {
bool LoopInterchangeLegality::areAllUsesReductions(Instruction *Ins, Loop *L) {
return !std::any_of(Ins->user_begin(), Ins->user_end(), [=](User *U) -> bool {
PHINode *UserIns = dyn_cast<PHINode>(U);
- ReductionDescriptor RD;
- return !UserIns || !ReductionDescriptor::isReductionPHI(UserIns, L, RD);
+ RecurrenceDescriptor RD;
+ return !UserIns || !RecurrenceDescriptor::isReductionPHI(UserIns, L, RD);
});
}
@@ -697,12 +697,12 @@ bool LoopInterchangeLegality::findInductionAndReductions(
if (!L->getLoopLatch() || !L->getLoopPredecessor())
return false;
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
- ReductionDescriptor RD;
+ RecurrenceDescriptor RD;
PHINode *PHI = cast<PHINode>(I);
ConstantInt *StepValue = nullptr;
if (isInductionPHI(PHI, SE, StepValue))
Inductions.push_back(PHI);
- else if (ReductionDescriptor::isReductionPHI(PHI, L, RD))
+ else if (RecurrenceDescriptor::isReductionPHI(PHI, L, RD))
Reductions.push_back(PHI);
else {
DEBUG(
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index ed103e6b8ed6..f6db9b114e3f 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -438,7 +438,7 @@ namespace {
bool reroll(Instruction *IV, Loop *L, BasicBlock *Header, const SCEV *IterCount,
ReductionTracker &Reductions);
};
-}
+} // namespace
char LoopReroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false)
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index a675e1289baf..2ba70ad1f1a7 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -79,7 +79,7 @@ namespace {
AssumptionCache *AC;
DominatorTree *DT;
};
-}
+} // namespace
char LoopRotate::ID = 0;
INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 4b59f3d2f6cc..ee7248691992 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -116,7 +116,7 @@ public:
void dump() const;
};
-}
+} // namespace
void RegSortData::print(raw_ostream &OS) const {
OS << "[NumUses=" << UsedByIndices.count() << ']';
@@ -157,7 +157,7 @@ public:
const_iterator end() const { return RegSequence.end(); }
};
-}
+} // namespace
void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
@@ -281,7 +281,7 @@ struct Formula {
void dump() const;
};
-}
+} // namespace
/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
@@ -903,7 +903,7 @@ private:
SmallPtrSetImpl<const SCEV *> *LoserRegs);
};
-}
+} // namespace
/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
@@ -1102,7 +1102,7 @@ struct LSRFixup {
void dump() const;
};
-}
+} // namespace
LSRFixup::LSRFixup()
: UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)),
@@ -1252,7 +1252,7 @@ public:
void dump() const;
};
-}
+} // namespace
/// HasFormula - Test whether this use as a formula which has the same
/// registers as the given formula.
@@ -1791,7 +1791,7 @@ public:
void dump() const;
};
-}
+} // namespace
/// OptimizeShadowIV - If IV is used in a int-to-float cast
/// inside the loop then try to eliminate the cast operation.
@@ -3644,7 +3644,7 @@ struct WorkItem {
void dump() const;
};
-}
+} // namespace
void WorkItem::print(raw_ostream &OS) const {
OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
@@ -4949,7 +4949,7 @@ private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
-}
+} // namespace
char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 4ccbfc953e0c..d702dc0b4ee9 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -229,7 +229,7 @@ namespace {
unsigned DynamicCostSavingsDiscount,
uint64_t UnrolledCost, uint64_t RolledDynamicCost);
};
-}
+} // namespace
char LoopUnroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
@@ -455,13 +455,15 @@ struct EstimatedUnrollCost {
///
/// Complete loop unrolling can make some loads constant, and we need to know
/// if that would expose any further optimization opportunities. This routine
-/// estimates this optimization. It assigns computed number of instructions,
-/// that potentially might be optimized away, to
-/// NumberOfOptimizedInstructions, and total number of instructions to
-/// UnrolledLoopSize (not counting blocks that won't be reached, if we were
-/// able to compute the condition).
-/// \returns false if we can't analyze the loop, or if we discovered that
-/// unrolling won't give anything. Otherwise, returns true.
+/// estimates this optimization. It computes the cost of the unrolled loop
+/// (UnrolledCost) and the dynamic cost of the original loop (RolledDynamicCost). By
+/// dynamic cost we mean that we won't count costs of blocks that are known not
+/// to be executed (i.e. if we have a branch in the loop and we know that at the
+/// given iteration its condition would be resolved to true, we won't add up the
+/// cost of the 'false'-block).
+/// \returns Optional value, holding the RolledDynamicCost and UnrolledCost. If
+/// the analysis failed (no benefits expected from the unrolling, or the loop is
+/// too big to analyze), the returned value is None.
Optional<EstimatedUnrollCost>
analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, ScalarEvolution &SE,
const TargetTransformInfo &TTI,
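
The rewritten doc-comment pins down the contract: the analysis either yields both costs or signals failure with None. A toy illustration of that Optional convention; the field comments restate the doc-comment above, and the numbers are invented:

```cpp
#include "llvm/ADT/Optional.h"
using namespace llvm;

struct EstimatedUnrollCost {
  unsigned UnrolledCost;      // static cost of the fully unrolled loop
  unsigned RolledDynamicCost; // dynamic cost of the rolled loop, not counting
                              // blocks known not to execute
};

static Optional<EstimatedUnrollCost> tryAnalyze(bool TooBigOrNoBenefit) {
  if (TooBigOrNoBenefit)
    return None; // analysis failed; the caller falls back to heuristics
  return EstimatedUnrollCost{/*UnrolledCost=*/25, /*RolledDynamicCost=*/100};
}
```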
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 988d2af3ea90..5bdc2ec88d4a 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -213,7 +213,7 @@ namespace {
BasicBlock **LoopExit = nullptr);
};
-}
+} // namespace
// Analyze loop. Check its size, and calculate whether it is possible to unswitch
// it. Returns true if we can unswitch this loop.
diff --git a/lib/Transforms/Scalar/LowerAtomic.cpp b/lib/Transforms/Scalar/LowerAtomic.cpp
index 3314e1ed41ab..b8b35d4249f0 100644
--- a/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -138,7 +138,7 @@ namespace {
return Changed;
}
};
-}
+} // namespace
char LowerAtomic::ID = 0;
INITIALIZE_PASS(LowerAtomic, "loweratomic",
diff --git a/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp b/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
index 0c47cbd5bfda..b845c038e67e 100644
--- a/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
+++ b/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
@@ -181,7 +181,7 @@ public:
bool runOnFunction(Function &F) override { return lowerExpectIntrinsic(F); }
};
-}
+} // namespace
char LowerExpectIntrinsic::ID = 0;
INITIALIZE_PASS(LowerExpectIntrinsic, "lower-expect",
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 2bdf670f67e3..2c9f93513ae2 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -153,7 +153,7 @@ struct MemsetRange {
bool isProfitableToUseMemset(const DataLayout &DL) const;
};
-} // end anon namespace
+} // namespace
bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
// If we found more than 4 stores to merge or 16 bytes, use memset.
@@ -237,7 +237,7 @@ public:
};
-} // end anon namespace
+} // namespace
/// addRange - Add a new store to the MemsetRanges data structure. This adds a
@@ -337,7 +337,7 @@ namespace {
AU.addPreserved<MemoryDependenceAnalysis>();
}
- // Helper fuctions
+ // Helper functions
bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
bool processMemCpy(MemCpyInst *M);
@@ -355,7 +355,7 @@ namespace {
};
char MemCpyOpt::ID = 0;
-}
+} // namespace
// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
@@ -510,7 +510,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// Check that nothing touches the dest of the "copy" between
// the call and the store.
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- AliasAnalysis::Location StoreLoc = MemoryLocation::get(SI);
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
E = C; I != E; --I) {
if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
@@ -997,7 +997,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
}
}
- AliasAnalysis::Location SrcLoc = MemoryLocation::getForSource(M);
+ MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
M, M->getParent());
@@ -1075,10 +1075,9 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
Value *ByValArg = CS.getArgument(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
- MemDepResult DepInfo =
- MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
- true, CS.getInstruction(),
- CS.getInstruction()->getParent());
+ MemDepResult DepInfo = MD->getPointerDependencyFrom(
+ MemoryLocation(ByValArg, ByValSize), true, CS.getInstruction(),
+ CS.getInstruction()->getParent());
if (!DepInfo.isClobber())
return false;
diff --git a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index 776dfb4d487f..886b6f5b0a2c 100644
--- a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -144,9 +144,8 @@ private:
// Routines for sinking stores
StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
- bool isStoreSinkBarrierInRange(const Instruction& Start,
- const Instruction& End,
- AliasAnalysis::Location Loc);
+ bool isStoreSinkBarrierInRange(const Instruction &Start,
+ const Instruction &End, MemoryLocation Loc);
bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
bool mergeStores(BasicBlock *BB);
// The mergeLoad/Store algorithms could have Size0 * Size1 complexity,
@@ -157,7 +156,7 @@ private:
};
char MergedLoadStoreMotion::ID = 0;
-}
+} // namespace
///
/// \brief createMergedLoadStoreMotionPass - The public interface to this file.
@@ -241,7 +240,7 @@ bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
bool MergedLoadStoreMotion::isLoadHoistBarrierInRange(const Instruction& Start,
const Instruction& End,
LoadInst* LI) {
- AliasAnalysis::Location Loc = MemoryLocation::get(LI);
+ MemoryLocation Loc = MemoryLocation::get(LI);
return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::Mod);
}
@@ -266,8 +265,8 @@ LoadInst *MergedLoadStoreMotion::canHoistFromBlock(BasicBlock *BB1,
LoadInst *Load1 = dyn_cast<LoadInst>(Inst);
BasicBlock *BB0 = Load0->getParent();
- AliasAnalysis::Location Loc0 = MemoryLocation::get(Load0);
- AliasAnalysis::Location Loc1 = MemoryLocation::get(Load1);
+ MemoryLocation Loc0 = MemoryLocation::get(Load0);
+ MemoryLocation Loc1 = MemoryLocation::get(Load1);
if (AA->isMustAlias(Loc0, Loc1) && Load0->isSameOperationAs(Load1) &&
!isLoadHoistBarrierInRange(BB1->front(), *Load1, Load1) &&
!isLoadHoistBarrierInRange(BB0->front(), *Load0, Load0)) {
@@ -400,10 +399,9 @@ bool MergedLoadStoreMotion::mergeLoads(BasicBlock *BB) {
/// happening it is considered a sink barrier.
///
-bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction& Start,
- const Instruction& End,
- AliasAnalysis::Location
- Loc) {
+bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start,
+ const Instruction &End,
+ MemoryLocation Loc) {
return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::ModRef);
}
@@ -425,8 +423,8 @@ StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,
StoreInst *Store1 = cast<StoreInst>(Inst);
- AliasAnalysis::Location Loc0 = MemoryLocation::get(Store0);
- AliasAnalysis::Location Loc1 = MemoryLocation::get(Store1);
+ MemoryLocation Loc0 = MemoryLocation::get(Store0);
+ MemoryLocation Loc1 = MemoryLocation::get(Store1);
if (AA->isMustAlias(Loc0, Loc1) && Store0->isSameOperationAs(Store1) &&
!isStoreSinkBarrierInRange(*(std::next(BasicBlock::iterator(Store1))),
BB1->back(), Loc1) &&
diff --git a/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 31d7df39c781..5423499723f7 100644
--- a/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -46,7 +46,7 @@ namespace {
};
char PartiallyInlineLibCalls::ID = 0;
-}
+} // namespace
INITIALIZE_PASS(PartiallyInlineLibCalls, "partially-inline-libcalls",
"Partially inline calls to library functions", false, false)
diff --git a/lib/Transforms/Scalar/PlaceSafepoints.cpp b/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 9ecaf102574a..670dcd24f75c 100644
--- a/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -160,7 +160,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
AU.setPreservesAll();
}
};
-}
+} // namespace
static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
@@ -181,7 +181,7 @@ struct PlaceSafepoints : public FunctionPass {
// if that was worth doing
}
};
-}
+} // namespace
// Insert a safepoint poll immediately before the given instruction. Does
// not handle the parsability of state at the runtime call, that's the
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 6c66b58729e9..9842fd7bb6c7 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -154,7 +154,7 @@ namespace {
unsigned SymbolicRank;
bool isOr;
};
-}
+} // namespace
namespace {
class Reassociate : public FunctionPass {
@@ -197,7 +197,7 @@ namespace {
void OptimizeInst(Instruction *I);
Instruction *canonicalizeNegConstExpr(Instruction *I);
};
-}
+} // namespace
XorOpnd::XorOpnd(Value *V) {
assert(!isa<ConstantInt>(V) && "No ConstantInt");
diff --git a/lib/Transforms/Scalar/Reg2Mem.cpp b/lib/Transforms/Scalar/Reg2Mem.cpp
index 1b46727c17bb..2ff56e67c9c6 100644
--- a/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -58,7 +58,7 @@ namespace {
bool runOnFunction(Function &F) override;
};
-}
+} // namespace
char RegToMem::ID = 0;
INITIALIZE_PASS_BEGIN(RegToMem, "reg2mem", "Demote all values to stack slots",
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 6f6ba72c6e6f..c15bc1bd7eca 100644
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -183,7 +183,7 @@ struct PartiallyConstructedSafepointRecord {
/// Maps a rematerialized copy to its original value.
RematerializedValueMapTy RematerializedValues;
};
-}
+} // namespace
/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
@@ -646,7 +646,7 @@ private:
llvm_unreachable("only three states!");
}
};
-}
+} // namespace
/// For a given value or instruction, figure out what base ptr it's derived
/// from. For gc objects, this is simply itself. On success, returns a value
/// which is the base pointer. (This is reliable and can be used for
@@ -1659,17 +1659,10 @@ static void relocationViaAlloca(
/// vector. Doing so has the effect of changing the output of a couple of
/// tests in ways which make them less useful in testing fused safepoints.
template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
- DenseSet<T> Seen;
- SmallVector<T, 128> TempVec;
- TempVec.reserve(Vec.size());
- for (auto Element : Vec)
- TempVec.push_back(Element);
- Vec.clear();
- for (auto V : TempVec) {
- if (Seen.insert(V).second) {
- Vec.push_back(V);
- }
- }
+ SmallSet<T, 8> Seen;
+ Vec.erase(std::remove_if(Vec.begin(), Vec.end(), [&](const T &V) {
+ return !Seen.insert(V).second;
+ }), Vec.end());
}
/// Insert holders so that each Value is obviously live through the entire
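
The new unique_unsorted is the erase-remove idiom with a stateful predicate: std::remove_if applies the predicate exactly once to each element, in order, so `Seen.insert(V).second` is true only for the first occurrence of each value, and duplicates are compacted away in place with no temporary vector. The same idiom in freestanding C++ (the container choice is mine):

```cpp
#include <algorithm>
#include <cassert>
#include <set>
#include <vector>

// Remove duplicates while preserving the order of first occurrences.
template <typename T> void unique_unsorted(std::vector<T> &Vec) {
  std::set<T> Seen;
  Vec.erase(std::remove_if(Vec.begin(), Vec.end(),
                           [&](const T &V) { return !Seen.insert(V).second; }),
            Vec.end());
}

int main() {
  std::vector<int> V = {3, 1, 3, 2, 1};
  unique_unsorted(V);
  assert((V == std::vector<int>{3, 1, 2}));
}
```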
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 056dd11b5ab3..f38b2b1dbf96 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -127,7 +127,7 @@ typedef llvm::IRBuilder<true, ConstantFolder, IRBuilderPrefixedInserter<true>>
typedef llvm::IRBuilder<false, ConstantFolder, IRBuilderPrefixedInserter<false>>
IRBuilderTy;
#endif
-}
+} // namespace
namespace {
/// \brief A used slice of an alloca.
@@ -595,7 +595,7 @@ private:
/// the alloca.
SmallVector<Use *, 8> DeadOperands;
};
-}
+} // namespace
static Value *foldSelectInst(SelectInst &SI) {
// If the condition being selected on is a constant or the same value is
@@ -1173,7 +1173,7 @@ public:
}
}
};
-} // end anon namespace
+} // namespace
namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
@@ -1268,7 +1268,7 @@ private:
void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
bool promoteAllocas(Function &F);
};
-}
+} // namespace
char SROA::ID = 0;
@@ -3119,7 +3119,7 @@ private:
return true;
}
};
-}
+} // namespace
namespace {
/// \brief Visitor to rewrite aggregate loads and stores as scalar.
@@ -3327,7 +3327,7 @@ private:
return false;
}
};
-}
+} // namespace
/// \brief Strip aggregate type wrapping.
///
diff --git a/lib/Transforms/Scalar/SampleProfile.cpp b/lib/Transforms/Scalar/SampleProfile.cpp
index 3480cd499127..69e3a67aa8c1 100644
--- a/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/lib/Transforms/Scalar/SampleProfile.cpp
@@ -174,7 +174,7 @@ protected:
/// \brief Flag indicating whether the profile input loaded successfully.
bool ProfileIsValid;
};
-}
+} // namespace
/// \brief Print the weight of edge \p E on stream \p OS.
///
@@ -282,7 +282,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) {
/// \brief Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
-/// number of times as \p BB1. To do this, it traverses all the the
+/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index d955da7ce75d..e42c3daab8d7 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -221,7 +221,7 @@ namespace {
}
};
-}
+} // namespace
char SROA_DT::ID = 0;
char SROA_SSAUp::ID = 0;
@@ -1123,7 +1123,7 @@ public:
}
}
};
-} // end anon namespace
+} // namespace
/// isSafeSelectToSpeculate - Select instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers and then
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index f0e3ffdb95ac..0733daf40f39 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -220,7 +220,7 @@ struct CFGSimplifyPass : public FunctionPass {
AU.addRequired<TargetTransformInfoWrapperPass>();
}
};
-}
+} // namespace
char CFGSimplifyPass::ID = 0;
INITIALIZE_PASS_BEGIN(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 078c6a921a08..f49f4eaaedcb 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -163,7 +163,7 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
}
if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
- AliasAnalysis::Location Loc = MemoryLocation::get(L);
+ MemoryLocation Loc = MemoryLocation::get(L);
for (Instruction *S : Stores)
if (AA->getModRefInfo(S, Loc) & AliasAnalysis::Mod)
return false;
diff --git a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index 453503ab61da..f32769c24110 100644
--- a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -265,8 +265,10 @@ static bool isGEPFoldable(GetElementPtrInst *GEP,
BaseOffset += DL->getStructLayout(STy)->getElementOffset(Field);
}
}
+
+ unsigned AddrSpace = GEP->getPointerAddressSpace();
return TTI->isLegalAddressingMode(GEP->getType()->getElementType(), BaseGV,
- BaseOffset, HasBaseReg, Scale);
+ BaseOffset, HasBaseReg, Scale, AddrSpace);
}
// Returns whether (Base + Index * Stride) can be folded to an addressing mode.
@@ -630,6 +632,15 @@ void StraightLineStrengthReduce::rewriteCandidateWithBasis(
// trivially dead.
RecursivelyDeleteTriviallyDeadInstructions(Bump);
} else {
+ // It's tempting to preserve nsw on Bump and/or Reduced. However, it's
+ // usually unsound, e.g.,
+ //
+ // X = (-2 +nsw 1) *nsw INT_MAX
+ // Y = (-2 +nsw 3) *nsw INT_MAX
+ // =>
+ // Y = X + 2 * INT_MAX
+ //
+ // Neither + nor * in the resultant expression is nsw.
Reduced = Builder.CreateAdd(Basis.Ins, Bump);
}
break;
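
The comment's counterexample is worth spelling out: X = (-2 + 1) * INT_MAX = -INT_MAX and Y = (-2 + 3) * INT_MAX = INT_MAX both fit in i32, yet rewriting Y in terms of X needs Bump = Y - X = 2 * INT_MAX, which does not. A short check in plain C++, widened to 64 bits so the overflow is observable rather than undefined:

```cpp
#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  const int64_t IntMax = std::numeric_limits<int32_t>::max();
  int64_t X = (-2 + 1) * IntMax; // -INT_MAX: representable in i32
  int64_t Y = (-2 + 3) * IntMax; //  INT_MAX: representable in i32
  int64_t Bump = Y - X;          // 2 * INT_MAX: NOT representable in i32
  std::printf("Bump = %lld, fits in i32: %s\n", (long long)Bump,
              Bump <= IntMax ? "yes" : "no");
}
```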
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 9eef1327c3f6..d23f5153c188 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -120,7 +120,7 @@ namespace {
bool CanMoveAboveCall(Instruction *I, CallInst *CI);
Value *CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI);
};
-}
+} // namespace
char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim",
@@ -158,6 +158,9 @@ bool TailCallElim::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
+ if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
+ return false;
+
bool AllCallsAreTailCalls = false;
bool Modified = markTails(F, AllCallsAreTailCalls);
if (AllCallsAreTailCalls)
@@ -243,7 +246,7 @@ struct AllocaDerivedValueTracker {
SmallPtrSet<Instruction *, 32> AllocaUsers;
SmallPtrSet<Instruction *, 32> EscapePoints;
};
-}
+} // namespace
bool TailCallElim::markTails(Function &F, bool &AllCallsAreTailCalls) {
if (F.callsFunctionThatReturnsTwice())