Diffstat (limited to 'llvm/lib/Transforms')
 -rw-r--r--  llvm/lib/Transforms/Coroutines/CoroSplit.cpp              | 98
 -rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp  | 20
 -rw-r--r--  llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp       | 13
 -rw-r--r--  llvm/lib/Transforms/Scalar/MergeICmps.cpp                 |  4
 -rw-r--r--  llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp     |  4
 -rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp           | 25
 6 files changed, 113 insertions(+), 51 deletions(-)
diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
index b6932dbbfc3f..fc83befe3950 100644
--- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -29,6 +29,7 @@
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
@@ -1174,6 +1175,15 @@ scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
DenseMap<Value *, Value *> ResolvedValues;
BasicBlock *UnconditionalSucc = nullptr;
+ assert(InitialInst->getModule());
+ const DataLayout &DL = InitialInst->getModule()->getDataLayout();
+
+ auto TryResolveConstant = [&ResolvedValues](Value *V) {
+ auto It = ResolvedValues.find(V);
+ if (It != ResolvedValues.end())
+ V = It->second;
+ return dyn_cast<ConstantInt>(V);
+ };
Instruction *I = InitialInst;
while (I->isTerminator() ||
@@ -1190,47 +1200,65 @@ static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
}
if (auto *BR = dyn_cast<BranchInst>(I)) {
if (BR->isUnconditional()) {
- BasicBlock *BB = BR->getSuccessor(0);
+ BasicBlock *Succ = BR->getSuccessor(0);
if (I == InitialInst)
- UnconditionalSucc = BB;
- scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
- I = BB->getFirstNonPHIOrDbgOrLifetime();
+ UnconditionalSucc = Succ;
+ scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
+ I = Succ->getFirstNonPHIOrDbgOrLifetime();
+ continue;
+ }
+
+ BasicBlock *BB = BR->getParent();
+ // Handle the case where the condition of the conditional branch is
+ // constant, e.g.,
+ //
+ // br i1 false, label %cleanup, label %CoroEnd
+ //
+ // This is possible during the transformation, and we can continue
+ // simplifying in that case.
+ if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
+ // Handle this branch in the next iteration.
+ I = BB->getTerminator();
continue;
}
} else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
+ // If the number of cases in the suspend switch instruction is reduced
+ // to 1, it is simplified to a CmpInst by llvm::ConstantFoldTerminator.
auto *BR = dyn_cast<BranchInst>(I->getNextNode());
- if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
- // If the case number of suspended switch instruction is reduced to
- // 1, then it is simplified to CmpInst in llvm::ConstantFoldTerminator.
- // And the comparsion looks like : %cond = icmp eq i8 %V, constant.
- ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
- if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
- Value *V = CondCmp->getOperand(0);
- auto it = ResolvedValues.find(V);
- if (it != ResolvedValues.end())
- V = it->second;
-
- if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
- BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
- ? BR->getSuccessor(0)
- : BR->getSuccessor(1);
- scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
- I = BB->getFirstNonPHIOrDbgOrLifetime();
- continue;
- }
- }
- }
+ if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
+ return false;
+
+ // The comparison looks like: %cond = icmp eq i8 %V, constant.
+ // So we try to resolve the constant for the first operand only, since
+ // the second operand should be a literal constant by design.
+ ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
+ auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
+ if (!Cond0 || !Cond1)
+ return false;
+
+ // Both operands of the CmpInst are constant, so we can evaluate it
+ // immediately to get the destination.
+ auto *ConstResult =
+ dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
+ CondCmp->getPredicate(), Cond0, Cond1, DL));
+ if (!ConstResult)
+ return false;
+
+ CondCmp->replaceAllUsesWith(ConstResult);
+ CondCmp->eraseFromParent();
+
+ // Handle this branch in the next iteration.
+ I = BR;
+ continue;
} else if (auto *SI = dyn_cast<SwitchInst>(I)) {
- Value *V = SI->getCondition();
- auto it = ResolvedValues.find(V);
- if (it != ResolvedValues.end())
- V = it->second;
- if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
- BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
- scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
- I = BB->getFirstNonPHIOrDbgOrLifetime();
- continue;
- }
+ ConstantInt *Cond = TryResolveConstant(SI->getCondition());
+ if (!Cond)
+ return false;
+
+ BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
+ scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
+ I = BB->getFirstNonPHIOrDbgOrLifetime();
+ continue;
}
return false;
}
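
The block above folds the comparison as soon as both of its operands resolve to constants. A minimal standalone sketch of that folding step, using the same ConstantFoldCompareInstOperands API; the driver main(), the module name, and the concrete constant values are illustrative assumptions, not part of the patch:

    // Sketch: evaluate `icmp eq i8 0, 1` to a constant, as the patched
    // simplifyTerminatorLeadingToRet does once both operands are ConstantInt.
    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/InstrTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/IR/Type.h"
    #include "llvm/Support/Casting.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("fold-demo", Ctx); // illustrative module, default data layout
      const DataLayout &DL = M.getDataLayout();

      auto *Cond0 = ConstantInt::get(Type::getInt8Ty(Ctx), 0);
      auto *Cond1 = ConstantInt::get(Type::getInt8Ty(Ctx), 1);

      // Both operands are constant, so the compare folds to i1 false.
      auto *Folded = dyn_cast_or_null<ConstantInt>(
          ConstantFoldCompareInstOperands(CmpInst::ICMP_EQ, Cond0, Cond1, DL));
      if (Folded)
        outs() << "folded to " << Folded->getZExtValue() << "\n";
      return 0;
    }
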
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 4e3b18e805ee..71b3a411cc18 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2843,6 +2843,26 @@ static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
}
assert(FreeInstrBB->size() == 1 &&
"Only the branch instruction should remain");
+
+ // Now that we've moved the call to free before the NULL check, we have to
+ // remove any attributes on its parameter that imply it's non-null, because
+ // those attributes might have only been valid because of the NULL check, and
+ // we can get miscompiles if we keep them. This is conservative if non-null is
+ // also implied by something other than the NULL check, but it's guaranteed to
+ // be correct, and the conservativeness won't matter in practice, since the
+ // attributes are irrelevant for the call to free itself and the pointer
+ // shouldn't be used after the call.
+ AttributeList Attrs = FI.getAttributes();
+ Attrs = Attrs.removeParamAttribute(FI.getContext(), 0, Attribute::NonNull);
+ Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
+ if (Dereferenceable.isValid()) {
+ uint64_t Bytes = Dereferenceable.getDereferenceableBytes();
+ Attrs = Attrs.removeParamAttribute(FI.getContext(), 0,
+ Attribute::Dereferenceable);
+ Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.getContext(), 0, Bytes);
+ }
+ FI.setAttributes(Attrs);
+
return &FI;
}
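
A compact standalone sketch of the attribute rewrite above, applied to a bare AttributeList instead of an actual call to free; the context, parameter index 0, and the dereferenceable byte count of 8 are illustrative assumptions:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"
    #include <cstdint>

    using namespace llvm;

    int main() {
      LLVMContext Ctx;

      // Start from a parameter carrying nonnull and dereferenceable(8).
      AttributeList Attrs;
      Attrs = Attrs.addParamAttribute(Ctx, 0, Attribute::NonNull);
      Attrs = Attrs.addDereferenceableParamAttr(Ctx, 0, 8);

      // Drop nonnull and demote dereferenceable(N) to
      // dereferenceable_or_null(N), mirroring what
      // tryToMoveFreeBeforeNullTest now does to the free call.
      Attrs = Attrs.removeParamAttribute(Ctx, 0, Attribute::NonNull);
      Attribute Deref = Attrs.getParamAttr(0, Attribute::Dereferenceable);
      if (Deref.isValid()) {
        uint64_t Bytes = Deref.getDereferenceableBytes();
        Attrs = Attrs.removeParamAttribute(Ctx, 0, Attribute::Dereferenceable);
        Attrs = Attrs.addDereferenceableOrNullParamAttr(Ctx, 0, Bytes);
      }

      // Prints "dereferenceable_or_null(8)".
      outs() << Attrs.getParamAttr(0, Attribute::DereferenceableOrNull)
                    .getAsString()
             << "\n";
      return 0;
    }
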
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index d22b3f409585..9d8130d1ac02 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1303,17 +1303,10 @@ struct DSEState {
/// loop. In particular, this guarantees that it only references a single
/// MemoryLocation during execution of the containing function.
bool isGuaranteedLoopInvariant(const Value *Ptr) {
- auto IsGuaranteedLoopInvariantBase = [this](const Value *Ptr) {
+ auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
Ptr = Ptr->stripPointerCasts();
- if (auto *I = dyn_cast<Instruction>(Ptr)) {
- if (isa<AllocaInst>(Ptr))
- return true;
-
- if (isAllocLikeFn(I, &TLI))
- return true;
-
- return false;
- }
+ if (auto *I = dyn_cast<Instruction>(Ptr))
+ return I->getParent()->isEntryBlock();
return true;
};
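
The relaxed predicate accepts any instruction defined in the entry block: the entry block has no predecessors and cannot sit inside a loop, so such a definition executes at most once per function invocation and therefore names a single MemoryLocation. A tiny standalone sketch of the isEntryBlock() query it relies on (the function shape and names are illustrative assumptions):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("dse-demo", Ctx);
      IRBuilder<> B(Ctx);

      auto *FnTy = FunctionType::get(B.getVoidTy(), /*isVarArg=*/false);
      Function *F = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
      BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
      BasicBlock *Loop = BasicBlock::Create(Ctx, "loop", F);

      // %p is defined in the entry block and thus executes at most once per
      // call; %q is defined inside the self-loop, so the new check rejects it.
      B.SetInsertPoint(Entry);
      AllocaInst *P = B.CreateAlloca(B.getInt32Ty(), nullptr, "p");
      B.CreateBr(Loop);
      B.SetInsertPoint(Loop);
      AllocaInst *Q = B.CreateAlloca(B.getInt32Ty(), nullptr, "q");
      B.CreateBr(Loop);

      outs() << (P->getParent()->isEntryBlock() ? "p: entry\n" : "p: no\n")
             << (Q->getParent()->isEntryBlock() ? "q: entry\n" : "q: no\n");
      return 0;
    }
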
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index f13f24ad2027..a04d4ef3c086 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -154,6 +154,10 @@ BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
return {};
}
Value *const Addr = LoadI->getOperand(0);
+ if (Addr->getType()->getPointerAddressSpace() != 0) {
+ LLVM_DEBUG(dbgs() << "from non-zero AddressSpace\n");
+ return {};
+ }
auto *const GEP = dyn_cast<GetElementPtrInst>(Addr);
if (!GEP)
return {};
diff --git a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
index 85e5adaeaf5e..3127432dc6c9 100644
--- a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
+++ b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
@@ -144,6 +144,10 @@ static void convertToRelLookupTable(GlobalVariable &LookupTable) {
Value *Offset =
Builder.CreateShl(Index, ConstantInt::get(IntTy, 2), "reltable.shift");
+ // Insert the call to the load.relative intrinsic before the LOAD.
+ // The GEP might not be immediately followed by the LOAD; it could be hoisted
+ // outside the loop, or another instruction might be inserted between them.
+ Builder.SetInsertPoint(Load);
Function *LoadRelIntrinsic = llvm::Intrinsic::getDeclaration(
&M, Intrinsic::load_relative, {Index->getType()});
Value *Base = Builder.CreateBitCast(RelLookupTable, Builder.getInt8PtrTy());
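
The fix repositions the builder at the load before emitting the llvm.load.relative call, so the intrinsic ends up next to the LOAD even when the GEP has been hoisted elsewhere. A small standalone sketch of that IRBuilder behaviour, not specific to lookup tables (function shape and names are illustrative assumptions): SetInsertPoint(Instruction *) makes subsequent Create* calls insert immediately before that instruction.

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("builder-demo", Ctx);
      IRBuilder<> B(Ctx);

      auto *FnTy = FunctionType::get(B.getInt32Ty(), {B.getInt32Ty()},
                                     /*isVarArg=*/false);
      Function *F = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);

      B.SetInsertPoint(BB);
      Value *Shift = B.CreateShl(F->getArg(0), B.getInt32(2), "reltable.shift");
      ReturnInst *Ret = B.CreateRet(Shift);

      // Reposition the builder: the next instruction is emitted right before
      // the return, regardless of what was created since the first insert
      // point.
      B.SetInsertPoint(Ret);
      B.CreateAdd(Shift, Shift, "placed.before.ret");

      M.print(outs(), nullptr);
      return 0;
    }
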
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index cc3f5c7d4b48..1d06bc7d79a7 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -5430,8 +5430,11 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
// The pointer operand uses an in-tree scalar so we add the new BitCast
// to ExternalUses list to make sure that an extract will be generated
// in the future.
- if (getTreeEntry(PO))
- ExternalUses.emplace_back(PO, cast<User>(VecPtr), 0);
+ if (TreeEntry *Entry = getTreeEntry(PO)) {
+ // Find which lane we need to extract.
+ unsigned FoundLane = Entry->findLaneForValue(PO);
+ ExternalUses.emplace_back(PO, cast<User>(VecPtr), FoundLane);
+ }
NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
} else {
@@ -5474,8 +5477,12 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
// The pointer operand uses an in-tree scalar, so add the new BitCast to
// ExternalUses to make sure that an extract will be generated in the
// future.
- if (getTreeEntry(ScalarPtr))
- ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));
+ if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) {
+ // Find which lane we need to extract.
+ unsigned FoundLane = Entry->findLaneForValue(ScalarPtr);
+ ExternalUses.push_back(
+ ExternalUser(ScalarPtr, cast<User>(VecPtr), FoundLane));
+ }
Value *V = propagateMetadata(ST, E->Scalars);
@@ -5577,8 +5584,14 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
// The scalar argument uses an in-tree scalar so we add the new vectorized
// call to ExternalUses list to make sure that an extract will be
// generated in the future.
- if (ScalarArg && getTreeEntry(ScalarArg))
- ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
+ if (ScalarArg) {
+ if (TreeEntry *Entry = getTreeEntry(ScalarArg)) {
+ // Find which lane we need to extract.
+ unsigned FoundLane = Entry->findLaneForValue(ScalarArg);
+ ExternalUses.push_back(
+ ExternalUser(ScalarArg, cast<User>(V), FoundLane));
+ }
+ }
propagateIRFlags(V, E->Scalars, VL0);
ShuffleBuilder.addMask(E->ReuseShuffleIndices);
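
Recording the actual lane matters because each ExternalUses entry is later materialized as an extractelement at that lane; the old hard-coded lane 0 was only correct when the in-tree scalar happened to be the first vector element. A minimal standalone sketch of the resulting shape, extracting an assumed lane 2 from a vector value (not the SLP vectorizer's own code; names are illustrative):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("slp-demo", Ctx);
      IRBuilder<> B(Ctx);

      auto *VecTy = FixedVectorType::get(B.getInt32Ty(), 4);
      auto *FnTy = FunctionType::get(B.getInt32Ty(), {VecTy},
                                     /*isVarArg=*/false);
      Function *F =
          Function::Create(FnTy, Function::ExternalLinkage, "use_lane", M);
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
      B.SetInsertPoint(BB);

      // An external user of an in-tree scalar reads the lane that scalar
      // occupies in the vectorized value, e.g. lane 2 here.
      unsigned FoundLane = 2;
      Value *Extract =
          B.CreateExtractElement(F->getArg(0), FoundLane, "extract");
      B.CreateRet(Extract);

      M.print(outs(), nullptr);
      return 0;
    }
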