author    Dimitry Andric <dim@FreeBSD.org>  2021-07-29 20:15:26 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2021-07-29 20:15:26 +0000
commit    344a3780b2e33f6ca763666c380202b18aab72a3 (patch)
tree      f0b203ee6eb71d7fdd792373e3c81eb18d6934dd /llvm/lib/Analysis/VectorUtils.cpp
parent    b60736ec1405bb0a8dd40989f67ef4c93da068ab (diff)
Merge llvm-project main llvmorg-13-init-16847-g88e66fa60ae5, the last commit before the upstream release/13.x branch was created.
Diffstat (limited to 'llvm/lib/Analysis/VectorUtils.cpp')
-rw-r--r--  llvm/lib/Analysis/VectorUtils.cpp  51
1 file changed, 32 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 9a4c96b6f7c2..0a14a1432934 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -114,6 +114,16 @@ bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
   }
 }
 
+bool llvm::hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID,
+                                                  unsigned ScalarOpdIdx) {
+  switch (ID) {
+  case Intrinsic::powi:
+    return (ScalarOpdIdx == 1);
+  default:
+    return false;
+  }
+}
+
 /// Returns intrinsic ID for call.
 /// For the input call instruction it finds the mapping intrinsic and returns
 /// its ID; in case it does not find it, it returns not_intrinsic.
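The new hook reports which operands of a vector intrinsic both stay scalar and participate in the intrinsic's overloaded signature, so the scalar operand's type must be carried along when the widened intrinsic is named; for llvm.powi that is the exponent at index 1. A minimal caller sketch follows (the helper collectOverloadedScalarTypes is hypothetical, not part of this commit):

// Hypothetical caller: collect the types of overloaded scalar operands
// (e.g. the exponent of llvm.powi) for later use when constructing the
// widened intrinsic. Sketch only, assuming the declarations in
// llvm/Analysis/VectorUtils.h.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static void collectOverloadedScalarTypes(CallInst *CI, Intrinsic::ID ID,
                                         SmallVectorImpl<Type *> &Tys) {
  for (unsigned Idx = 0, E = CI->arg_size(); Idx != E; ++Idx)
    if (hasVectorInstrinsicOverloadedScalarOpd(ID, Idx))
      Tys.push_back(CI->getArgOperand(Idx)->getType()); // stays scalar
}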
@@ -586,8 +596,8 @@ llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
 
   for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
     uint64_t LeaderDemandedBits = 0;
-    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
-      LeaderDemandedBits |= DBits[*MI];
+    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
+      LeaderDemandedBits |= DBits[M];
 
     uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                      llvm::countLeadingZeros(LeaderDemandedBits);
@@ -600,22 +610,22 @@ llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
     // indvars.
     // If we are required to shrink a PHI, abandon this entire equivalence class.
     bool Abort = false;
-    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
-      if (isa<PHINode>(*MI) && MinBW < (*MI)->getType()->getScalarSizeInBits()) {
+    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
+      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
         Abort = true;
         break;
       }
     if (Abort)
       continue;
 
-    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
-      if (!isa<Instruction>(*MI))
+    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
+      if (!isa<Instruction>(M))
         continue;
-      Type *Ty = (*MI)->getType();
-      if (Roots.count(*MI))
-        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
+      Type *Ty = M->getType();
+      if (Roots.count(M))
+        Ty = cast<Instruction>(M)->getOperand(0)->getType();
       if (MinBW < Ty->getScalarSizeInBits())
-        MinBWs[cast<Instruction>(*MI)] = MinBW;
+        MinBWs[cast<Instruction>(M)] = MinBW;
     }
   }
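These hunks are a mechanical cleanup: the explicit iterator loops over each equivalence class are rewritten with llvm::make_range, which adapts a (begin, end) iterator pair for use in a range-based for. A self-contained sketch of the idiom, using a standard container purely for illustration:

// llvm::make_range wraps any (begin, end) pair in an iterator_range,
// letting a range-based for replace manual iterator bookkeeping, so the
// loop body sees the element directly instead of dereferencing *MI.
#include "llvm/ADT/iterator_range.h"
#include <vector>

unsigned orAll(const std::vector<unsigned> &Bits) {
  unsigned Acc = 0;
  for (unsigned B : llvm::make_range(Bits.begin(), Bits.end()))
    Acc |= B;
  return Acc;
}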
@@ -708,6 +718,8 @@ MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
 
 /// \returns \p I after propagating metadata from \p VL.
 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
+  if (VL.empty())
+    return Inst;
   Instruction *I0 = cast<Instruction>(VL[0]);
   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
   I0->getAllMetadataOtherThanDebugLoc(Metadata);
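The added guard makes propagateMetadata safe for an empty value list: rather than dereferencing VL[0] (out of bounds on an empty ArrayRef), it now returns the instruction unchanged. Presumably callers can then pass whatever they collected without pre-checking, as in this hypothetical call site (names illustrative only):

// Sketch: Sources may legitimately end up empty, in which case Widened
// is returned unchanged instead of crashing on VL[0].
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
using namespace llvm;

Instruction *propagateFromSources(Instruction *Widened,
                                  const SmallVectorImpl<Value *> &Sources) {
  return propagateMetadata(Widened, Sources);
}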
@@ -891,7 +903,6 @@ bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
   return true;
 }
 
-
 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
   assert(isa<VectorType>(Mask->getType()) &&
          isa<IntegerType>(Mask->getType()->getScalarType()) &&
@@ -957,12 +968,11 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
   DFS.perform(LI);
   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
     for (auto &I : *BB) {
-      auto *LI = dyn_cast<LoadInst>(&I);
-      auto *SI = dyn_cast<StoreInst>(&I);
-      if (!LI && !SI)
+      Value *Ptr = getLoadStorePointerOperand(&I);
+      if (!Ptr)
         continue;
+      Type *ElementTy = getLoadStoreType(&I);
 
-      Value *Ptr = getLoadStorePointerOperand(&I);
       // We don't check wrapping here because we don't know yet if Ptr will be
       // part of a full group or a group with gaps. Checking wrapping for all
       // pointers (even those that end up in groups with no gaps) will be overly
@@ -974,8 +984,7 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
 
       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
-      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
-      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
+      uint64_t Size = DL.getTypeAllocSize(ElementTy);
       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                               getLoadStoreAlignment(&I));
     }
@@ -1300,10 +1309,14 @@ void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
 
 std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                        StringRef ScalarName, unsigned numArgs,
-                                       unsigned VF) {
+                                       ElementCount VF) {
   SmallString<256> Buffer;
   llvm::raw_svector_ostream Out(Buffer);
-  Out << "_ZGV" << VFABI::_LLVM_ << "N" << VF;
+  Out << "_ZGV" << VFABI::_LLVM_ << "N";
+  if (VF.isScalable())
+    Out << 'x';
+  else
+    Out << VF.getFixedValue();
   for (unsigned I = 0; I < numArgs; ++I)
     Out << "v";
   Out << "_" << ScalarName << "(" << VectorName << ")";
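Switching the parameter from unsigned to ElementCount lets mangleTLIVectorName express scalable vectorization factors in the vector-function-ABI name: a fixed VF is printed as its number, a scalable VF as 'x'. For example (function names "foo"/"foo_vec" are illustrative only, assuming the declarations in llvm/Analysis/VectorUtils.h):

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Support/TypeSize.h"
#include <string>
using namespace llvm;

// A two-argument "foo" mangled for a fixed and a scalable VF.
std::string Fixed = VFABI::mangleTLIVectorName(
    "foo_vec", "foo", /*numArgs=*/2, ElementCount::getFixed(4));
// -> "_ZGV_LLVM_N4vv_foo(foo_vec)"
std::string Scalable = VFABI::mangleTLIVectorName(
    "foo_vec", "foo", /*numArgs=*/2, ElementCount::getScalable(4));
// -> "_ZGV_LLVM_Nxvv_foo(foo_vec)"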