Diffstat (limited to 'contrib/llvm-project/llvm/lib/IR')
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/AbstractCallSite.cpp  153
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp  4871
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Assumptions.cpp  111
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/AttributeImpl.h  333
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Attributes.cpp  2077
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp  4706
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/BasicBlock.cpp  522
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/BuiltinGCs.cpp  130
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Comdat.cpp  83
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp  2281
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ConstantRange.cpp  1820
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Constants.cpp  3490
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ConstantsContext.h  698
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Core.cpp  4202
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp  1157
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DataLayout.cpp  1012
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp  1617
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp  1840
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DebugLoc.cpp  119
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DiagnosticHandler.cpp  87
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DiagnosticInfo.cpp  440
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/DiagnosticPrinter.cpp  116
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Dominators.cpp  426
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/FPEnv.cpp  130
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Function.cpp  2056
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/GCStrategy.cpp  38
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/GVMaterializer.cpp  17
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Globals.cpp  581
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp  1305
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/IRPrintingPasses.cpp  140
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/InlineAsm.cpp  316
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Instruction.cpp  878
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Instructions.cpp  4846
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp  726
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/LLVMContext.cpp  380
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.cpp  265
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h  1582
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/LLVMRemarkStreamer.cpp  173
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp  1766
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/MDBuilder.cpp  325
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Mangler.cpp  260
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Metadata.cpp  1655
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/MetadataImpl.h  58
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Module.cpp  850
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp  670
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Operator.cpp  250
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/OptBisect.cpp  58
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Pass.cpp  291
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/PassInstrumentation.cpp  41
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/PassManager.cpp  154
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/PassRegistry.cpp  120
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/PassTimingInfo.cpp  293
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp  88
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ProfileSummary.cpp  267
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/PseudoProbe.cpp  101
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ReplaceConstant.cpp  136
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/SSAContext.cpp  48
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/SafepointIRVerifier.cpp  911
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Statepoint.cpp  40
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/StructuralHash.cpp  84
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/SymbolTableListTraitsImpl.h  124
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Type.cpp  786
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp  202
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Use.cpp  42
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/User.cpp  206
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Value.cpp  1244
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/ValueSymbolTable.cpp  130
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/VectorBuilder.cpp  103
-rw-r--r--  contrib/llvm-project/llvm/lib/IR/Verifier.cpp  6604
69 files changed, 63631 insertions, 0 deletions
diff --git a/contrib/llvm-project/llvm/lib/IR/AbstractCallSite.cpp b/contrib/llvm-project/llvm/lib/IR/AbstractCallSite.cpp
new file mode 100644
index 000000000000..b7a10846a0d3
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/AbstractCallSite.cpp
@@ -0,0 +1,153 @@
+//===-- AbstractCallSite.cpp - Implementation of abstract call sites ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements abstract call sites which unify the interface for
+// direct, indirect, and callback call sites.
+//
+// For more information see:
+// https://llvm.org/devmtg/2018-10/talk-abstracts.html#talk20
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/AbstractCallSite.h"
+#include "llvm/ADT/Statistic.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "abstract-call-sites"
+
+STATISTIC(NumCallbackCallSites, "Number of callback call sites created");
+STATISTIC(NumDirectAbstractCallSites,
+ "Number of direct abstract call sites created");
+STATISTIC(NumInvalidAbstractCallSitesUnknownUse,
+ "Number of invalid abstract call sites created (unknown use)");
+STATISTIC(NumInvalidAbstractCallSitesUnknownCallee,
+ "Number of invalid abstract call sites created (unknown callee)");
+STATISTIC(NumInvalidAbstractCallSitesNoCallback,
+ "Number of invalid abstract call sites created (no callback)");
+
+void AbstractCallSite::getCallbackUses(
+ const CallBase &CB, SmallVectorImpl<const Use *> &CallbackUses) {
+ const Function *Callee = CB.getCalledFunction();
+ if (!Callee)
+ return;
+
+ MDNode *CallbackMD = Callee->getMetadata(LLVMContext::MD_callback);
+ if (!CallbackMD)
+ return;
+
+ for (const MDOperand &Op : CallbackMD->operands()) {
+ MDNode *OpMD = cast<MDNode>(Op.get());
+ auto *CBCalleeIdxAsCM = cast<ConstantAsMetadata>(OpMD->getOperand(0));
+ uint64_t CBCalleeIdx =
+ cast<ConstantInt>(CBCalleeIdxAsCM->getValue())->getZExtValue();
+ if (CBCalleeIdx < CB.arg_size())
+ CallbackUses.push_back(CB.arg_begin() + CBCalleeIdx);
+ }
+}
+
+/// Create an abstract call site from a use.
+AbstractCallSite::AbstractCallSite(const Use *U)
+ : CB(dyn_cast<CallBase>(U->getUser())) {
+
+ // First handle unknown users.
+ if (!CB) {
+
+ // If the use is actually in a constant cast expression which itself
+ // has only one use, we look through the constant cast expression.
+ // This happens by updating the use @p U to the use of the constant
+ // cast expression and afterwards re-initializing CB accordingly.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U->getUser()))
+ if (CE->hasOneUse() && CE->isCast()) {
+ U = &*CE->use_begin();
+ CB = dyn_cast<CallBase>(U->getUser());
+ }
+
+ if (!CB) {
+ NumInvalidAbstractCallSitesUnknownUse++;
+ return;
+ }
+ }
+
+  // Then handle direct or indirect calls. That is, if U is the callee of the
+  // call site CB, it is not a callback and we are done.
+ if (CB->isCallee(U)) {
+ NumDirectAbstractCallSites++;
+ return;
+ }
+
+  // If we cannot identify the broker function, we cannot create a callback;
+  // invalidate the abstract call site.
+ Function *Callee = CB->getCalledFunction();
+ if (!Callee) {
+ NumInvalidAbstractCallSitesUnknownCallee++;
+ CB = nullptr;
+ return;
+ }
+
+ MDNode *CallbackMD = Callee->getMetadata(LLVMContext::MD_callback);
+ if (!CallbackMD) {
+ NumInvalidAbstractCallSitesNoCallback++;
+ CB = nullptr;
+ return;
+ }
+
+ unsigned UseIdx = CB->getArgOperandNo(U);
+ MDNode *CallbackEncMD = nullptr;
+ for (const MDOperand &Op : CallbackMD->operands()) {
+ MDNode *OpMD = cast<MDNode>(Op.get());
+ auto *CBCalleeIdxAsCM = cast<ConstantAsMetadata>(OpMD->getOperand(0));
+ uint64_t CBCalleeIdx =
+ cast<ConstantInt>(CBCalleeIdxAsCM->getValue())->getZExtValue();
+ if (CBCalleeIdx != UseIdx)
+ continue;
+ CallbackEncMD = OpMD;
+ break;
+ }
+
+ if (!CallbackEncMD) {
+ NumInvalidAbstractCallSitesNoCallback++;
+ CB = nullptr;
+ return;
+ }
+
+ NumCallbackCallSites++;
+
+  assert(CallbackEncMD->getNumOperands() >= 2 &&
+         "Incomplete !callback metadata");
+
+ unsigned NumCallOperands = CB->arg_size();
+ // Skip the var-arg flag at the end when reading the metadata.
+ for (unsigned u = 0, e = CallbackEncMD->getNumOperands() - 1; u < e; u++) {
+ Metadata *OpAsM = CallbackEncMD->getOperand(u).get();
+ auto *OpAsCM = cast<ConstantAsMetadata>(OpAsM);
+ assert(OpAsCM->getType()->isIntegerTy(64) &&
+ "Malformed !callback metadata");
+
+ int64_t Idx = cast<ConstantInt>(OpAsCM->getValue())->getSExtValue();
+ assert(-1 <= Idx && Idx <= NumCallOperands &&
+ "Out-of-bounds !callback metadata index");
+
+ CI.ParameterEncoding.push_back(Idx);
+ }
+
+ if (!Callee->isVarArg())
+ return;
+
+ Metadata *VarArgFlagAsM =
+ CallbackEncMD->getOperand(CallbackEncMD->getNumOperands() - 1).get();
+ auto *VarArgFlagAsCM = cast<ConstantAsMetadata>(VarArgFlagAsM);
+ assert(VarArgFlagAsCM->getType()->isIntegerTy(1) &&
+ "Malformed !callback metadata var-arg flag");
+
+ if (VarArgFlagAsCM->getValue()->isNullValue())
+ return;
+
+ // Add all variadic arguments at the end.
+ for (unsigned u = Callee->arg_size(); u < NumCallOperands; u++)
+ CI.ParameterEncoding.push_back(u);
+}
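
For context, a minimal sketch of how the !callback encoding consumed above is
usually produced and queried. It uses only public IR APIs
(MDBuilder::createCallbackEncoding and GlobalObject::addMetadata); the broker,
the callback, and the operand positions are hypothetical:

    #include "llvm/IR/AbstractCallSite.h"
    #include "llvm/IR/MDBuilder.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Mark `Broker` so that its call operand 2 is known to be invoked with
    // call operand 3 as the only callback argument (no var-args forwarded).
    void annotateBroker(Function &Broker) {
      LLVMContext &Ctx = Broker.getContext();
      MDBuilder MDB(Ctx);
      Broker.addMetadata(
          LLVMContext::MD_callback,
          *MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                /*CalleeArgNo=*/2, /*Arguments=*/{3},
                                /*VarArgsArePassed=*/false)}));
    }

    // Afterwards, every use of `Callback` passed as operand 2 of a call to
    // the broker forms a valid callback abstract call site.
    void visitCallbackSites(Function &Callback) {
      for (const Use &U : Callback.uses()) {
        AbstractCallSite ACS(&U);
        if (ACS && ACS.isCallbackCall()) {
          // ACS.getCalledFunction() == &Callback; broker operand 3 maps to
          // the callback's first parameter via the encoding above.
        }
      }
    }
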
diff --git a/contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp b/contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp
new file mode 100644
index 000000000000..a29040b8c2aa
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/AsmWriter.cpp
@@ -0,0 +1,4871 @@
+//===- AsmWriter.cpp - Printing LLVM as an assembly file ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This library implements the `print` family of functions in classes like
+// Module, Function, and Value. The in-memory representation of those classes
+// is converted to IR strings.
+//
+// Note that these routines must be extremely tolerant of various errors in
+// the LLVM code, because they can be used for debugging transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/AssemblyAnnotationWriter.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSlotTracker.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/TypeFinder.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+// Make virtual table appear in this compilation unit.
+AssemblyAnnotationWriter::~AssemblyAnnotationWriter() = default;
+
+//===----------------------------------------------------------------------===//
+// Helper Functions
+//===----------------------------------------------------------------------===//
+
+using OrderMap = MapVector<const Value *, unsigned>;
+
+using UseListOrderMap =
+ DenseMap<const Function *, MapVector<const Value *, std::vector<unsigned>>>;
+
+/// Look for a value that might be wrapped as metadata, e.g. a value in a
+/// metadata operand. Returns the input value as-is if it is not wrapped.
+static const Value *skipMetadataWrapper(const Value *V) {
+ if (const auto *MAV = dyn_cast<MetadataAsValue>(V))
+ if (const auto *VAM = dyn_cast<ValueAsMetadata>(MAV->getMetadata()))
+ return VAM->getValue();
+ return V;
+}
+
+static void orderValue(const Value *V, OrderMap &OM) {
+ if (OM.lookup(V))
+ return;
+
+ if (const Constant *C = dyn_cast<Constant>(V))
+ if (C->getNumOperands() && !isa<GlobalValue>(C))
+ for (const Value *Op : C->operands())
+ if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
+ orderValue(Op, OM);
+
+ // Note: we cannot cache this lookup above, since inserting into the map
+ // changes the map's size, and thus affects the other IDs.
+ unsigned ID = OM.size() + 1;
+ OM[V] = ID;
+}
+
+static OrderMap orderModule(const Module *M) {
+ OrderMap OM;
+
+ for (const GlobalVariable &G : M->globals()) {
+ if (G.hasInitializer())
+ if (!isa<GlobalValue>(G.getInitializer()))
+ orderValue(G.getInitializer(), OM);
+ orderValue(&G, OM);
+ }
+ for (const GlobalAlias &A : M->aliases()) {
+ if (!isa<GlobalValue>(A.getAliasee()))
+ orderValue(A.getAliasee(), OM);
+ orderValue(&A, OM);
+ }
+ for (const GlobalIFunc &I : M->ifuncs()) {
+ if (!isa<GlobalValue>(I.getResolver()))
+ orderValue(I.getResolver(), OM);
+ orderValue(&I, OM);
+ }
+ for (const Function &F : *M) {
+ for (const Use &U : F.operands())
+ if (!isa<GlobalValue>(U.get()))
+ orderValue(U.get(), OM);
+
+ orderValue(&F, OM);
+
+ if (F.isDeclaration())
+ continue;
+
+ for (const Argument &A : F.args())
+ orderValue(&A, OM);
+ for (const BasicBlock &BB : F) {
+ orderValue(&BB, OM);
+ for (const Instruction &I : BB) {
+ for (const Value *Op : I.operands()) {
+ Op = skipMetadataWrapper(Op);
+ if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
+ isa<InlineAsm>(*Op))
+ orderValue(Op, OM);
+ }
+ orderValue(&I, OM);
+ }
+ }
+ }
+ return OM;
+}
+
+static std::vector<unsigned>
+predictValueUseListOrder(const Value *V, unsigned ID, const OrderMap &OM) {
+ // Predict use-list order for this one.
+ using Entry = std::pair<const Use *, unsigned>;
+ SmallVector<Entry, 64> List;
+ for (const Use &U : V->uses())
+ // Check if this user will be serialized.
+ if (OM.lookup(U.getUser()))
+ List.push_back(std::make_pair(&U, List.size()));
+
+ if (List.size() < 2)
+ // We may have lost some users.
+ return {};
+
+ // When referencing a value before its declaration, a temporary value is
+ // created, which will later be RAUWed with the actual value. This reverses
+ // the use list. This happens for all values apart from basic blocks.
+ bool GetsReversed = !isa<BasicBlock>(V);
+ if (auto *BA = dyn_cast<BlockAddress>(V))
+ ID = OM.lookup(BA->getBasicBlock());
+ llvm::sort(List, [&](const Entry &L, const Entry &R) {
+ const Use *LU = L.first;
+ const Use *RU = R.first;
+ if (LU == RU)
+ return false;
+
+ auto LID = OM.lookup(LU->getUser());
+ auto RID = OM.lookup(RU->getUser());
+
+ // If ID is 4, then expect: 7 6 5 1 2 3.
+ if (LID < RID) {
+ if (GetsReversed)
+ if (RID <= ID)
+ return true;
+ return false;
+ }
+ if (RID < LID) {
+ if (GetsReversed)
+ if (LID <= ID)
+ return false;
+ return true;
+ }
+
+ // LID and RID are equal, so we have different operands of the same user.
+ // Assume operands are added in order for all instructions.
+ if (GetsReversed)
+ if (LID <= ID)
+ return LU->getOperandNo() < RU->getOperandNo();
+ return LU->getOperandNo() > RU->getOperandNo();
+ });
+
+ if (llvm::is_sorted(List, llvm::less_second()))
+ // Order is already correct.
+ return {};
+
+ // Store the shuffle.
+ std::vector<unsigned> Shuffle(List.size());
+ for (size_t I = 0, E = List.size(); I != E; ++I)
+ Shuffle[I] = List[I].second;
+ return Shuffle;
+}
+
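An aside on the representation (not part of the upstream file): the Shuffle
vector built above maps each predicted position to the current position of
the use that should land there. A self-contained illustration in plain C++,
with hypothetical data:

    #include <cstdio>
    #include <vector>

    int main() {
      // Use-list in the order the parser would reconstruct it.
      const char *Uses[] = {"u0", "u1", "u2"};
      // Shuffle[NewIdx] == OldIdx, as in predictValueUseListOrder.
      std::vector<unsigned> Shuffle = {2, 0, 1};
      for (unsigned NewIdx = 0; NewIdx != Shuffle.size(); ++NewIdx)
        std::printf("%s ", Uses[Shuffle[NewIdx]]); // prints: u2 u0 u1
      std::printf("\n");
      return 0;
    }
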
+static UseListOrderMap predictUseListOrder(const Module *M) {
+ OrderMap OM = orderModule(M);
+ UseListOrderMap ULOM;
+ for (const auto &Pair : OM) {
+ const Value *V = Pair.first;
+ if (V->use_empty() || std::next(V->use_begin()) == V->use_end())
+ continue;
+
+ std::vector<unsigned> Shuffle =
+ predictValueUseListOrder(V, Pair.second, OM);
+ if (Shuffle.empty())
+ continue;
+
+ const Function *F = nullptr;
+ if (auto *I = dyn_cast<Instruction>(V))
+ F = I->getFunction();
+ if (auto *A = dyn_cast<Argument>(V))
+ F = A->getParent();
+ if (auto *BB = dyn_cast<BasicBlock>(V))
+ F = BB->getParent();
+ ULOM[F][V] = std::move(Shuffle);
+ }
+ return ULOM;
+}
+
+static const Module *getModuleFromVal(const Value *V) {
+ if (const Argument *MA = dyn_cast<Argument>(V))
+ return MA->getParent() ? MA->getParent()->getParent() : nullptr;
+
+ if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+ return BB->getParent() ? BB->getParent()->getParent() : nullptr;
+
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ const Function *M = I->getParent() ? I->getParent()->getParent() : nullptr;
+ return M ? M->getParent() : nullptr;
+ }
+
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+ return GV->getParent();
+
+ if (const auto *MAV = dyn_cast<MetadataAsValue>(V)) {
+ for (const User *U : MAV->users())
+ if (isa<Instruction>(U))
+ if (const Module *M = getModuleFromVal(U))
+ return M;
+ return nullptr;
+ }
+
+ return nullptr;
+}
+
+static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
+ switch (cc) {
+ default: Out << "cc" << cc; break;
+ case CallingConv::Fast: Out << "fastcc"; break;
+ case CallingConv::Cold: Out << "coldcc"; break;
+ case CallingConv::WebKit_JS: Out << "webkit_jscc"; break;
+ case CallingConv::AnyReg: Out << "anyregcc"; break;
+ case CallingConv::PreserveMost: Out << "preserve_mostcc"; break;
+ case CallingConv::PreserveAll: Out << "preserve_allcc"; break;
+ case CallingConv::CXX_FAST_TLS: Out << "cxx_fast_tlscc"; break;
+ case CallingConv::GHC: Out << "ghccc"; break;
+ case CallingConv::Tail: Out << "tailcc"; break;
+ case CallingConv::CFGuard_Check: Out << "cfguard_checkcc"; break;
+ case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break;
+ case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break;
+ case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break;
+ case CallingConv::X86_RegCall: Out << "x86_regcallcc"; break;
+  case CallingConv::X86_VectorCall: Out << "x86_vectorcallcc"; break;
+ case CallingConv::Intel_OCL_BI: Out << "intel_ocl_bicc"; break;
+ case CallingConv::ARM_APCS: Out << "arm_apcscc"; break;
+ case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break;
+ case CallingConv::ARM_AAPCS_VFP: Out << "arm_aapcs_vfpcc"; break;
+ case CallingConv::AArch64_VectorCall: Out << "aarch64_vector_pcs"; break;
+ case CallingConv::AArch64_SVE_VectorCall:
+ Out << "aarch64_sve_vector_pcs";
+ break;
+ case CallingConv::MSP430_INTR: Out << "msp430_intrcc"; break;
+ case CallingConv::AVR_INTR: Out << "avr_intrcc "; break;
+ case CallingConv::AVR_SIGNAL: Out << "avr_signalcc "; break;
+ case CallingConv::PTX_Kernel: Out << "ptx_kernel"; break;
+ case CallingConv::PTX_Device: Out << "ptx_device"; break;
+ case CallingConv::X86_64_SysV: Out << "x86_64_sysvcc"; break;
+ case CallingConv::Win64: Out << "win64cc"; break;
+ case CallingConv::SPIR_FUNC: Out << "spir_func"; break;
+ case CallingConv::SPIR_KERNEL: Out << "spir_kernel"; break;
+ case CallingConv::Swift: Out << "swiftcc"; break;
+ case CallingConv::SwiftTail: Out << "swifttailcc"; break;
+ case CallingConv::X86_INTR: Out << "x86_intrcc"; break;
+ case CallingConv::HHVM: Out << "hhvmcc"; break;
+ case CallingConv::HHVM_C: Out << "hhvm_ccc"; break;
+ case CallingConv::AMDGPU_VS: Out << "amdgpu_vs"; break;
+ case CallingConv::AMDGPU_LS: Out << "amdgpu_ls"; break;
+ case CallingConv::AMDGPU_HS: Out << "amdgpu_hs"; break;
+ case CallingConv::AMDGPU_ES: Out << "amdgpu_es"; break;
+ case CallingConv::AMDGPU_GS: Out << "amdgpu_gs"; break;
+ case CallingConv::AMDGPU_PS: Out << "amdgpu_ps"; break;
+ case CallingConv::AMDGPU_CS: Out << "amdgpu_cs"; break;
+ case CallingConv::AMDGPU_KERNEL: Out << "amdgpu_kernel"; break;
+ case CallingConv::AMDGPU_Gfx: Out << "amdgpu_gfx"; break;
+ }
+}
+
+enum PrefixType {
+ GlobalPrefix,
+ ComdatPrefix,
+ LabelPrefix,
+ LocalPrefix,
+ NoPrefix
+};
+
+void llvm::printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name) {
+ assert(!Name.empty() && "Cannot get empty name!");
+
+ // Scan the name to see if it needs quotes first.
+ bool NeedsQuotes = isdigit(static_cast<unsigned char>(Name[0]));
+ if (!NeedsQuotes) {
+ for (unsigned char C : Name) {
+ // By making this unsigned, the value passed in to isalnum will always be
+ // in the range 0-255. This is important when building with MSVC because
+ // its implementation will assert. This situation can arise when dealing
+ // with UTF-8 multibyte characters.
+ if (!isalnum(static_cast<unsigned char>(C)) && C != '-' && C != '.' &&
+ C != '_') {
+ NeedsQuotes = true;
+ break;
+ }
+ }
+ }
+
+ // If we didn't need any quotes, just write out the name in one blast.
+ if (!NeedsQuotes) {
+ OS << Name;
+ return;
+ }
+
+ // Okay, we need quotes. Output the quotes and escape any scary characters as
+ // needed.
+ OS << '"';
+ printEscapedString(Name, OS);
+ OS << '"';
+}
+
+/// Turn the specified name into an 'LLVM name', which is either prefixed with %
+/// (if the string only contains simple characters) or is surrounded with ""'s
+/// (if it has special chars in it). Print it out.
+static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
+ switch (Prefix) {
+ case NoPrefix:
+ break;
+ case GlobalPrefix:
+ OS << '@';
+ break;
+ case ComdatPrefix:
+ OS << '$';
+ break;
+ case LabelPrefix:
+ break;
+ case LocalPrefix:
+ OS << '%';
+ break;
+ }
+ printLLVMNameWithoutPrefix(OS, Name);
+}
+
+/// Turn the specified name into an 'LLVM name', which is either prefixed with %
+/// (if the string only contains simple characters) or is surrounded with ""'s
+/// (if it has special chars in it). Print it out.
+static void PrintLLVMName(raw_ostream &OS, const Value *V) {
+ PrintLLVMName(OS, V->getName(),
+ isa<GlobalValue>(V) ? GlobalPrefix : LocalPrefix);
+}
+
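As a worked example of the quoting rules above: names such as abc or x.y-z_1
print bare (%abc, %x.y-z_1), while a name with a leading digit or any other
character prints quoted and escaped, e.g. %"0count" or %"has space".
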
+static void PrintShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef<int> Mask) {
+ Out << ", <";
+ if (isa<ScalableVectorType>(Ty))
+ Out << "vscale x ";
+ Out << Mask.size() << " x i32> ";
+ bool FirstElt = true;
+ if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
+ Out << "zeroinitializer";
+ } else if (all_of(Mask, [](int Elt) { return Elt == UndefMaskElem; })) {
+ Out << "undef";
+ } else {
+ Out << "<";
+ for (int Elt : Mask) {
+ if (FirstElt)
+ FirstElt = false;
+ else
+ Out << ", ";
+ Out << "i32 ";
+ if (Elt == UndefMaskElem)
+ Out << "undef";
+ else
+ Out << Elt;
+ }
+ Out << ">";
+ }
+}
+
+namespace {
+
+class TypePrinting {
+public:
+ TypePrinting(const Module *M = nullptr) : DeferredM(M) {}
+
+ TypePrinting(const TypePrinting &) = delete;
+ TypePrinting &operator=(const TypePrinting &) = delete;
+
+ /// The named types that are used by the current module.
+ TypeFinder &getNamedTypes();
+
+ /// The numbered types, number to type mapping.
+ std::vector<StructType *> &getNumberedTypes();
+
+ bool empty();
+
+ void print(Type *Ty, raw_ostream &OS);
+
+ void printStructBody(StructType *Ty, raw_ostream &OS);
+
+private:
+ void incorporateTypes();
+
+ /// A module to process lazily when needed. Set to nullptr as soon as used.
+ const Module *DeferredM;
+
+ TypeFinder NamedTypes;
+
+ // The numbered types, along with their value.
+ DenseMap<StructType *, unsigned> Type2Number;
+
+ std::vector<StructType *> NumberedTypes;
+};
+
+} // end anonymous namespace
+
+TypeFinder &TypePrinting::getNamedTypes() {
+ incorporateTypes();
+ return NamedTypes;
+}
+
+std::vector<StructType *> &TypePrinting::getNumberedTypes() {
+ incorporateTypes();
+
+  // We know the number assigned to each type and that the numbering is
+  // dense. Convert the map to an index table, if it's not done already
+  // (judging from the sizes):
+ if (NumberedTypes.size() == Type2Number.size())
+ return NumberedTypes;
+
+ NumberedTypes.resize(Type2Number.size());
+ for (const auto &P : Type2Number) {
+ assert(P.second < NumberedTypes.size() && "Didn't get a dense numbering?");
+ assert(!NumberedTypes[P.second] && "Didn't get a unique numbering?");
+ NumberedTypes[P.second] = P.first;
+ }
+ return NumberedTypes;
+}
+
+bool TypePrinting::empty() {
+ incorporateTypes();
+ return NamedTypes.empty() && Type2Number.empty();
+}
+
+void TypePrinting::incorporateTypes() {
+ if (!DeferredM)
+ return;
+
+ NamedTypes.run(*DeferredM, false);
+ DeferredM = nullptr;
+
+  // The list of struct types we got back includes all the struct types; split
+  // the unnamed ones out to a numbering and remove the anonymous structs.
+ unsigned NextNumber = 0;
+
+ std::vector<StructType *>::iterator NextToUse = NamedTypes.begin();
+ for (StructType *STy : NamedTypes) {
+ // Ignore anonymous types.
+ if (STy->isLiteral())
+ continue;
+
+ if (STy->getName().empty())
+ Type2Number[STy] = NextNumber++;
+ else
+ *NextToUse++ = STy;
+ }
+
+ NamedTypes.erase(NextToUse, NamedTypes.end());
+}
+
+/// Write the specified type to the specified raw_ostream, making use of type
+/// names or up references to shorten the type name where possible.
+void TypePrinting::print(Type *Ty, raw_ostream &OS) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: OS << "void"; return;
+ case Type::HalfTyID: OS << "half"; return;
+ case Type::BFloatTyID: OS << "bfloat"; return;
+ case Type::FloatTyID: OS << "float"; return;
+ case Type::DoubleTyID: OS << "double"; return;
+ case Type::X86_FP80TyID: OS << "x86_fp80"; return;
+ case Type::FP128TyID: OS << "fp128"; return;
+ case Type::PPC_FP128TyID: OS << "ppc_fp128"; return;
+ case Type::LabelTyID: OS << "label"; return;
+ case Type::MetadataTyID: OS << "metadata"; return;
+ case Type::X86_MMXTyID: OS << "x86_mmx"; return;
+ case Type::X86_AMXTyID: OS << "x86_amx"; return;
+ case Type::TokenTyID: OS << "token"; return;
+ case Type::IntegerTyID:
+ OS << 'i' << cast<IntegerType>(Ty)->getBitWidth();
+ return;
+
+ case Type::FunctionTyID: {
+ FunctionType *FTy = cast<FunctionType>(Ty);
+ print(FTy->getReturnType(), OS);
+ OS << " (";
+ ListSeparator LS;
+ for (Type *Ty : FTy->params()) {
+ OS << LS;
+ print(Ty, OS);
+ }
+ if (FTy->isVarArg())
+ OS << LS << "...";
+ OS << ')';
+ return;
+ }
+ case Type::StructTyID: {
+ StructType *STy = cast<StructType>(Ty);
+
+ if (STy->isLiteral())
+ return printStructBody(STy, OS);
+
+ if (!STy->getName().empty())
+ return PrintLLVMName(OS, STy->getName(), LocalPrefix);
+
+ incorporateTypes();
+ const auto I = Type2Number.find(STy);
+ if (I != Type2Number.end())
+ OS << '%' << I->second;
+ else // Not enumerated, print the hex address.
+ OS << "%\"type " << STy << '\"';
+ return;
+ }
+ case Type::PointerTyID: {
+ PointerType *PTy = cast<PointerType>(Ty);
+ if (PTy->isOpaque()) {
+ OS << "ptr";
+ if (unsigned AddressSpace = PTy->getAddressSpace())
+ OS << " addrspace(" << AddressSpace << ')';
+ return;
+ }
+ print(PTy->getNonOpaquePointerElementType(), OS);
+ if (unsigned AddressSpace = PTy->getAddressSpace())
+ OS << " addrspace(" << AddressSpace << ')';
+ OS << '*';
+ return;
+ }
+ case Type::ArrayTyID: {
+ ArrayType *ATy = cast<ArrayType>(Ty);
+ OS << '[' << ATy->getNumElements() << " x ";
+ print(ATy->getElementType(), OS);
+ OS << ']';
+ return;
+ }
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID: {
+ VectorType *PTy = cast<VectorType>(Ty);
+ ElementCount EC = PTy->getElementCount();
+ OS << "<";
+ if (EC.isScalable())
+ OS << "vscale x ";
+ OS << EC.getKnownMinValue() << " x ";
+ print(PTy->getElementType(), OS);
+ OS << '>';
+ return;
+ }
+ case Type::DXILPointerTyID:
+ // DXIL pointer types are only handled by the DirectX backend. To avoid
+ // extra dependencies we just print the pointer's address here.
+ OS << "dxil-ptr (" << Ty << ")";
+ return;
+ }
+ llvm_unreachable("Invalid TypeID");
+}
+
+void TypePrinting::printStructBody(StructType *STy, raw_ostream &OS) {
+ if (STy->isOpaque()) {
+ OS << "opaque";
+ return;
+ }
+
+ if (STy->isPacked())
+ OS << '<';
+
+ if (STy->getNumElements() == 0) {
+ OS << "{}";
+ } else {
+ OS << "{ ";
+ ListSeparator LS;
+ for (Type *Ty : STy->elements()) {
+ OS << LS;
+ print(Ty, OS);
+ }
+
+ OS << " }";
+ }
+ if (STy->isPacked())
+ OS << '>';
+}
+
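TypePrinting is file-local; user code reaches it through Type::print. A
minimal sketch, assuming a build of roughly this vintage in which typed
pointers still print as i8*:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      StructType *ST = StructType::create(
          Ctx, {Type::getInt32Ty(Ctx), Type::getInt8PtrTy(Ctx)}, "pair");
      // Named structs print their name and body:
      //   %pair = type { i32, i8* }
      ST->print(outs());
      outs() << '\n';
      return 0;
    }
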
+AbstractSlotTrackerStorage::~AbstractSlotTrackerStorage() = default;
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// SlotTracker Class: Enumerate slot numbers for unnamed values
+//===----------------------------------------------------------------------===//
+/// This class provides computation of slot numbers for LLVM Assembly writing.
+///
+class SlotTracker : public AbstractSlotTrackerStorage {
+public:
+ /// ValueMap - A mapping of Values to slot numbers.
+ using ValueMap = DenseMap<const Value *, unsigned>;
+
+private:
+ /// TheModule - The module for which we are holding slot numbers.
+ const Module* TheModule;
+
+ /// TheFunction - The function for which we are holding slot numbers.
+ const Function* TheFunction = nullptr;
+ bool FunctionProcessed = false;
+ bool ShouldInitializeAllMetadata;
+
+ std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>
+ ProcessModuleHookFn;
+ std::function<void(AbstractSlotTrackerStorage *, const Function *, bool)>
+ ProcessFunctionHookFn;
+
+ /// The summary index for which we are holding slot numbers.
+ const ModuleSummaryIndex *TheIndex = nullptr;
+
+ /// mMap - The slot map for the module level data.
+ ValueMap mMap;
+ unsigned mNext = 0;
+
+ /// fMap - The slot map for the function level data.
+ ValueMap fMap;
+ unsigned fNext = 0;
+
+ /// mdnMap - Map for MDNodes.
+ DenseMap<const MDNode*, unsigned> mdnMap;
+ unsigned mdnNext = 0;
+
+ /// asMap - The slot map for attribute sets.
+ DenseMap<AttributeSet, unsigned> asMap;
+ unsigned asNext = 0;
+
+ /// ModulePathMap - The slot map for Module paths used in the summary index.
+ StringMap<unsigned> ModulePathMap;
+ unsigned ModulePathNext = 0;
+
+ /// GUIDMap - The slot map for GUIDs used in the summary index.
+ DenseMap<GlobalValue::GUID, unsigned> GUIDMap;
+ unsigned GUIDNext = 0;
+
+ /// TypeIdMap - The slot map for type ids used in the summary index.
+ StringMap<unsigned> TypeIdMap;
+ unsigned TypeIdNext = 0;
+
+public:
+ /// Construct from a module.
+ ///
+ /// If \c ShouldInitializeAllMetadata, initializes all metadata in all
+ /// functions, giving correct numbering for metadata referenced only from
+ /// within a function (even if no functions have been initialized).
+ explicit SlotTracker(const Module *M,
+ bool ShouldInitializeAllMetadata = false);
+
+  /// Construct from a function, starting out in the incorporated state.
+ ///
+ /// If \c ShouldInitializeAllMetadata, initializes all metadata in all
+ /// functions, giving correct numbering for metadata referenced only from
+ /// within a function (even if no functions have been initialized).
+ explicit SlotTracker(const Function *F,
+ bool ShouldInitializeAllMetadata = false);
+
+ /// Construct from a module summary index.
+ explicit SlotTracker(const ModuleSummaryIndex *Index);
+
+ SlotTracker(const SlotTracker &) = delete;
+ SlotTracker &operator=(const SlotTracker &) = delete;
+
+ ~SlotTracker() = default;
+
+ void setProcessHook(
+ std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>);
+ void setProcessHook(std::function<void(AbstractSlotTrackerStorage *,
+ const Function *, bool)>);
+
+ unsigned getNextMetadataSlot() override { return mdnNext; }
+
+ void createMetadataSlot(const MDNode *N) override;
+
+  /// Return the slot number of the specified value in its type
+  /// plane. If something is not in the SlotTracker, return -1.
+ int getLocalSlot(const Value *V);
+ int getGlobalSlot(const GlobalValue *V);
+ int getMetadataSlot(const MDNode *N) override;
+ int getAttributeGroupSlot(AttributeSet AS);
+ int getModulePathSlot(StringRef Path);
+ int getGUIDSlot(GlobalValue::GUID GUID);
+ int getTypeIdSlot(StringRef Id);
+
+ /// If you'd like to deal with a function instead of just a module, use
+ /// this method to get its data into the SlotTracker.
+ void incorporateFunction(const Function *F) {
+ TheFunction = F;
+ FunctionProcessed = false;
+ }
+
+ const Function *getFunction() const { return TheFunction; }
+
+ /// After calling incorporateFunction, use this method to remove the
+ /// most recently incorporated function from the SlotTracker. This
+ /// will reset the state of the machine back to just the module contents.
+ void purgeFunction();
+
+ /// MDNode map iterators.
+ using mdn_iterator = DenseMap<const MDNode*, unsigned>::iterator;
+
+ mdn_iterator mdn_begin() { return mdnMap.begin(); }
+ mdn_iterator mdn_end() { return mdnMap.end(); }
+ unsigned mdn_size() const { return mdnMap.size(); }
+ bool mdn_empty() const { return mdnMap.empty(); }
+
+ /// AttributeSet map iterators.
+ using as_iterator = DenseMap<AttributeSet, unsigned>::iterator;
+
+ as_iterator as_begin() { return asMap.begin(); }
+ as_iterator as_end() { return asMap.end(); }
+ unsigned as_size() const { return asMap.size(); }
+ bool as_empty() const { return asMap.empty(); }
+
+ /// GUID map iterators.
+ using guid_iterator = DenseMap<GlobalValue::GUID, unsigned>::iterator;
+
+ /// These functions do the actual initialization.
+ inline void initializeIfNeeded();
+ int initializeIndexIfNeeded();
+
+ // Implementation Details
+private:
+ /// CreateModuleSlot - Insert the specified GlobalValue* into the slot table.
+ void CreateModuleSlot(const GlobalValue *V);
+
+ /// CreateMetadataSlot - Insert the specified MDNode* into the slot table.
+ void CreateMetadataSlot(const MDNode *N);
+
+ /// CreateFunctionSlot - Insert the specified Value* into the slot table.
+ void CreateFunctionSlot(const Value *V);
+
+ /// Insert the specified AttributeSet into the slot table.
+ void CreateAttributeSetSlot(AttributeSet AS);
+
+ inline void CreateModulePathSlot(StringRef Path);
+ void CreateGUIDSlot(GlobalValue::GUID GUID);
+ void CreateTypeIdSlot(StringRef Id);
+
+ /// Add all of the module level global variables (and their initializers)
+ /// and function declarations, but not the contents of those functions.
+ void processModule();
+ // Returns number of allocated slots
+ int processIndex();
+
+ /// Add all of the functions arguments, basic blocks, and instructions.
+ void processFunction();
+
+ /// Add the metadata directly attached to a GlobalObject.
+ void processGlobalObjectMetadata(const GlobalObject &GO);
+
+ /// Add all of the metadata from a function.
+ void processFunctionMetadata(const Function &F);
+
+ /// Add all of the metadata from an instruction.
+ void processInstructionMetadata(const Instruction &I);
+};
+
+} // end namespace llvm
+
+ModuleSlotTracker::ModuleSlotTracker(SlotTracker &Machine, const Module *M,
+ const Function *F)
+ : M(M), F(F), Machine(&Machine) {}
+
+ModuleSlotTracker::ModuleSlotTracker(const Module *M,
+ bool ShouldInitializeAllMetadata)
+ : ShouldCreateStorage(M),
+ ShouldInitializeAllMetadata(ShouldInitializeAllMetadata), M(M) {}
+
+ModuleSlotTracker::~ModuleSlotTracker() = default;
+
+SlotTracker *ModuleSlotTracker::getMachine() {
+ if (!ShouldCreateStorage)
+ return Machine;
+
+ ShouldCreateStorage = false;
+ MachineStorage =
+ std::make_unique<SlotTracker>(M, ShouldInitializeAllMetadata);
+ Machine = MachineStorage.get();
+ if (ProcessModuleHookFn)
+ Machine->setProcessHook(ProcessModuleHookFn);
+ if (ProcessFunctionHookFn)
+ Machine->setProcessHook(ProcessFunctionHookFn);
+ return Machine;
+}
+
+void ModuleSlotTracker::incorporateFunction(const Function &F) {
+ // Using getMachine() may lazily create the slot tracker.
+ if (!getMachine())
+ return;
+
+ // Nothing to do if this is the right function already.
+ if (this->F == &F)
+ return;
+ if (this->F)
+ Machine->purgeFunction();
+ Machine->incorporateFunction(&F);
+ this->F = &F;
+}
+
+int ModuleSlotTracker::getLocalSlot(const Value *V) {
+ assert(F && "No function incorporated");
+ return Machine->getLocalSlot(V);
+}
+
+void ModuleSlotTracker::setProcessHook(
+ std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>
+ Fn) {
+ ProcessModuleHookFn = Fn;
+}
+
+void ModuleSlotTracker::setProcessHook(
+ std::function<void(AbstractSlotTrackerStorage *, const Function *, bool)>
+ Fn) {
+ ProcessFunctionHookFn = Fn;
+}
+
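A usage sketch for the ModuleSlotTracker wrapper above: reusing one tracker
across many print calls computes slots once per module/function instead of
once per value. M and F are assumed to be an existing Module and one of its
Functions:

    #include "llvm/IR/Module.h"
    #include "llvm/IR/ModuleSlotTracker.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void printBody(const Module &M, const Function &F) {
      ModuleSlotTracker MST(&M);
      MST.incorporateFunction(F);
      for (const BasicBlock &BB : F)
        for (const Instruction &I : BB) {
          I.print(outs(), MST); // shares one slot numbering across prints
          outs() << '\n';
        }
    }
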
+static SlotTracker *createSlotTracker(const Value *V) {
+ if (const Argument *FA = dyn_cast<Argument>(V))
+ return new SlotTracker(FA->getParent());
+
+ if (const Instruction *I = dyn_cast<Instruction>(V))
+ if (I->getParent())
+ return new SlotTracker(I->getParent()->getParent());
+
+ if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+ return new SlotTracker(BB->getParent());
+
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+ return new SlotTracker(GV->getParent());
+
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+ return new SlotTracker(GA->getParent());
+
+ if (const GlobalIFunc *GIF = dyn_cast<GlobalIFunc>(V))
+ return new SlotTracker(GIF->getParent());
+
+ if (const Function *Func = dyn_cast<Function>(V))
+ return new SlotTracker(Func);
+
+ return nullptr;
+}
+
+#if 0
+#define ST_DEBUG(X) dbgs() << X
+#else
+#define ST_DEBUG(X)
+#endif
+
+// Module level constructor. Causes the contents of the Module (sans functions)
+// to be added to the slot table.
+SlotTracker::SlotTracker(const Module *M, bool ShouldInitializeAllMetadata)
+ : TheModule(M), ShouldInitializeAllMetadata(ShouldInitializeAllMetadata) {}
+
+// Function level constructor. Causes the contents of the Module and the one
+// function provided to be added to the slot table.
+SlotTracker::SlotTracker(const Function *F, bool ShouldInitializeAllMetadata)
+ : TheModule(F ? F->getParent() : nullptr), TheFunction(F),
+ ShouldInitializeAllMetadata(ShouldInitializeAllMetadata) {}
+
+SlotTracker::SlotTracker(const ModuleSummaryIndex *Index)
+ : TheModule(nullptr), ShouldInitializeAllMetadata(false), TheIndex(Index) {}
+
+inline void SlotTracker::initializeIfNeeded() {
+ if (TheModule) {
+ processModule();
+ TheModule = nullptr; ///< Prevent re-processing next time we're called.
+ }
+
+ if (TheFunction && !FunctionProcessed)
+ processFunction();
+}
+
+int SlotTracker::initializeIndexIfNeeded() {
+ if (!TheIndex)
+ return 0;
+ int NumSlots = processIndex();
+ TheIndex = nullptr; ///< Prevent re-processing next time we're called.
+ return NumSlots;
+}
+
+// Iterate through all the global variables, functions, and global
+// variable initializers and create slots for them.
+void SlotTracker::processModule() {
+ ST_DEBUG("begin processModule!\n");
+
+ // Add all of the unnamed global variables to the value table.
+ for (const GlobalVariable &Var : TheModule->globals()) {
+ if (!Var.hasName())
+ CreateModuleSlot(&Var);
+ processGlobalObjectMetadata(Var);
+ auto Attrs = Var.getAttributes();
+ if (Attrs.hasAttributes())
+ CreateAttributeSetSlot(Attrs);
+ }
+
+ for (const GlobalAlias &A : TheModule->aliases()) {
+ if (!A.hasName())
+ CreateModuleSlot(&A);
+ }
+
+ for (const GlobalIFunc &I : TheModule->ifuncs()) {
+ if (!I.hasName())
+ CreateModuleSlot(&I);
+ }
+
+ // Add metadata used by named metadata.
+ for (const NamedMDNode &NMD : TheModule->named_metadata()) {
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
+ CreateMetadataSlot(NMD.getOperand(i));
+ }
+
+ for (const Function &F : *TheModule) {
+ if (!F.hasName())
+ // Add all the unnamed functions to the table.
+ CreateModuleSlot(&F);
+
+ if (ShouldInitializeAllMetadata)
+ processFunctionMetadata(F);
+
+ // Add all the function attributes to the table.
+ // FIXME: Add attributes of other objects?
+ AttributeSet FnAttrs = F.getAttributes().getFnAttrs();
+ if (FnAttrs.hasAttributes())
+ CreateAttributeSetSlot(FnAttrs);
+ }
+
+ if (ProcessModuleHookFn)
+ ProcessModuleHookFn(this, TheModule, ShouldInitializeAllMetadata);
+
+ ST_DEBUG("end processModule!\n");
+}
+
+// Process the arguments, basic blocks, and instructions of a function.
+void SlotTracker::processFunction() {
+ ST_DEBUG("begin processFunction!\n");
+ fNext = 0;
+
+ // Process function metadata if it wasn't hit at the module-level.
+ if (!ShouldInitializeAllMetadata)
+ processFunctionMetadata(*TheFunction);
+
+ // Add all the function arguments with no names.
+  for (Function::const_arg_iterator AI = TheFunction->arg_begin(),
+ AE = TheFunction->arg_end(); AI != AE; ++AI)
+ if (!AI->hasName())
+ CreateFunctionSlot(&*AI);
+
+ ST_DEBUG("Inserting Instructions:\n");
+
+ // Add all of the basic blocks and instructions with no names.
+ for (auto &BB : *TheFunction) {
+ if (!BB.hasName())
+ CreateFunctionSlot(&BB);
+
+ for (auto &I : BB) {
+ if (!I.getType()->isVoidTy() && !I.hasName())
+ CreateFunctionSlot(&I);
+
+ // We allow direct calls to any llvm.foo function here, because the
+ // target may not be linked into the optimizer.
+ if (const auto *Call = dyn_cast<CallBase>(&I)) {
+ // Add all the call attributes to the table.
+ AttributeSet Attrs = Call->getAttributes().getFnAttrs();
+ if (Attrs.hasAttributes())
+ CreateAttributeSetSlot(Attrs);
+ }
+ }
+ }
+
+ if (ProcessFunctionHookFn)
+ ProcessFunctionHookFn(this, TheFunction, ShouldInitializeAllMetadata);
+
+ FunctionProcessed = true;
+
+ ST_DEBUG("end processFunction!\n");
+}
+
+// Iterate through all the GUID in the index and create slots for them.
+int SlotTracker::processIndex() {
+ ST_DEBUG("begin processIndex!\n");
+ assert(TheIndex);
+
+ // The first block of slots are just the module ids, which start at 0 and are
+ // assigned consecutively. Since the StringMap iteration order isn't
+ // guaranteed, use a std::map to order by module ID before assigning slots.
+ std::map<uint64_t, StringRef> ModuleIdToPathMap;
+ for (auto &ModPath : TheIndex->modulePaths())
+ ModuleIdToPathMap[ModPath.second.first] = ModPath.first();
+ for (auto &ModPair : ModuleIdToPathMap)
+ CreateModulePathSlot(ModPair.second);
+
+ // Start numbering the GUIDs after the module ids.
+ GUIDNext = ModulePathNext;
+
+ for (auto &GlobalList : *TheIndex)
+ CreateGUIDSlot(GlobalList.first);
+
+ for (auto &TId : TheIndex->typeIdCompatibleVtableMap())
+ CreateGUIDSlot(GlobalValue::getGUID(TId.first));
+
+ // Start numbering the TypeIds after the GUIDs.
+ TypeIdNext = GUIDNext;
+ for (const auto &TID : TheIndex->typeIds())
+ CreateTypeIdSlot(TID.second.first);
+
+ ST_DEBUG("end processIndex!\n");
+ return TypeIdNext;
+}
+
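A worked example of the numbering in processIndex: with three module paths,
four distinct GUIDs, and two type ids, the module paths take slots 0-2,
GUIDNext then starts at 3 so the GUIDs take 3-6, TypeIdNext starts at 7 so
the type ids take 7-8, and the function returns 9.
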
+void SlotTracker::processGlobalObjectMetadata(const GlobalObject &GO) {
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ GO.getAllMetadata(MDs);
+ for (auto &MD : MDs)
+ CreateMetadataSlot(MD.second);
+}
+
+void SlotTracker::processFunctionMetadata(const Function &F) {
+ processGlobalObjectMetadata(F);
+ for (auto &BB : F) {
+ for (auto &I : BB)
+ processInstructionMetadata(I);
+ }
+}
+
+void SlotTracker::processInstructionMetadata(const Instruction &I) {
+ // Process metadata used directly by intrinsics.
+ if (const CallInst *CI = dyn_cast<CallInst>(&I))
+ if (Function *F = CI->getCalledFunction())
+ if (F->isIntrinsic())
+ for (auto &Op : I.operands())
+ if (auto *V = dyn_cast_or_null<MetadataAsValue>(Op))
+ if (MDNode *N = dyn_cast<MDNode>(V->getMetadata()))
+ CreateMetadataSlot(N);
+
+ // Process metadata attached to this instruction.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ I.getAllMetadata(MDs);
+ for (auto &MD : MDs)
+ CreateMetadataSlot(MD.second);
+}
+
+/// Clean up after incorporating a function. This is the only way to get out of
+/// the function incorporation state that affects get*Slot/Create*Slot. Function
+/// incorporation state is indicated by TheFunction != 0.
+void SlotTracker::purgeFunction() {
+ ST_DEBUG("begin purgeFunction!\n");
+ fMap.clear(); // Simply discard the function level map
+ TheFunction = nullptr;
+ FunctionProcessed = false;
+ ST_DEBUG("end purgeFunction!\n");
+}
+
+/// getGlobalSlot - Get the slot number of a global value.
+int SlotTracker::getGlobalSlot(const GlobalValue *V) {
+ // Check for uninitialized state and do lazy initialization.
+ initializeIfNeeded();
+
+ // Find the value in the module map
+ ValueMap::iterator MI = mMap.find(V);
+ return MI == mMap.end() ? -1 : (int)MI->second;
+}
+
+void SlotTracker::setProcessHook(
+ std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>
+ Fn) {
+ ProcessModuleHookFn = Fn;
+}
+
+void SlotTracker::setProcessHook(
+ std::function<void(AbstractSlotTrackerStorage *, const Function *, bool)>
+ Fn) {
+ ProcessFunctionHookFn = Fn;
+}
+
+/// createMetadataSlot - Create a slot, if needed, for the specified MDNode.
+void SlotTracker::createMetadataSlot(const MDNode *N) { CreateMetadataSlot(N); }
+
+/// getMetadataSlot - Get the slot number of a MDNode.
+int SlotTracker::getMetadataSlot(const MDNode *N) {
+ // Check for uninitialized state and do lazy initialization.
+ initializeIfNeeded();
+
+ // Find the MDNode in the module map
+ mdn_iterator MI = mdnMap.find(N);
+ return MI == mdnMap.end() ? -1 : (int)MI->second;
+}
+
+/// getLocalSlot - Get the slot number for a value that is local to a function.
+int SlotTracker::getLocalSlot(const Value *V) {
+ assert(!isa<Constant>(V) && "Can't get a constant or global slot with this!");
+
+ // Check for uninitialized state and do lazy initialization.
+ initializeIfNeeded();
+
+ ValueMap::iterator FI = fMap.find(V);
+ return FI == fMap.end() ? -1 : (int)FI->second;
+}
+
+int SlotTracker::getAttributeGroupSlot(AttributeSet AS) {
+ // Check for uninitialized state and do lazy initialization.
+ initializeIfNeeded();
+
+ // Find the AttributeSet in the module map.
+ as_iterator AI = asMap.find(AS);
+ return AI == asMap.end() ? -1 : (int)AI->second;
+}
+
+int SlotTracker::getModulePathSlot(StringRef Path) {
+ // Check for uninitialized state and do lazy initialization.
+ initializeIndexIfNeeded();
+
+ // Find the Module path in the map
+ auto I = ModulePathMap.find(Path);
+ return I == ModulePathMap.end() ? -1 : (int)I->second;
+}
+
+int SlotTracker::getGUIDSlot(GlobalValue::GUID GUID) {
+ // Check for uninitialized state and do lazy initialization.
+ initializeIndexIfNeeded();
+
+ // Find the GUID in the map
+ guid_iterator I = GUIDMap.find(GUID);
+ return I == GUIDMap.end() ? -1 : (int)I->second;
+}
+
+int SlotTracker::getTypeIdSlot(StringRef Id) {
+ // Check for uninitialized state and do lazy initialization.
+ initializeIndexIfNeeded();
+
+ // Find the TypeId string in the map
+ auto I = TypeIdMap.find(Id);
+ return I == TypeIdMap.end() ? -1 : (int)I->second;
+}
+
+/// CreateModuleSlot - Insert the specified GlobalValue* into the slot table.
+void SlotTracker::CreateModuleSlot(const GlobalValue *V) {
+ assert(V && "Can't insert a null Value into SlotTracker!");
+ assert(!V->getType()->isVoidTy() && "Doesn't need a slot!");
+ assert(!V->hasName() && "Doesn't need a slot!");
+
+ unsigned DestSlot = mNext++;
+ mMap[V] = DestSlot;
+
+ ST_DEBUG(" Inserting value [" << V->getType() << "] = " << V << " slot=" <<
+ DestSlot << " [");
+ // G = Global, F = Function, A = Alias, I = IFunc, o = other
+ ST_DEBUG((isa<GlobalVariable>(V) ? 'G' :
+ (isa<Function>(V) ? 'F' :
+ (isa<GlobalAlias>(V) ? 'A' :
+ (isa<GlobalIFunc>(V) ? 'I' : 'o')))) << "]\n");
+}
+
+/// CreateFunctionSlot - Create a new slot for the specified value if it has
+/// no name.
+void SlotTracker::CreateFunctionSlot(const Value *V) {
+ assert(!V->getType()->isVoidTy() && !V->hasName() && "Doesn't need a slot!");
+
+ unsigned DestSlot = fNext++;
+ fMap[V] = DestSlot;
+
+ // G = Global, F = Function, o = other
+ ST_DEBUG(" Inserting value [" << V->getType() << "] = " << V << " slot=" <<
+ DestSlot << " [o]\n");
+}
+
+/// CreateMetadataSlot - Insert the specified MDNode* into the slot table.
+void SlotTracker::CreateMetadataSlot(const MDNode *N) {
+ assert(N && "Can't insert a null Value into SlotTracker!");
+
+ // Don't make slots for DIExpressions or DIArgLists. We just print them inline
+ // everywhere.
+ if (isa<DIExpression>(N) || isa<DIArgList>(N))
+ return;
+
+ unsigned DestSlot = mdnNext;
+ if (!mdnMap.insert(std::make_pair(N, DestSlot)).second)
+ return;
+ ++mdnNext;
+
+ // Recursively add any MDNodes referenced by operands.
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+ if (const MDNode *Op = dyn_cast_or_null<MDNode>(N->getOperand(i)))
+ CreateMetadataSlot(Op);
+}
+
+void SlotTracker::CreateAttributeSetSlot(AttributeSet AS) {
+ assert(AS.hasAttributes() && "Doesn't need a slot!");
+
+ as_iterator I = asMap.find(AS);
+ if (I != asMap.end())
+ return;
+
+ unsigned DestSlot = asNext++;
+ asMap[AS] = DestSlot;
+}
+
+/// Create a new slot for the specified Module
+void SlotTracker::CreateModulePathSlot(StringRef Path) {
+ ModulePathMap[Path] = ModulePathNext++;
+}
+
+/// Create a new slot for the specified GUID
+void SlotTracker::CreateGUIDSlot(GlobalValue::GUID GUID) {
+ GUIDMap[GUID] = GUIDNext++;
+}
+
+/// Create a new slot for the specified Id
+void SlotTracker::CreateTypeIdSlot(StringRef Id) {
+ TypeIdMap[Id] = TypeIdNext++;
+}
+
+namespace {
+/// Common instances used by most of the printer functions.
+struct AsmWriterContext {
+ TypePrinting *TypePrinter = nullptr;
+ SlotTracker *Machine = nullptr;
+ const Module *Context = nullptr;
+
+ AsmWriterContext(TypePrinting *TP, SlotTracker *ST, const Module *M = nullptr)
+ : TypePrinter(TP), Machine(ST), Context(M) {}
+
+ static AsmWriterContext &getEmpty() {
+ static AsmWriterContext EmptyCtx(nullptr, nullptr);
+ return EmptyCtx;
+ }
+
+  /// A callback that will be triggered when the underlying printer
+  /// prints a Metadata as an operand.
+ virtual void onWriteMetadataAsOperand(const Metadata *) {}
+
+ virtual ~AsmWriterContext() = default;
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AsmWriter Implementation
+//===----------------------------------------------------------------------===//
+
+static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
+ AsmWriterContext &WriterCtx);
+
+static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
+ AsmWriterContext &WriterCtx,
+ bool FromValue = false);
+
+static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
+ if (const FPMathOperator *FPO = dyn_cast<const FPMathOperator>(U))
+ Out << FPO->getFastMathFlags();
+
+ if (const OverflowingBinaryOperator *OBO =
+ dyn_cast<OverflowingBinaryOperator>(U)) {
+ if (OBO->hasNoUnsignedWrap())
+ Out << " nuw";
+ if (OBO->hasNoSignedWrap())
+ Out << " nsw";
+ } else if (const PossiblyExactOperator *Div =
+ dyn_cast<PossiblyExactOperator>(U)) {
+ if (Div->isExact())
+ Out << " exact";
+ } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+ if (GEP->isInBounds())
+ Out << " inbounds";
+ }
+}
+
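The flags written by WriteOptimizationInfo above come straight from the
operator subclasses. A hedged sketch (builder B and operands X, Y are
assumed): an add created with both no-wrap flags later prints as
"add nuw nsw":

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *makeFlaggedAdd(IRBuilder<> &B, Value *X, Value *Y) {
      // Prints later as: %sum = add nuw nsw i32 %x, %y
      return B.CreateAdd(X, Y, "sum", /*HasNUW=*/true, /*HasNSW=*/true);
    }
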
+static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
+ AsmWriterContext &WriterCtx) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+ if (CI->getType()->isIntegerTy(1)) {
+ Out << (CI->getZExtValue() ? "true" : "false");
+ return;
+ }
+ Out << CI->getValue();
+ return;
+ }
+
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+ const APFloat &APF = CFP->getValueAPF();
+ if (&APF.getSemantics() == &APFloat::IEEEsingle() ||
+ &APF.getSemantics() == &APFloat::IEEEdouble()) {
+ // We would like to output the FP constant value in exponential notation,
+ // but we cannot do this if doing so will lose precision. Check here to
+ // make sure that we only output it in exponential format if we can parse
+ // the value back and get the same value.
+ //
+ bool ignored;
+ bool isDouble = &APF.getSemantics() == &APFloat::IEEEdouble();
+ bool isInf = APF.isInfinity();
+ bool isNaN = APF.isNaN();
+ if (!isInf && !isNaN) {
+ double Val = APF.convertToDouble();
+ SmallString<128> StrVal;
+ APF.toString(StrVal, 6, 0, false);
+      // Check to make sure that the stringized number is not some string like
+      // "Inf" or "NaN" that atof will accept, but the lexer will not. Check
+ // that the string matches the "[-+]?[0-9]" regex.
+ //
+ assert((isDigit(StrVal[0]) || ((StrVal[0] == '-' || StrVal[0] == '+') &&
+ isDigit(StrVal[1]))) &&
+ "[-+]?[0-9] regex does not match!");
+ // Reparse stringized version!
+ if (APFloat(APFloat::IEEEdouble(), StrVal).convertToDouble() == Val) {
+ Out << StrVal;
+ return;
+ }
+ }
+ // Otherwise we could not reparse it to exactly the same value, so we must
+ // output the string in hexadecimal format! Note that loading and storing
+ // floating point types changes the bits of NaNs on some hosts, notably
+ // x86, so we must not use these types.
+ static_assert(sizeof(double) == sizeof(uint64_t),
+ "assuming that double is 64 bits!");
+ APFloat apf = APF;
+ // Floats are represented in ASCII IR as double, convert.
+ // FIXME: We should allow 32-bit hex float and remove this.
+ if (!isDouble) {
+ // A signaling NaN is quieted on conversion, so we need to recreate the
+ // expected value after convert (quiet bit of the payload is clear).
+ bool IsSNAN = apf.isSignaling();
+ apf.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
+ &ignored);
+ if (IsSNAN) {
+ APInt Payload = apf.bitcastToAPInt();
+ apf = APFloat::getSNaN(APFloat::IEEEdouble(), apf.isNegative(),
+ &Payload);
+ }
+ }
+ Out << format_hex(apf.bitcastToAPInt().getZExtValue(), 0, /*Upper=*/true);
+ return;
+ }
+
+ // Either half, bfloat or some form of long double.
+ // These appear as a magic letter identifying the type, then a
+ // fixed number of hex digits.
+ Out << "0x";
+ APInt API = APF.bitcastToAPInt();
+ if (&APF.getSemantics() == &APFloat::x87DoubleExtended()) {
+ Out << 'K';
+ Out << format_hex_no_prefix(API.getHiBits(16).getZExtValue(), 4,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ return;
+ } else if (&APF.getSemantics() == &APFloat::IEEEquad()) {
+ Out << 'L';
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ } else if (&APF.getSemantics() == &APFloat::PPCDoubleDouble()) {
+ Out << 'M';
+ Out << format_hex_no_prefix(API.getLoBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ Out << format_hex_no_prefix(API.getHiBits(64).getZExtValue(), 16,
+ /*Upper=*/true);
+ } else if (&APF.getSemantics() == &APFloat::IEEEhalf()) {
+ Out << 'H';
+ Out << format_hex_no_prefix(API.getZExtValue(), 4,
+ /*Upper=*/true);
+ } else if (&APF.getSemantics() == &APFloat::BFloat()) {
+ Out << 'R';
+ Out << format_hex_no_prefix(API.getZExtValue(), 4,
+ /*Upper=*/true);
+ } else
+ llvm_unreachable("Unsupported floating point type");
+ return;
+ }
+
+ if (isa<ConstantAggregateZero>(CV)) {
+ Out << "zeroinitializer";
+ return;
+ }
+
+ if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
+ Out << "blockaddress(";
+ WriteAsOperandInternal(Out, BA->getFunction(), WriterCtx);
+ Out << ", ";
+ WriteAsOperandInternal(Out, BA->getBasicBlock(), WriterCtx);
+ Out << ")";
+ return;
+ }
+
+ if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(CV)) {
+ Out << "dso_local_equivalent ";
+ WriteAsOperandInternal(Out, Equiv->getGlobalValue(), WriterCtx);
+ return;
+ }
+
+ if (const auto *NC = dyn_cast<NoCFIValue>(CV)) {
+ Out << "no_cfi ";
+ WriteAsOperandInternal(Out, NC->getGlobalValue(), WriterCtx);
+ return;
+ }
+
+ if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
+ Type *ETy = CA->getType()->getElementType();
+ Out << '[';
+ WriterCtx.TypePrinter->print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getOperand(0), WriterCtx);
+ for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) {
+ Out << ", ";
+ WriterCtx.TypePrinter->print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getOperand(i), WriterCtx);
+ }
+ Out << ']';
+ return;
+ }
+
+ if (const ConstantDataArray *CA = dyn_cast<ConstantDataArray>(CV)) {
+ // As a special case, print the array as a string if it is an array of
+ // i8 with ConstantInt values.
+ if (CA->isString()) {
+ Out << "c\"";
+ printEscapedString(CA->getAsString(), Out);
+ Out << '"';
+ return;
+ }
+
+ Type *ETy = CA->getType()->getElementType();
+ Out << '[';
+ WriterCtx.TypePrinter->print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getElementAsConstant(0), WriterCtx);
+ for (unsigned i = 1, e = CA->getNumElements(); i != e; ++i) {
+ Out << ", ";
+ WriterCtx.TypePrinter->print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CA->getElementAsConstant(i), WriterCtx);
+ }
+ Out << ']';
+ return;
+ }
+
+ if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
+ if (CS->getType()->isPacked())
+ Out << '<';
+ Out << '{';
+ unsigned N = CS->getNumOperands();
+ if (N) {
+ Out << ' ';
+ WriterCtx.TypePrinter->print(CS->getOperand(0)->getType(), Out);
+ Out << ' ';
+
+ WriteAsOperandInternal(Out, CS->getOperand(0), WriterCtx);
+
+ for (unsigned i = 1; i < N; i++) {
+ Out << ", ";
+ WriterCtx.TypePrinter->print(CS->getOperand(i)->getType(), Out);
+ Out << ' ';
+
+ WriteAsOperandInternal(Out, CS->getOperand(i), WriterCtx);
+ }
+ Out << ' ';
+ }
+
+ Out << '}';
+ if (CS->getType()->isPacked())
+ Out << '>';
+ return;
+ }
+
+ if (isa<ConstantVector>(CV) || isa<ConstantDataVector>(CV)) {
+ auto *CVVTy = cast<FixedVectorType>(CV->getType());
+ Type *ETy = CVVTy->getElementType();
+ Out << '<';
+ WriterCtx.TypePrinter->print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CV->getAggregateElement(0U), WriterCtx);
+ for (unsigned i = 1, e = CVVTy->getNumElements(); i != e; ++i) {
+ Out << ", ";
+ WriterCtx.TypePrinter->print(ETy, Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, CV->getAggregateElement(i), WriterCtx);
+ }
+ Out << '>';
+ return;
+ }
+
+ if (isa<ConstantPointerNull>(CV)) {
+ Out << "null";
+ return;
+ }
+
+ if (isa<ConstantTokenNone>(CV)) {
+ Out << "none";
+ return;
+ }
+
+ if (isa<PoisonValue>(CV)) {
+ Out << "poison";
+ return;
+ }
+
+ if (isa<UndefValue>(CV)) {
+ Out << "undef";
+ return;
+ }
+
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+ Out << CE->getOpcodeName();
+ WriteOptimizationInfo(Out, CE);
+ if (CE->isCompare())
+ Out << ' ' << CmpInst::getPredicateName(
+ static_cast<CmpInst::Predicate>(CE->getPredicate()));
+ Out << " (";
+
+ Optional<unsigned> InRangeOp;
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
+ WriterCtx.TypePrinter->print(GEP->getSourceElementType(), Out);
+ Out << ", ";
+ InRangeOp = GEP->getInRangeIndex();
+ if (InRangeOp)
+ ++*InRangeOp;
+ }
+
+ for (User::const_op_iterator OI = CE->op_begin(); OI != CE->op_end(); ++OI) {
+ if (InRangeOp && unsigned(OI - CE->op_begin()) == *InRangeOp)
+ Out << "inrange ";
+ WriterCtx.TypePrinter->print((*OI)->getType(), Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, *OI, WriterCtx);
+ if (OI+1 != CE->op_end())
+ Out << ", ";
+ }
+
+ if (CE->isCast()) {
+ Out << " to ";
+ WriterCtx.TypePrinter->print(CE->getType(), Out);
+ }
+
+ if (CE->getOpcode() == Instruction::ShuffleVector)
+ PrintShuffleMask(Out, CE->getType(), CE->getShuffleMask());
+
+ Out << ')';
+ return;
+ }
+
+ Out << "<placeholder or erroneous Constant>";
+}
+
+static void writeMDTuple(raw_ostream &Out, const MDTuple *Node,
+ AsmWriterContext &WriterCtx) {
+ Out << "!{";
+ for (unsigned mi = 0, me = Node->getNumOperands(); mi != me; ++mi) {
+ const Metadata *MD = Node->getOperand(mi);
+ if (!MD)
+ Out << "null";
+ else if (auto *MDV = dyn_cast<ValueAsMetadata>(MD)) {
+ Value *V = MDV->getValue();
+ WriterCtx.TypePrinter->print(V->getType(), Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, V, WriterCtx);
+ } else {
+ WriteAsOperandInternal(Out, MD, WriterCtx);
+ WriterCtx.onWriteMetadataAsOperand(MD);
+ }
+ if (mi + 1 != me)
+ Out << ", ";
+ }
+
+ Out << "}";
+}
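+// Illustrative output (operand values are hypothetical): a tuple holding a
+// constant, a string, a null operand, and a reference to another node
+// prints as !{i32 4, !"key", null, !7}.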
+
+namespace {
+
+struct FieldSeparator {
+ bool Skip = true;
+ const char *Sep;
+
+ FieldSeparator(const char *Sep = ", ") : Sep(Sep) {}
+};
+
+raw_ostream &operator<<(raw_ostream &OS, FieldSeparator &FS) {
+ if (FS.Skip) {
+ FS.Skip = false;
+ return OS;
+ }
+ return OS << FS.Sep;
+}
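+// Illustrative usage: streaming a FieldSeparator prints nothing on its
+// first use and the separator on every later use, so
+//   FieldSeparator FS;
+//   for (StringRef S : {"a", "b", "c"})
+//     OS << FS << S;
+// prints "a, b, c".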
+
+struct MDFieldPrinter {
+ raw_ostream &Out;
+ FieldSeparator FS;
+ AsmWriterContext &WriterCtx;
+
+ explicit MDFieldPrinter(raw_ostream &Out)
+ : Out(Out), WriterCtx(AsmWriterContext::getEmpty()) {}
+ MDFieldPrinter(raw_ostream &Out, AsmWriterContext &Ctx)
+ : Out(Out), WriterCtx(Ctx) {}
+
+ void printTag(const DINode *N);
+ void printMacinfoType(const DIMacroNode *N);
+ void printChecksum(const DIFile::ChecksumInfo<StringRef> &N);
+ void printString(StringRef Name, StringRef Value,
+ bool ShouldSkipEmpty = true);
+ void printMetadata(StringRef Name, const Metadata *MD,
+ bool ShouldSkipNull = true);
+ template <class IntTy>
+ void printInt(StringRef Name, IntTy Int, bool ShouldSkipZero = true);
+ void printAPInt(StringRef Name, const APInt &Int, bool IsUnsigned,
+ bool ShouldSkipZero);
+ void printBool(StringRef Name, bool Value, Optional<bool> Default = None);
+ void printDIFlags(StringRef Name, DINode::DIFlags Flags);
+ void printDISPFlags(StringRef Name, DISubprogram::DISPFlags Flags);
+ template <class IntTy, class Stringifier>
+ void printDwarfEnum(StringRef Name, IntTy Value, Stringifier toString,
+ bool ShouldSkipZero = true);
+ void printEmissionKind(StringRef Name, DICompileUnit::DebugEmissionKind EK);
+ void printNameTableKind(StringRef Name,
+ DICompileUnit::DebugNameTableKind NTK);
+};
+
+} // end anonymous namespace
+
+void MDFieldPrinter::printTag(const DINode *N) {
+ Out << FS << "tag: ";
+ auto Tag = dwarf::TagString(N->getTag());
+ if (!Tag.empty())
+ Out << Tag;
+ else
+ Out << N->getTag();
+}
+
+void MDFieldPrinter::printMacinfoType(const DIMacroNode *N) {
+ Out << FS << "type: ";
+ auto Type = dwarf::MacinfoString(N->getMacinfoType());
+ if (!Type.empty())
+ Out << Type;
+ else
+ Out << N->getMacinfoType();
+}
+
+void MDFieldPrinter::printChecksum(
+ const DIFile::ChecksumInfo<StringRef> &Checksum) {
+ Out << FS << "checksumkind: " << Checksum.getKindAsString();
+ printString("checksum", Checksum.Value, /* ShouldSkipEmpty */ false);
+}
+
+void MDFieldPrinter::printString(StringRef Name, StringRef Value,
+ bool ShouldSkipEmpty) {
+ if (ShouldSkipEmpty && Value.empty())
+ return;
+
+ Out << FS << Name << ": \"";
+ printEscapedString(Value, Out);
+ Out << "\"";
+}
+
+static void writeMetadataAsOperand(raw_ostream &Out, const Metadata *MD,
+ AsmWriterContext &WriterCtx) {
+ if (!MD) {
+ Out << "null";
+ return;
+ }
+ WriteAsOperandInternal(Out, MD, WriterCtx);
+ WriterCtx.onWriteMetadataAsOperand(MD);
+}
+
+void MDFieldPrinter::printMetadata(StringRef Name, const Metadata *MD,
+ bool ShouldSkipNull) {
+ if (ShouldSkipNull && !MD)
+ return;
+
+ Out << FS << Name << ": ";
+ writeMetadataAsOperand(Out, MD, WriterCtx);
+}
+
+template <class IntTy>
+void MDFieldPrinter::printInt(StringRef Name, IntTy Int, bool ShouldSkipZero) {
+ if (ShouldSkipZero && !Int)
+ return;
+
+ Out << FS << Name << ": " << Int;
+}
+
+void MDFieldPrinter::printAPInt(StringRef Name, const APInt &Int,
+ bool IsUnsigned, bool ShouldSkipZero) {
+ if (ShouldSkipZero && Int.isZero())
+ return;
+
+ Out << FS << Name << ": ";
+ Int.print(Out, !IsUnsigned);
+}
+
+void MDFieldPrinter::printBool(StringRef Name, bool Value,
+ Optional<bool> Default) {
+ if (Default && Value == *Default)
+ return;
+ Out << FS << Name << ": " << (Value ? "true" : "false");
+}
+
+void MDFieldPrinter::printDIFlags(StringRef Name, DINode::DIFlags Flags) {
+ if (!Flags)
+ return;
+
+ Out << FS << Name << ": ";
+
+ SmallVector<DINode::DIFlags, 8> SplitFlags;
+ auto Extra = DINode::splitFlags(Flags, SplitFlags);
+
+ FieldSeparator FlagsFS(" | ");
+ for (auto F : SplitFlags) {
+ auto StringF = DINode::getFlagString(F);
+ assert(!StringF.empty() && "Expected valid flag");
+ Out << FlagsFS << StringF;
+ }
+ if (Extra || SplitFlags.empty())
+ Out << FlagsFS << Extra;
+}
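+// Illustrative output (flag names are arbitrary examples):
+//   flags: DIFlagArtificial | DIFlagVirtual
+// Any bits without a known flag name are printed as a trailing integer.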
+
+void MDFieldPrinter::printDISPFlags(StringRef Name,
+ DISubprogram::DISPFlags Flags) {
+ // Always print this field, because omitting the flags entirely in the IR
+ // would be interpreted as old-style isDefinition: true.
+ Out << FS << Name << ": ";
+
+ if (!Flags) {
+ Out << 0;
+ return;
+ }
+
+ SmallVector<DISubprogram::DISPFlags, 8> SplitFlags;
+ auto Extra = DISubprogram::splitFlags(Flags, SplitFlags);
+
+ FieldSeparator FlagsFS(" | ");
+ for (auto F : SplitFlags) {
+ auto StringF = DISubprogram::getFlagString(F);
+ assert(!StringF.empty() && "Expected valid flag");
+ Out << FlagsFS << StringF;
+ }
+ if (Extra || SplitFlags.empty())
+ Out << FlagsFS << Extra;
+}
+
+void MDFieldPrinter::printEmissionKind(StringRef Name,
+ DICompileUnit::DebugEmissionKind EK) {
+ Out << FS << Name << ": " << DICompileUnit::emissionKindString(EK);
+}
+
+void MDFieldPrinter::printNameTableKind(StringRef Name,
+ DICompileUnit::DebugNameTableKind NTK) {
+ if (NTK == DICompileUnit::DebugNameTableKind::Default)
+ return;
+ Out << FS << Name << ": " << DICompileUnit::nameTableKindString(NTK);
+}
+
+template <class IntTy, class Stringifier>
+void MDFieldPrinter::printDwarfEnum(StringRef Name, IntTy Value,
+ Stringifier toString, bool ShouldSkipZero) {
+ if (!Value)
+ return;
+
+ Out << FS << Name << ": ";
+ auto S = toString(Value);
+ if (!S.empty())
+ Out << S;
+ else
+ Out << Value;
+}
+
+static void writeGenericDINode(raw_ostream &Out, const GenericDINode *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!GenericDINode(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printTag(N);
+ Printer.printString("header", N->getHeader());
+ if (N->getNumDwarfOperands()) {
+ Out << Printer.FS << "operands: {";
+ FieldSeparator IFS;
+ for (auto &I : N->dwarf_operands()) {
+ Out << IFS;
+ writeMetadataAsOperand(Out, I, WriterCtx);
+ }
+ Out << "}";
+ }
+ Out << ")";
+}
+
+static void writeDILocation(raw_ostream &Out, const DILocation *DL,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DILocation(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ // Always output the line, since 0 is a relevant and important value for it.
+ Printer.printInt("line", DL->getLine(), /* ShouldSkipZero */ false);
+ Printer.printInt("column", DL->getColumn());
+ Printer.printMetadata("scope", DL->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("inlinedAt", DL->getRawInlinedAt());
+ Printer.printBool("isImplicitCode", DL->isImplicitCode(),
+ /* Default */ false);
+ Out << ")";
+}
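+// Typical output (metadata slot numbers are hypothetical):
+//   !DILocation(line: 2, column: 5, scope: !7, inlinedAt: !9)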
+
+static void writeDISubrange(raw_ostream &Out, const DISubrange *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DISubrange(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+
+ auto *Count = N->getRawCountNode();
+ if (auto *CE = dyn_cast_or_null<ConstantAsMetadata>(Count)) {
+ auto *CV = cast<ConstantInt>(CE->getValue());
+ Printer.printInt("count", CV->getSExtValue(),
+ /* ShouldSkipZero */ false);
+ } else
+ Printer.printMetadata("count", Count, /*ShouldSkipNull */ true);
+
+ // A lowerBound of constant 0 should not be skipped, since it is different
+ // from an unspecified lower bound (= nullptr).
+ auto *LBound = N->getRawLowerBound();
+ if (auto *LE = dyn_cast_or_null<ConstantAsMetadata>(LBound)) {
+ auto *LV = cast<ConstantInt>(LE->getValue());
+ Printer.printInt("lowerBound", LV->getSExtValue(),
+ /* ShouldSkipZero */ false);
+ } else
+ Printer.printMetadata("lowerBound", LBound, /*ShouldSkipNull */ true);
+
+ auto *UBound = N->getRawUpperBound();
+ if (auto *UE = dyn_cast_or_null<ConstantAsMetadata>(UBound)) {
+ auto *UV = cast<ConstantInt>(UE->getValue());
+ Printer.printInt("upperBound", UV->getSExtValue(),
+ /* ShouldSkipZero */ false);
+ } else
+ Printer.printMetadata("upperBound", UBound, /*ShouldSkipNull */ true);
+
+ auto *Stride = N->getRawStride();
+ if (auto *SE = dyn_cast_or_null<ConstantAsMetadata>(Stride)) {
+ auto *SV = cast<ConstantInt>(SE->getValue());
+ Printer.printInt("stride", SV->getSExtValue(), /* ShouldSkipZero */ false);
+ } else
+ Printer.printMetadata("stride", Stride, /*ShouldSkipNull */ true);
+
+ Out << ")";
+}
+
+static void writeDIGenericSubrange(raw_ostream &Out, const DIGenericSubrange *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIGenericSubrange(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+
+ auto IsConstant = [&](Metadata *Bound) -> bool {
+ if (auto *BE = dyn_cast_or_null<DIExpression>(Bound)) {
+ return BE->isConstant() &&
+ DIExpression::SignedOrUnsignedConstant::SignedConstant ==
+ *BE->isConstant();
+ }
+ return false;
+ };
+
+ auto GetConstant = [&](Metadata *Bound) -> int64_t {
+ assert(IsConstant(Bound) && "Expected constant");
+ auto *BE = dyn_cast_or_null<DIExpression>(Bound);
+ return static_cast<int64_t>(BE->getElement(1));
+ };
+
+ auto *Count = N->getRawCountNode();
+ if (IsConstant(Count))
+ Printer.printInt("count", GetConstant(Count),
+ /* ShouldSkipZero */ false);
+ else
+ Printer.printMetadata("count", Count, /*ShouldSkipNull */ true);
+
+ auto *LBound = N->getRawLowerBound();
+ if (IsConstant(LBound))
+ Printer.printInt("lowerBound", GetConstant(LBound),
+ /* ShouldSkipZero */ false);
+ else
+ Printer.printMetadata("lowerBound", LBound, /*ShouldSkipNull */ true);
+
+ auto *UBound = N->getRawUpperBound();
+ if (IsConstant(UBound))
+ Printer.printInt("upperBound", GetConstant(UBound),
+ /* ShouldSkipZero */ false);
+ else
+ Printer.printMetadata("upperBound", UBound, /*ShouldSkipNull */ true);
+
+ auto *Stride = N->getRawStride();
+ if (IsConstant(Stride))
+ Printer.printInt("stride", GetConstant(Stride),
+ /* ShouldSkipZero */ false);
+ else
+ Printer.printMetadata("stride", Stride, /*ShouldSkipNull */ true);
+
+ Out << ")";
+}
+
+static void writeDIEnumerator(raw_ostream &Out, const DIEnumerator *N,
+ AsmWriterContext &) {
+ Out << "!DIEnumerator(";
+ MDFieldPrinter Printer(Out);
+ Printer.printString("name", N->getName(), /* ShouldSkipEmpty */ false);
+ Printer.printAPInt("value", N->getValue(), N->isUnsigned(),
+ /*ShouldSkipZero=*/false);
+ if (N->isUnsigned())
+ Printer.printBool("isUnsigned", true);
+ Out << ")";
+}
+
+static void writeDIBasicType(raw_ostream &Out, const DIBasicType *N,
+ AsmWriterContext &) {
+ Out << "!DIBasicType(";
+ MDFieldPrinter Printer(Out);
+ if (N->getTag() != dwarf::DW_TAG_base_type)
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printInt("size", N->getSizeInBits());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printDwarfEnum("encoding", N->getEncoding(),
+ dwarf::AttributeEncodingString);
+ Printer.printDIFlags("flags", N->getFlags());
+ Out << ")";
+}
+
+static void writeDIStringType(raw_ostream &Out, const DIStringType *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIStringType(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ if (N->getTag() != dwarf::DW_TAG_string_type)
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("stringLength", N->getRawStringLength());
+ Printer.printMetadata("stringLengthExpression", N->getRawStringLengthExp());
+ Printer.printMetadata("stringLocationExpression",
+ N->getRawStringLocationExp());
+ Printer.printInt("size", N->getSizeInBits());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printDwarfEnum("encoding", N->getEncoding(),
+ dwarf::AttributeEncodingString);
+ Out << ")";
+}
+
+static void writeDIDerivedType(raw_ostream &Out, const DIDerivedType *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIDerivedType(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("baseType", N->getRawBaseType(),
+ /* ShouldSkipNull */ false);
+ Printer.printInt("size", N->getSizeInBits());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printInt("offset", N->getOffsetInBits());
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printMetadata("extraData", N->getRawExtraData());
+ if (const auto &DWARFAddressSpace = N->getDWARFAddressSpace())
+ Printer.printInt("dwarfAddressSpace", *DWARFAddressSpace,
+ /* ShouldSkipZero */ false);
+ Printer.printMetadata("annotations", N->getRawAnnotations());
+ Out << ")";
+}
+
+static void writeDICompositeType(raw_ostream &Out, const DICompositeType *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DICompositeType(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("baseType", N->getRawBaseType());
+ Printer.printInt("size", N->getSizeInBits());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printInt("offset", N->getOffsetInBits());
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printMetadata("elements", N->getRawElements());
+ Printer.printDwarfEnum("runtimeLang", N->getRuntimeLang(),
+ dwarf::LanguageString);
+ Printer.printMetadata("vtableHolder", N->getRawVTableHolder());
+ Printer.printMetadata("templateParams", N->getRawTemplateParams());
+ Printer.printString("identifier", N->getIdentifier());
+ Printer.printMetadata("discriminator", N->getRawDiscriminator());
+ Printer.printMetadata("dataLocation", N->getRawDataLocation());
+ Printer.printMetadata("associated", N->getRawAssociated());
+ Printer.printMetadata("allocated", N->getRawAllocated());
+ if (auto *RankConst = N->getRankConst())
+ Printer.printInt("rank", RankConst->getSExtValue(),
+ /* ShouldSkipZero */ false);
+ else
+ Printer.printMetadata("rank", N->getRawRank(), /*ShouldSkipNull */ true);
+ Printer.printMetadata("annotations", N->getRawAnnotations());
+ Out << ")";
+}
+
+static void writeDISubroutineType(raw_ostream &Out, const DISubroutineType *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DISubroutineType(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printDwarfEnum("cc", N->getCC(), dwarf::ConventionString);
+ Printer.printMetadata("types", N->getRawTypeArray(),
+ /* ShouldSkipNull */ false);
+ Out << ")";
+}
+
+static void writeDIFile(raw_ostream &Out, const DIFile *N, AsmWriterContext &) {
+ Out << "!DIFile(";
+ MDFieldPrinter Printer(Out);
+ Printer.printString("filename", N->getFilename(),
+ /* ShouldSkipEmpty */ false);
+ Printer.printString("directory", N->getDirectory(),
+ /* ShouldSkipEmpty */ false);
+ // Print all values for checksum together, or not at all.
+ if (N->getChecksum())
+ Printer.printChecksum(*N->getChecksum());
+ Printer.printString("source", N->getSource().value_or(StringRef()),
+ /* ShouldSkipEmpty */ true);
+ Out << ")";
+}
+
+static void writeDICompileUnit(raw_ostream &Out, const DICompileUnit *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DICompileUnit(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printDwarfEnum("language", N->getSourceLanguage(),
+ dwarf::LanguageString, /* ShouldSkipZero */ false);
+ Printer.printMetadata("file", N->getRawFile(), /* ShouldSkipNull */ false);
+ Printer.printString("producer", N->getProducer());
+ Printer.printBool("isOptimized", N->isOptimized());
+ Printer.printString("flags", N->getFlags());
+ Printer.printInt("runtimeVersion", N->getRuntimeVersion(),
+ /* ShouldSkipZero */ false);
+ Printer.printString("splitDebugFilename", N->getSplitDebugFilename());
+ Printer.printEmissionKind("emissionKind", N->getEmissionKind());
+ Printer.printMetadata("enums", N->getRawEnumTypes());
+ Printer.printMetadata("retainedTypes", N->getRawRetainedTypes());
+ Printer.printMetadata("globals", N->getRawGlobalVariables());
+ Printer.printMetadata("imports", N->getRawImportedEntities());
+ Printer.printMetadata("macros", N->getRawMacros());
+ Printer.printInt("dwoId", N->getDWOId());
+ Printer.printBool("splitDebugInlining", N->getSplitDebugInlining(), true);
+ Printer.printBool("debugInfoForProfiling", N->getDebugInfoForProfiling(),
+ false);
+ Printer.printNameTableKind("nameTableKind", N->getNameTableKind());
+ Printer.printBool("rangesBaseAddress", N->getRangesBaseAddress(), false);
+ Printer.printString("sysroot", N->getSysRoot());
+ Printer.printString("sdk", N->getSDK());
+ Out << ")";
+}
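+// Abridged, illustrative output (operand slots and values are hypothetical):
+//   !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang",
+//                  isOptimized: true, runtimeVersion: 0,
+//                  emissionKind: FullDebug, enums: !2)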
+
+static void writeDISubprogram(raw_ostream &Out, const DISubprogram *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DISubprogram(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printString("name", N->getName());
+ Printer.printString("linkageName", N->getLinkageName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printInt("scopeLine", N->getScopeLine());
+ Printer.printMetadata("containingType", N->getRawContainingType());
+ if (N->getVirtuality() != dwarf::DW_VIRTUALITY_none ||
+ N->getVirtualIndex() != 0)
+ Printer.printInt("virtualIndex", N->getVirtualIndex(), false);
+ Printer.printInt("thisAdjustment", N->getThisAdjustment());
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printDISPFlags("spFlags", N->getSPFlags());
+ Printer.printMetadata("unit", N->getRawUnit());
+ Printer.printMetadata("templateParams", N->getRawTemplateParams());
+ Printer.printMetadata("declaration", N->getRawDeclaration());
+ Printer.printMetadata("retainedNodes", N->getRawRetainedNodes());
+ Printer.printMetadata("thrownTypes", N->getRawThrownTypes());
+ Printer.printMetadata("annotations", N->getRawAnnotations());
+ Printer.printString("targetFuncName", N->getTargetFuncName());
+ Out << ")";
+}
+
+static void writeDILexicalBlock(raw_ostream &Out, const DILexicalBlock *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DILexicalBlock(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printInt("column", N->getColumn());
+ Out << ")";
+}
+
+static void writeDILexicalBlockFile(raw_ostream &Out,
+ const DILexicalBlockFile *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DILexicalBlockFile(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("discriminator", N->getDiscriminator(),
+ /* ShouldSkipZero */ false);
+ Out << ")";
+}
+
+static void writeDINamespace(raw_ostream &Out, const DINamespace *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DINamespace(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printBool("exportSymbols", N->getExportSymbols(), false);
+ Out << ")";
+}
+
+static void writeDICommonBlock(raw_ostream &Out, const DICommonBlock *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DICommonBlock(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printMetadata("scope", N->getRawScope(), false);
+ Printer.printMetadata("declaration", N->getRawDecl(), false);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLineNo());
+ Out << ")";
+}
+
+static void writeDIMacro(raw_ostream &Out, const DIMacro *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIMacro(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printMacinfoType(N);
+ Printer.printInt("line", N->getLine());
+ Printer.printString("name", N->getName());
+ Printer.printString("value", N->getValue());
+ Out << ")";
+}
+
+static void writeDIMacroFile(raw_ostream &Out, const DIMacroFile *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIMacroFile(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("file", N->getRawFile(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("nodes", N->getRawElements());
+ Out << ")";
+}
+
+static void writeDIModule(raw_ostream &Out, const DIModule *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIModule(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printString("name", N->getName());
+ Printer.printString("configMacros", N->getConfigurationMacros());
+ Printer.printString("includePath", N->getIncludePath());
+ Printer.printString("apinotes", N->getAPINotesFile());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLineNo());
+ Printer.printBool("isDecl", N->getIsDecl(), /* Default */ false);
+ Out << ")";
+}
+
+static void writeDITemplateTypeParameter(raw_ostream &Out,
+ const DITemplateTypeParameter *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DITemplateTypeParameter(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("type", N->getRawType(), /* ShouldSkipNull */ false);
+ Printer.printBool("defaulted", N->isDefault(), /* Default= */ false);
+ Out << ")";
+}
+
+static void writeDITemplateValueParameter(raw_ostream &Out,
+ const DITemplateValueParameter *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DITemplateValueParameter(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ if (N->getTag() != dwarf::DW_TAG_template_value_parameter)
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printBool("defaulted", N->isDefault(), /* Default= */ false);
+ Printer.printMetadata("value", N->getValue(), /* ShouldSkipNull */ false);
+ Out << ")";
+}
+
+static void writeDIGlobalVariable(raw_ostream &Out, const DIGlobalVariable *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIGlobalVariable(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printString("name", N->getName());
+ Printer.printString("linkageName", N->getLinkageName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printBool("isLocal", N->isLocalToUnit());
+ Printer.printBool("isDefinition", N->isDefinition());
+ Printer.printMetadata("declaration", N->getRawStaticDataMemberDeclaration());
+ Printer.printMetadata("templateParams", N->getRawTemplateParams());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printMetadata("annotations", N->getRawAnnotations());
+ Out << ")";
+}
+
+static void writeDILocalVariable(raw_ostream &Out, const DILocalVariable *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DILocalVariable(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printString("name", N->getName());
+ Printer.printInt("arg", N->getArg());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("type", N->getRawType());
+ Printer.printDIFlags("flags", N->getFlags());
+ Printer.printInt("align", N->getAlignInBits());
+ Printer.printMetadata("annotations", N->getRawAnnotations());
+ Out << ")";
+}
+
+static void writeDILabel(raw_ostream &Out, const DILabel *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DILabel(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Out << ")";
+}
+
+static void writeDIExpression(raw_ostream &Out, const DIExpression *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIExpression(";
+ FieldSeparator FS;
+ if (N->isValid()) {
+ for (const DIExpression::ExprOperand &Op : N->expr_ops()) {
+ auto OpStr = dwarf::OperationEncodingString(Op.getOp());
+ assert(!OpStr.empty() && "Expected valid opcode");
+
+ Out << FS << OpStr;
+ if (Op.getOp() == dwarf::DW_OP_LLVM_convert) {
+ Out << FS << Op.getArg(0);
+ Out << FS << dwarf::AttributeEncodingString(Op.getArg(1));
+ } else {
+ for (unsigned A = 0, AE = Op.getNumArgs(); A != AE; ++A)
+ Out << FS << Op.getArg(A);
+ }
+ }
+ } else {
+ for (const auto &I : N->getElements())
+ Out << FS << I;
+ }
+ Out << ")";
+}
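+// Illustrative output for a valid expression (operands are arbitrary):
+//   !DIExpression(DW_OP_plus_uconst, 4, DW_OP_deref)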
+
+static void writeDIArgList(raw_ostream &Out, const DIArgList *N,
+ AsmWriterContext &WriterCtx,
+ bool FromValue = false) {
+ assert(FromValue &&
+ "Unexpected DIArgList metadata outside of value argument");
+ Out << "!DIArgList(";
+ FieldSeparator FS;
+ MDFieldPrinter Printer(Out, WriterCtx);
+ for (Metadata *Arg : N->getArgs()) {
+ Out << FS;
+ WriteAsOperandInternal(Out, Arg, WriterCtx, /* FromValue */ true);
+ }
+ Out << ")";
+}
+
+static void writeDIGlobalVariableExpression(raw_ostream &Out,
+ const DIGlobalVariableExpression *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIGlobalVariableExpression(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printMetadata("var", N->getVariable());
+ Printer.printMetadata("expr", N->getExpression());
+ Out << ")";
+}
+
+static void writeDIObjCProperty(raw_ostream &Out, const DIObjCProperty *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIObjCProperty(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printString("setter", N->getSetterName());
+ Printer.printString("getter", N->getGetterName());
+ Printer.printInt("attributes", N->getAttributes());
+ Printer.printMetadata("type", N->getRawType());
+ Out << ")";
+}
+
+static void writeDIImportedEntity(raw_ostream &Out, const DIImportedEntity *N,
+ AsmWriterContext &WriterCtx) {
+ Out << "!DIImportedEntity(";
+ MDFieldPrinter Printer(Out, WriterCtx);
+ Printer.printTag(N);
+ Printer.printString("name", N->getName());
+ Printer.printMetadata("scope", N->getRawScope(), /* ShouldSkipNull */ false);
+ Printer.printMetadata("entity", N->getRawEntity());
+ Printer.printMetadata("file", N->getRawFile());
+ Printer.printInt("line", N->getLine());
+ Printer.printMetadata("elements", N->getRawElements());
+ Out << ")";
+}
+
+static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
+ AsmWriterContext &Ctx) {
+ if (Node->isDistinct())
+ Out << "distinct ";
+ else if (Node->isTemporary())
+ Out << "<temporary!> "; // Handle broken code.
+
+ switch (Node->getMetadataID()) {
+ default:
+ llvm_unreachable("Expected uniquable MDNode");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ write##CLASS(Out, cast<CLASS>(Node), Ctx); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+// Full implementation of printing a Value as an operand with support for
+// TypePrinting, etc.
+static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
+ AsmWriterContext &WriterCtx) {
+ if (V->hasName()) {
+ PrintLLVMName(Out, V);
+ return;
+ }
+
+ const Constant *CV = dyn_cast<Constant>(V);
+ if (CV && !isa<GlobalValue>(CV)) {
+ assert(WriterCtx.TypePrinter && "Constants require TypePrinting!");
+ WriteConstantInternal(Out, CV, WriterCtx);
+ return;
+ }
+
+ if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
+ Out << "asm ";
+ if (IA->hasSideEffects())
+ Out << "sideeffect ";
+ if (IA->isAlignStack())
+ Out << "alignstack ";
+ // We don't emit the AD_ATT dialect as it's the assumed default.
+ if (IA->getDialect() == InlineAsm::AD_Intel)
+ Out << "inteldialect ";
+ if (IA->canThrow())
+ Out << "unwind ";
+ Out << '"';
+ printEscapedString(IA->getAsmString(), Out);
+ Out << "\", \"";
+ printEscapedString(IA->getConstraintString(), Out);
+ Out << '"';
+ return;
+ }
+
+ if (auto *MD = dyn_cast<MetadataAsValue>(V)) {
+ WriteAsOperandInternal(Out, MD->getMetadata(), WriterCtx,
+ /* FromValue */ true);
+ return;
+ }
+
+ char Prefix = '%';
+ int Slot;
+ auto *Machine = WriterCtx.Machine;
+ // If we have a SlotTracker, use it.
+ if (Machine) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ Slot = Machine->getGlobalSlot(GV);
+ Prefix = '@';
+ } else {
+ Slot = Machine->getLocalSlot(V);
+
+ // If the local value lookup didn't succeed, we may be referring to a
+ // value from a different function. Translate it, as this can happen
+ // when taking the address of a block in another function.
+ if (Slot == -1)
+ if ((Machine = createSlotTracker(V))) {
+ Slot = Machine->getLocalSlot(V);
+ delete Machine;
+ }
+ }
+ } else if ((Machine = createSlotTracker(V))) {
+ // Otherwise, create one to get the # and then destroy it.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ Slot = Machine->getGlobalSlot(GV);
+ Prefix = '@';
+ } else {
+ Slot = Machine->getLocalSlot(V);
+ }
+ delete Machine;
+ Machine = nullptr;
+ } else {
+ Slot = -1;
+ }
+
+ if (Slot != -1)
+ Out << Prefix << Slot;
+ else
+ Out << "<badref>";
+}
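+// Illustrative results: named values print as %name or @name, unnamed
+// values print as numbered slots such as %3 or @0, and values with no
+// slot print as <badref>.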
+
+static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
+ AsmWriterContext &WriterCtx,
+ bool FromValue) {
+ // Write DIExpressions and DIArgLists inline when used as a value. Improves
+ // readability of debug info intrinsics.
+ if (const DIExpression *Expr = dyn_cast<DIExpression>(MD)) {
+ writeDIExpression(Out, Expr, WriterCtx);
+ return;
+ }
+ if (const DIArgList *ArgList = dyn_cast<DIArgList>(MD)) {
+ writeDIArgList(Out, ArgList, WriterCtx, FromValue);
+ return;
+ }
+
+ if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+ std::unique_ptr<SlotTracker> MachineStorage;
+ SaveAndRestore<SlotTracker *> SARMachine(WriterCtx.Machine);
+ if (!WriterCtx.Machine) {
+ MachineStorage = std::make_unique<SlotTracker>(WriterCtx.Context);
+ WriterCtx.Machine = MachineStorage.get();
+ }
+ int Slot = WriterCtx.Machine->getMetadataSlot(N);
+ if (Slot == -1) {
+ if (const DILocation *Loc = dyn_cast<DILocation>(N)) {
+ writeDILocation(Out, Loc, WriterCtx);
+ return;
+ }
+ // Give the pointer value instead of "badref", since this comes up all
+ // the time when debugging.
+ Out << "<" << N << ">";
+ } else
+ Out << '!' << Slot;
+ return;
+ }
+
+ if (const MDString *MDS = dyn_cast<MDString>(MD)) {
+ Out << "!\"";
+ printEscapedString(MDS->getString(), Out);
+ Out << '"';
+ return;
+ }
+
+ auto *V = cast<ValueAsMetadata>(MD);
+ assert(WriterCtx.TypePrinter && "TypePrinter required for metadata values");
+ assert((FromValue || !isa<LocalAsMetadata>(V)) &&
+ "Unexpected function-local metadata outside of value argument");
+
+ WriterCtx.TypePrinter->print(V->getValue()->getType(), Out);
+ Out << ' ';
+ WriteAsOperandInternal(Out, V->getValue(), WriterCtx);
+}
+
+namespace {
+
+class AssemblyWriter {
+ formatted_raw_ostream &Out;
+ const Module *TheModule = nullptr;
+ const ModuleSummaryIndex *TheIndex = nullptr;
+ std::unique_ptr<SlotTracker> SlotTrackerStorage;
+ SlotTracker &Machine;
+ TypePrinting TypePrinter;
+ AssemblyAnnotationWriter *AnnotationWriter = nullptr;
+ SetVector<const Comdat *> Comdats;
+ bool IsForDebug;
+ bool ShouldPreserveUseListOrder;
+ UseListOrderMap UseListOrders;
+ SmallVector<StringRef, 8> MDNames;
+ /// Synchronization scope names registered with LLVMContext.
+ SmallVector<StringRef, 8> SSNs;
+ DenseMap<const GlobalValueSummary *, GlobalValue::GUID> SummaryToGUIDMap;
+
+public:
+ /// Construct an AssemblyWriter with an external SlotTracker
+ AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac, const Module *M,
+ AssemblyAnnotationWriter *AAW, bool IsForDebug,
+ bool ShouldPreserveUseListOrder = false);
+
+ AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac,
+ const ModuleSummaryIndex *Index, bool IsForDebug);
+
+ AsmWriterContext getContext() {
+ return AsmWriterContext(&TypePrinter, &Machine, TheModule);
+ }
+
+ void printMDNodeBody(const MDNode *MD);
+ void printNamedMDNode(const NamedMDNode *NMD);
+
+ void printModule(const Module *M);
+
+ void writeOperand(const Value *Op, bool PrintType);
+ void writeParamOperand(const Value *Operand, AttributeSet Attrs);
+ void writeOperandBundles(const CallBase *Call);
+ void writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID);
+ void writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID);
+ void writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SyncScope::ID SSID);
+
+ void writeAllMDNodes();
+ void writeMDNode(unsigned Slot, const MDNode *Node);
+ void writeAttribute(const Attribute &Attr, bool InAttrGroup = false);
+ void writeAttributeSet(const AttributeSet &AttrSet, bool InAttrGroup = false);
+ void writeAllAttributeGroups();
+
+ void printTypeIdentities();
+ void printGlobal(const GlobalVariable *GV);
+ void printAlias(const GlobalAlias *GA);
+ void printIFunc(const GlobalIFunc *GI);
+ void printComdat(const Comdat *C);
+ void printFunction(const Function *F);
+ void printArgument(const Argument *FA, AttributeSet Attrs);
+ void printBasicBlock(const BasicBlock *BB);
+ void printInstructionLine(const Instruction &I);
+ void printInstruction(const Instruction &I);
+
+ void printUseListOrder(const Value *V, const std::vector<unsigned> &Shuffle);
+ void printUseLists(const Function *F);
+
+ void printModuleSummaryIndex();
+ void printSummaryInfo(unsigned Slot, const ValueInfo &VI);
+ void printSummary(const GlobalValueSummary &Summary);
+ void printAliasSummary(const AliasSummary *AS);
+ void printGlobalVarSummary(const GlobalVarSummary *GS);
+ void printFunctionSummary(const FunctionSummary *FS);
+ void printTypeIdSummary(const TypeIdSummary &TIS);
+ void printTypeIdCompatibleVtableSummary(const TypeIdCompatibleVtableInfo &TI);
+ void printTypeTestResolution(const TypeTestResolution &TTRes);
+ void printArgs(const std::vector<uint64_t> &Args);
+ void printWPDRes(const WholeProgramDevirtResolution &WPDRes);
+ void printTypeIdInfo(const FunctionSummary::TypeIdInfo &TIDInfo);
+ void printVFuncId(const FunctionSummary::VFuncId VFId);
+ void
+ printNonConstVCalls(const std::vector<FunctionSummary::VFuncId> &VCallList,
+ const char *Tag);
+ void
+ printConstVCalls(const std::vector<FunctionSummary::ConstVCall> &VCallList,
+ const char *Tag);
+
+private:
+ /// Print out metadata attachments.
+ void printMetadataAttachments(
+ const SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs,
+ StringRef Separator);
+
+ // printInfoComment - Print a little comment after the instruction indicating
+ // which slot it occupies.
+ void printInfoComment(const Value &V);
+
+ // printGCRelocateComment - print comment after call to the gc.relocate
+ // intrinsic indicating base and derived pointer names.
+ void printGCRelocateComment(const GCRelocateInst &Relocate);
+};
+
+} // end anonymous namespace
+
+AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac,
+ const Module *M, AssemblyAnnotationWriter *AAW,
+ bool IsForDebug, bool ShouldPreserveUseListOrder)
+ : Out(o), TheModule(M), Machine(Mac), TypePrinter(M), AnnotationWriter(AAW),
+ IsForDebug(IsForDebug),
+ ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {
+ if (!TheModule)
+ return;
+ for (const GlobalObject &GO : TheModule->global_objects())
+ if (const Comdat *C = GO.getComdat())
+ Comdats.insert(C);
+}
+
+AssemblyWriter::AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac,
+ const ModuleSummaryIndex *Index, bool IsForDebug)
+ : Out(o), TheIndex(Index), Machine(Mac), TypePrinter(/*Module=*/nullptr),
+ IsForDebug(IsForDebug), ShouldPreserveUseListOrder(false) {}
+
+void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
+ if (!Operand) {
+ Out << "<null operand!>";
+ return;
+ }
+ if (PrintType) {
+ TypePrinter.print(Operand->getType(), Out);
+ Out << ' ';
+ }
+ auto WriterCtx = getContext();
+ WriteAsOperandInternal(Out, Operand, WriterCtx);
+}
+
+void AssemblyWriter::writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID) {
+ switch (SSID) {
+ case SyncScope::System: {
+ break;
+ }
+ default: {
+ if (SSNs.empty())
+ Context.getSyncScopeNames(SSNs);
+
+ Out << " syncscope(\"";
+ printEscapedString(SSNs[SSID], Out);
+ Out << "\")";
+ break;
+ }
+ }
+}
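+// Illustrative output: the default (system) scope prints nothing, while a
+// named scope such as "singlethread" prints as syncscope("singlethread").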
+
+void AssemblyWriter::writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID) {
+ if (Ordering == AtomicOrdering::NotAtomic)
+ return;
+
+ writeSyncScope(Context, SSID);
+ Out << " " << toIRString(Ordering);
+}
+
+void AssemblyWriter::writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SyncScope::ID SSID) {
+ assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
+ FailureOrdering != AtomicOrdering::NotAtomic);
+
+ writeSyncScope(Context, SSID);
+ Out << " " << toIRString(SuccessOrdering);
+ Out << " " << toIRString(FailureOrdering);
+}
+
+void AssemblyWriter::writeParamOperand(const Value *Operand,
+ AttributeSet Attrs) {
+ if (!Operand) {
+ Out << "<null operand!>";
+ return;
+ }
+
+ // Print the type
+ TypePrinter.print(Operand->getType(), Out);
+ // Print parameter attributes list
+ if (Attrs.hasAttributes()) {
+ Out << ' ';
+ writeAttributeSet(Attrs);
+ }
+ Out << ' ';
+ // Print the operand
+ auto WriterCtx = getContext();
+ WriteAsOperandInternal(Out, Operand, WriterCtx);
+}
+
+void AssemblyWriter::writeOperandBundles(const CallBase *Call) {
+ if (!Call->hasOperandBundles())
+ return;
+
+ Out << " [ ";
+
+ bool FirstBundle = true;
+ for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i) {
+ OperandBundleUse BU = Call->getOperandBundleAt(i);
+
+ if (!FirstBundle)
+ Out << ", ";
+ FirstBundle = false;
+
+ Out << '"';
+ printEscapedString(BU.getTagName(), Out);
+ Out << '"';
+
+ Out << '(';
+
+ bool FirstInput = true;
+ auto WriterCtx = getContext();
+ for (const auto &Input : BU.Inputs) {
+ if (!FirstInput)
+ Out << ", ";
+ FirstInput = false;
+
+ TypePrinter.print(Input->getType(), Out);
+ Out << " ";
+ WriteAsOperandInternal(Out, Input, WriterCtx);
+ }
+
+ Out << ')';
+ }
+
+ Out << " ]";
+}
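+// Illustrative output (bundle tag and inputs are arbitrary examples):
+//   [ "deopt"(i32 10, i32 20) ]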
+
+void AssemblyWriter::printModule(const Module *M) {
+ Machine.initializeIfNeeded();
+
+ if (ShouldPreserveUseListOrder)
+ UseListOrders = predictUseListOrder(M);
+
+ if (!M->getModuleIdentifier().empty() &&
+ // Don't print the ID if it will start a new line (which would
+ // require a comment char before it).
+ M->getModuleIdentifier().find('\n') == std::string::npos)
+ Out << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
+
+ if (!M->getSourceFileName().empty()) {
+ Out << "source_filename = \"";
+ printEscapedString(M->getSourceFileName(), Out);
+ Out << "\"\n";
+ }
+
+ const std::string &DL = M->getDataLayoutStr();
+ if (!DL.empty())
+ Out << "target datalayout = \"" << DL << "\"\n";
+ if (!M->getTargetTriple().empty())
+ Out << "target triple = \"" << M->getTargetTriple() << "\"\n";
+
+ if (!M->getModuleInlineAsm().empty()) {
+ Out << '\n';
+
+ // Split the string into lines, to make it easier to read the .ll file.
+ StringRef Asm = M->getModuleInlineAsm();
+ do {
+ StringRef Front;
+ std::tie(Front, Asm) = Asm.split('\n');
+
+ // We found a newline; print the portion of the asm string from the
+ // last newline up to this newline.
+ Out << "module asm \"";
+ printEscapedString(Front, Out);
+ Out << "\"\n";
+ } while (!Asm.empty());
+ }
+
+ printTypeIdentities();
+
+ // Output all comdats.
+ if (!Comdats.empty())
+ Out << '\n';
+ for (const Comdat *C : Comdats) {
+ printComdat(C);
+ if (C != Comdats.back())
+ Out << '\n';
+ }
+
+ // Output all globals.
+ if (!M->global_empty()) Out << '\n';
+ for (const GlobalVariable &GV : M->globals()) {
+ printGlobal(&GV); Out << '\n';
+ }
+
+ // Output all aliases.
+ if (!M->alias_empty()) Out << "\n";
+ for (const GlobalAlias &GA : M->aliases())
+ printAlias(&GA);
+
+ // Output all ifuncs.
+ if (!M->ifunc_empty()) Out << "\n";
+ for (const GlobalIFunc &GI : M->ifuncs())
+ printIFunc(&GI);
+
+ // Output all of the functions.
+ for (const Function &F : *M) {
+ Out << '\n';
+ printFunction(&F);
+ }
+
+ // Output global use-lists.
+ printUseLists(nullptr);
+
+ // Output all attribute groups.
+ if (!Machine.as_empty()) {
+ Out << '\n';
+ writeAllAttributeGroups();
+ }
+
+ // Output named metadata.
+ if (!M->named_metadata_empty()) Out << '\n';
+
+ for (const NamedMDNode &Node : M->named_metadata())
+ printNamedMDNode(&Node);
+
+ // Output metadata.
+ if (!Machine.mdn_empty()) {
+ Out << '\n';
+ writeAllMDNodes();
+ }
+}
+
+void AssemblyWriter::printModuleSummaryIndex() {
+ assert(TheIndex);
+ int NumSlots = Machine.initializeIndexIfNeeded();
+
+ Out << "\n";
+
+ // Print module path entries. To print in order, add paths to a vector
+ // indexed by module slot.
+ std::vector<std::pair<std::string, ModuleHash>> moduleVec;
+ std::string RegularLTOModuleName =
+ ModuleSummaryIndex::getRegularLTOModuleName();
+ moduleVec.resize(TheIndex->modulePaths().size());
+ for (auto &ModPath : TheIndex->modulePaths())
+ moduleVec[Machine.getModulePathSlot(ModPath.first())] = std::make_pair(
+ // A module id of -1 is a special entry for a regular LTO module created
+ // during the thin link.
+ ModPath.second.first == -1u ? RegularLTOModuleName
+ : std::string(ModPath.first()),
+ ModPath.second.second);
+
+ unsigned i = 0;
+ for (auto &ModPair : moduleVec) {
+ Out << "^" << i++ << " = module: (";
+ Out << "path: \"";
+ printEscapedString(ModPair.first, Out);
+ Out << "\", hash: (";
+ FieldSeparator FS;
+ for (auto Hash : ModPair.second)
+ Out << FS << Hash;
+ Out << "))\n";
+ }
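+ // Illustrative output (path and hash values are made up):
+ // ^0 = module: (path: "foo.o", hash: (1, 2, 3, 4, 5))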
+
+ // FIXME: Change AliasSummary to hold a ValueInfo instead of summary pointer
+ // for aliasee (then update BitcodeWriter.cpp and remove get/setAliaseeGUID).
+ for (auto &GlobalList : *TheIndex) {
+ auto GUID = GlobalList.first;
+ for (auto &Summary : GlobalList.second.SummaryList)
+ SummaryToGUIDMap[Summary.get()] = GUID;
+ }
+
+ // Print the global value summary entries.
+ for (auto &GlobalList : *TheIndex) {
+ auto GUID = GlobalList.first;
+ auto VI = TheIndex->getValueInfo(GlobalList);
+ printSummaryInfo(Machine.getGUIDSlot(GUID), VI);
+ }
+
+ // Print the TypeIdMap entries.
+ for (const auto &TID : TheIndex->typeIds()) {
+ Out << "^" << Machine.getTypeIdSlot(TID.second.first)
+ << " = typeid: (name: \"" << TID.second.first << "\"";
+ printTypeIdSummary(TID.second.second);
+ Out << ") ; guid = " << TID.first << "\n";
+ }
+
+ // Print the TypeIdCompatibleVtableMap entries.
+ for (auto &TId : TheIndex->typeIdCompatibleVtableMap()) {
+ auto GUID = GlobalValue::getGUID(TId.first);
+ Out << "^" << Machine.getGUIDSlot(GUID)
+ << " = typeidCompatibleVTable: (name: \"" << TId.first << "\"";
+ printTypeIdCompatibleVtableSummary(TId.second);
+ Out << ") ; guid = " << GUID << "\n";
+ }
+
+ // Don't emit the flags entry when it's not needed (the value is zero by default).
+ if (TheIndex->getFlags()) {
+ Out << "^" << NumSlots << " = flags: " << TheIndex->getFlags() << "\n";
+ ++NumSlots;
+ }
+
+ Out << "^" << NumSlots << " = blockcount: " << TheIndex->getBlockCount()
+ << "\n";
+}
+
+static const char *
+getWholeProgDevirtResKindName(WholeProgramDevirtResolution::Kind K) {
+ switch (K) {
+ case WholeProgramDevirtResolution::Indir:
+ return "indir";
+ case WholeProgramDevirtResolution::SingleImpl:
+ return "singleImpl";
+ case WholeProgramDevirtResolution::BranchFunnel:
+ return "branchFunnel";
+ }
+ llvm_unreachable("invalid WholeProgramDevirtResolution kind");
+}
+
+static const char *getWholeProgDevirtResByArgKindName(
+ WholeProgramDevirtResolution::ByArg::Kind K) {
+ switch (K) {
+ case WholeProgramDevirtResolution::ByArg::Indir:
+ return "indir";
+ case WholeProgramDevirtResolution::ByArg::UniformRetVal:
+ return "uniformRetVal";
+ case WholeProgramDevirtResolution::ByArg::UniqueRetVal:
+ return "uniqueRetVal";
+ case WholeProgramDevirtResolution::ByArg::VirtualConstProp:
+ return "virtualConstProp";
+ }
+ llvm_unreachable("invalid WholeProgramDevirtResolution::ByArg kind");
+}
+
+static const char *getTTResKindName(TypeTestResolution::Kind K) {
+ switch (K) {
+ case TypeTestResolution::Unknown:
+ return "unknown";
+ case TypeTestResolution::Unsat:
+ return "unsat";
+ case TypeTestResolution::ByteArray:
+ return "byteArray";
+ case TypeTestResolution::Inline:
+ return "inline";
+ case TypeTestResolution::Single:
+ return "single";
+ case TypeTestResolution::AllOnes:
+ return "allOnes";
+ }
+ llvm_unreachable("invalid TypeTestResolution kind");
+}
+
+void AssemblyWriter::printTypeTestResolution(const TypeTestResolution &TTRes) {
+ Out << "typeTestRes: (kind: " << getTTResKindName(TTRes.TheKind)
+ << ", sizeM1BitWidth: " << TTRes.SizeM1BitWidth;
+
+ // The following fields are only used if the target does not support the use
+ // of absolute symbols to store constants. Print only if non-zero.
+ if (TTRes.AlignLog2)
+ Out << ", alignLog2: " << TTRes.AlignLog2;
+ if (TTRes.SizeM1)
+ Out << ", sizeM1: " << TTRes.SizeM1;
+ if (TTRes.BitMask)
+ // BitMask is uint8_t; without the cast it would print as a raw char.
+ Out << ", bitMask: " << (unsigned)TTRes.BitMask;
+ if (TTRes.InlineBits)
+ Out << ", inlineBits: " << TTRes.InlineBits;
+
+ Out << ")";
+}
+
+void AssemblyWriter::printTypeIdSummary(const TypeIdSummary &TIS) {
+ Out << ", summary: (";
+ printTypeTestResolution(TIS.TTRes);
+ if (!TIS.WPDRes.empty()) {
+ Out << ", wpdResolutions: (";
+ FieldSeparator FS;
+ for (auto &WPDRes : TIS.WPDRes) {
+ Out << FS;
+ Out << "(offset: " << WPDRes.first << ", ";
+ printWPDRes(WPDRes.second);
+ Out << ")";
+ }
+ Out << ")";
+ }
+ Out << ")";
+}
+
+void AssemblyWriter::printTypeIdCompatibleVtableSummary(
+ const TypeIdCompatibleVtableInfo &TI) {
+ Out << ", summary: (";
+ FieldSeparator FS;
+ for (auto &P : TI) {
+ Out << FS;
+ Out << "(offset: " << P.AddressPointOffset << ", ";
+ Out << "^" << Machine.getGUIDSlot(P.VTableVI.getGUID());
+ Out << ")";
+ }
+ Out << ")";
+}
+
+void AssemblyWriter::printArgs(const std::vector<uint64_t> &Args) {
+ Out << "args: (";
+ FieldSeparator FS;
+ for (auto arg : Args) {
+ Out << FS;
+ Out << arg;
+ }
+ Out << ")";
+}
+
+void AssemblyWriter::printWPDRes(const WholeProgramDevirtResolution &WPDRes) {
+ Out << "wpdRes: (kind: ";
+ Out << getWholeProgDevirtResKindName(WPDRes.TheKind);
+
+ if (WPDRes.TheKind == WholeProgramDevirtResolution::SingleImpl)
+ Out << ", singleImplName: \"" << WPDRes.SingleImplName << "\"";
+
+ if (!WPDRes.ResByArg.empty()) {
+ Out << ", resByArg: (";
+ FieldSeparator FS;
+ for (auto &ResByArg : WPDRes.ResByArg) {
+ Out << FS;
+ printArgs(ResByArg.first);
+ Out << ", byArg: (kind: ";
+ Out << getWholeProgDevirtResByArgKindName(ResByArg.second.TheKind);
+ if (ResByArg.second.TheKind ==
+ WholeProgramDevirtResolution::ByArg::UniformRetVal ||
+ ResByArg.second.TheKind ==
+ WholeProgramDevirtResolution::ByArg::UniqueRetVal)
+ Out << ", info: " << ResByArg.second.Info;
+
+ // The following fields are only used if the target does not support the
+ // use of absolute symbols to store constants. Print only if non-zero.
+ if (ResByArg.second.Byte || ResByArg.second.Bit)
+ Out << ", byte: " << ResByArg.second.Byte
+ << ", bit: " << ResByArg.second.Bit;
+
+ Out << ")";
+ }
+ Out << ")";
+ }
+ Out << ")";
+}
+
+static const char *getSummaryKindName(GlobalValueSummary::SummaryKind SK) {
+ switch (SK) {
+ case GlobalValueSummary::AliasKind:
+ return "alias";
+ case GlobalValueSummary::FunctionKind:
+ return "function";
+ case GlobalValueSummary::GlobalVarKind:
+ return "variable";
+ }
+ llvm_unreachable("invalid summary kind");
+}
+
+void AssemblyWriter::printAliasSummary(const AliasSummary *AS) {
+ Out << ", aliasee: ";
+ // The indexes emitted for distributed backends may not include the
+ // aliasee summary (it is only included when the aliasee is being
+ // imported directly). Handle that case by just emitting "null" as the
+ // aliasee.
+ if (AS->hasAliasee())
+ Out << "^" << Machine.getGUIDSlot(SummaryToGUIDMap[&AS->getAliasee()]);
+ else
+ Out << "null";
+}
+
+void AssemblyWriter::printGlobalVarSummary(const GlobalVarSummary *GS) {
+ auto VTableFuncs = GS->vTableFuncs();
+ Out << ", varFlags: (readonly: " << GS->VarFlags.MaybeReadOnly << ", "
+ << "writeonly: " << GS->VarFlags.MaybeWriteOnly << ", "
+ << "constant: " << GS->VarFlags.Constant;
+ if (!VTableFuncs.empty())
+ Out << ", "
+ << "vcall_visibility: " << GS->VarFlags.VCallVisibility;
+ Out << ")";
+
+ if (!VTableFuncs.empty()) {
+ Out << ", vTableFuncs: (";
+ FieldSeparator FS;
+ for (auto &P : VTableFuncs) {
+ Out << FS;
+ Out << "(virtFunc: ^" << Machine.getGUIDSlot(P.FuncVI.getGUID())
+ << ", offset: " << P.VTableOffset;
+ Out << ")";
+ }
+ Out << ")";
+ }
+}
+
+static std::string getLinkageName(GlobalValue::LinkageTypes LT) {
+ switch (LT) {
+ case GlobalValue::ExternalLinkage:
+ return "external";
+ case GlobalValue::PrivateLinkage:
+ return "private";
+ case GlobalValue::InternalLinkage:
+ return "internal";
+ case GlobalValue::LinkOnceAnyLinkage:
+ return "linkonce";
+ case GlobalValue::LinkOnceODRLinkage:
+ return "linkonce_odr";
+ case GlobalValue::WeakAnyLinkage:
+ return "weak";
+ case GlobalValue::WeakODRLinkage:
+ return "weak_odr";
+ case GlobalValue::CommonLinkage:
+ return "common";
+ case GlobalValue::AppendingLinkage:
+ return "appending";
+ case GlobalValue::ExternalWeakLinkage:
+ return "extern_weak";
+ case GlobalValue::AvailableExternallyLinkage:
+ return "available_externally";
+ }
+ llvm_unreachable("invalid linkage");
+}
+
+// When printing linkage types in IR, ExternalLinkage is omitted, and every
+// other linkage type is printed with a trailing space after its name.
+static std::string getLinkageNameWithSpace(GlobalValue::LinkageTypes LT) {
+ if (LT == GlobalValue::ExternalLinkage)
+ return "";
+ return getLinkageName(LT) + " ";
+}
+
+static const char *getVisibilityName(GlobalValue::VisibilityTypes Vis) {
+ switch (Vis) {
+ case GlobalValue::DefaultVisibility:
+ return "default";
+ case GlobalValue::HiddenVisibility:
+ return "hidden";
+ case GlobalValue::ProtectedVisibility:
+ return "protected";
+ }
+ llvm_unreachable("invalid visibility");
+}
+
+void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) {
+ Out << ", insts: " << FS->instCount();
+ if (FS->fflags().anyFlagSet())
+ Out << ", " << FS->fflags();
+
+ if (!FS->calls().empty()) {
+ Out << ", calls: (";
+ FieldSeparator IFS;
+ for (auto &Call : FS->calls()) {
+ Out << IFS;
+ Out << "(callee: ^" << Machine.getGUIDSlot(Call.first.getGUID());
+ if (Call.second.getHotness() != CalleeInfo::HotnessType::Unknown)
+ Out << ", hotness: " << getHotnessName(Call.second.getHotness());
+ else if (Call.second.RelBlockFreq)
+ Out << ", relbf: " << Call.second.RelBlockFreq;
+ Out << ")";
+ }
+ Out << ")";
+ }
+
+ if (const auto *TIdInfo = FS->getTypeIdInfo())
+ printTypeIdInfo(*TIdInfo);
+
+ auto PrintRange = [&](const ConstantRange &Range) {
+ Out << "[" << Range.getSignedMin() << ", " << Range.getSignedMax() << "]";
+ };
+
+ if (!FS->paramAccesses().empty()) {
+ Out << ", params: (";
+ FieldSeparator IFS;
+ for (auto &PS : FS->paramAccesses()) {
+ Out << IFS;
+ Out << "(param: " << PS.ParamNo;
+ Out << ", offset: ";
+ PrintRange(PS.Use);
+ if (!PS.Calls.empty()) {
+ Out << ", calls: (";
+ FieldSeparator IFS;
+ for (auto &Call : PS.Calls) {
+ Out << IFS;
+ Out << "(callee: ^" << Machine.getGUIDSlot(Call.Callee.getGUID());
+ Out << ", param: " << Call.ParamNo;
+ Out << ", offset: ";
+ PrintRange(Call.Offsets);
+ Out << ")";
+ }
+ Out << ")";
+ }
+ Out << ")";
+ }
+ Out << ")";
+ }
+}
+
+void AssemblyWriter::printTypeIdInfo(
+ const FunctionSummary::TypeIdInfo &TIDInfo) {
+ Out << ", typeIdInfo: (";
+ FieldSeparator TIDFS;
+ if (!TIDInfo.TypeTests.empty()) {
+ Out << TIDFS;
+ Out << "typeTests: (";
+ FieldSeparator FS;
+ for (auto &GUID : TIDInfo.TypeTests) {
+ auto TidIter = TheIndex->typeIds().equal_range(GUID);
+ if (TidIter.first == TidIter.second) {
+ Out << FS;
+ Out << GUID;
+ continue;
+ }
+ // Print all type ids that correspond to this GUID.
+ for (auto It = TidIter.first; It != TidIter.second; ++It) {
+ Out << FS;
+ auto Slot = Machine.getTypeIdSlot(It->second.first);
+ assert(Slot != -1);
+ Out << "^" << Slot;
+ }
+ }
+ Out << ")";
+ }
+ if (!TIDInfo.TypeTestAssumeVCalls.empty()) {
+ Out << TIDFS;
+ printNonConstVCalls(TIDInfo.TypeTestAssumeVCalls, "typeTestAssumeVCalls");
+ }
+ if (!TIDInfo.TypeCheckedLoadVCalls.empty()) {
+ Out << TIDFS;
+ printNonConstVCalls(TIDInfo.TypeCheckedLoadVCalls, "typeCheckedLoadVCalls");
+ }
+ if (!TIDInfo.TypeTestAssumeConstVCalls.empty()) {
+ Out << TIDFS;
+ printConstVCalls(TIDInfo.TypeTestAssumeConstVCalls,
+ "typeTestAssumeConstVCalls");
+ }
+ if (!TIDInfo.TypeCheckedLoadConstVCalls.empty()) {
+ Out << TIDFS;
+ printConstVCalls(TIDInfo.TypeCheckedLoadConstVCalls,
+ "typeCheckedLoadConstVCalls");
+ }
+ Out << ")";
+}
+
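+// Print a virtual function id, using the "^N" type id slot form when the
+// GUID resolves to one or more named type ids in the index, and the raw
+// "guid: N, offset: M" form otherwise.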
+void AssemblyWriter::printVFuncId(const FunctionSummary::VFuncId VFId) {
+ auto TidIter = TheIndex->typeIds().equal_range(VFId.GUID);
+ if (TidIter.first == TidIter.second) {
+ Out << "vFuncId: (";
+ Out << "guid: " << VFId.GUID;
+ Out << ", offset: " << VFId.Offset;
+ Out << ")";
+ return;
+ }
+ // Print all type ids that correspond to this GUID.
+ FieldSeparator FS;
+ for (auto It = TidIter.first; It != TidIter.second; ++It) {
+ Out << FS;
+ Out << "vFuncId: (";
+ auto Slot = Machine.getTypeIdSlot(It->second.first);
+ assert(Slot != -1);
+ Out << "^" << Slot;
+ Out << ", offset: " << VFId.Offset;
+ Out << ")";
+ }
+}
+
+void AssemblyWriter::printNonConstVCalls(
+ const std::vector<FunctionSummary::VFuncId> &VCallList, const char *Tag) {
+ Out << Tag << ": (";
+ FieldSeparator FS;
+ for (auto &VFuncId : VCallList) {
+ Out << FS;
+ printVFuncId(VFuncId);
+ }
+ Out << ")";
+}
+
+void AssemblyWriter::printConstVCalls(
+ const std::vector<FunctionSummary::ConstVCall> &VCallList,
+ const char *Tag) {
+ Out << Tag << ": (";
+ FieldSeparator FS;
+ for (auto &ConstVCall : VCallList) {
+ Out << FS;
+ Out << "(";
+ printVFuncId(ConstVCall.VFunc);
+ if (!ConstVCall.Args.empty()) {
+ Out << ", ";
+ printArgs(ConstVCall.Args);
+ }
+ Out << ")";
+ }
+ Out << ")";
+}
+
+void AssemblyWriter::printSummary(const GlobalValueSummary &Summary) {
+ GlobalValueSummary::GVFlags GVFlags = Summary.flags();
+ GlobalValue::LinkageTypes LT = (GlobalValue::LinkageTypes)GVFlags.Linkage;
+ Out << getSummaryKindName(Summary.getSummaryKind()) << ": ";
+ Out << "(module: ^" << Machine.getModulePathSlot(Summary.modulePath())
+ << ", flags: (";
+ Out << "linkage: " << getLinkageName(LT);
+ Out << ", visibility: "
+ << getVisibilityName((GlobalValue::VisibilityTypes)GVFlags.Visibility);
+ Out << ", notEligibleToImport: " << GVFlags.NotEligibleToImport;
+ Out << ", live: " << GVFlags.Live;
+ Out << ", dsoLocal: " << GVFlags.DSOLocal;
+ Out << ", canAutoHide: " << GVFlags.CanAutoHide;
+ Out << ")";
+
+ if (Summary.getSummaryKind() == GlobalValueSummary::AliasKind)
+ printAliasSummary(cast<AliasSummary>(&Summary));
+ else if (Summary.getSummaryKind() == GlobalValueSummary::FunctionKind)
+ printFunctionSummary(cast<FunctionSummary>(&Summary));
+ else
+ printGlobalVarSummary(cast<GlobalVarSummary>(&Summary));
+
+ auto RefList = Summary.refs();
+ if (!RefList.empty()) {
+ Out << ", refs: (";
+ FieldSeparator FS;
+ for (auto &Ref : RefList) {
+ Out << FS;
+ if (Ref.isReadOnly())
+ Out << "readonly ";
+ else if (Ref.isWriteOnly())
+ Out << "writeonly ";
+ Out << "^" << Machine.getGUIDSlot(Ref.getGUID());
+ }
+ Out << ")";
+ }
+
+ Out << ")";
+}
+
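+// Print one combined summary index entry, e.g.
+// ^5 = gv: (name: "foo", summaries: (...)) ; guid = 12345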
+void AssemblyWriter::printSummaryInfo(unsigned Slot, const ValueInfo &VI) {
+ Out << "^" << Slot << " = gv: (";
+ if (!VI.name().empty())
+ Out << "name: \"" << VI.name() << "\"";
+ else
+ Out << "guid: " << VI.getGUID();
+ if (!VI.getSummaryList().empty()) {
+ Out << ", summaries: (";
+ FieldSeparator FS;
+ for (auto &Summary : VI.getSummaryList()) {
+ Out << FS;
+ printSummary(*Summary);
+ }
+ Out << ")";
+ }
+ Out << ")";
+ if (!VI.name().empty())
+ Out << " ; guid = " << VI.getGUID();
+ Out << "\n";
+}
+
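+// Print a metadata name, escaping every character that is not legal in an
+// identifier as a "\xx" hex escape, e.g. the name "my md" prints as
+// "my\20md".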
+static void printMetadataIdentifier(StringRef Name,
+ formatted_raw_ostream &Out) {
+ if (Name.empty()) {
+ Out << "<empty name> ";
+ } else {
+ if (isalpha(static_cast<unsigned char>(Name[0])) || Name[0] == '-' ||
+ Name[0] == '$' || Name[0] == '.' || Name[0] == '_')
+ Out << Name[0];
+ else
+ Out << '\\' << hexdigit(Name[0] >> 4) << hexdigit(Name[0] & 0x0F);
+ for (unsigned i = 1, e = Name.size(); i != e; ++i) {
+ unsigned char C = Name[i];
+ if (isalnum(static_cast<unsigned char>(C)) || C == '-' || C == '$' ||
+ C == '.' || C == '_')
+ Out << C;
+ else
+ Out << '\\' << hexdigit(C >> 4) << hexdigit(C & 0x0F);
+ }
+ }
+}
+
+void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) {
+ Out << '!';
+ printMetadataIdentifier(NMD->getName(), Out);
+ Out << " = !{";
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ if (i)
+ Out << ", ";
+
+ // Write DIExpressions inline.
+ // FIXME: Ban DIExpressions in NamedMDNodes; they serve no purpose there.
+ MDNode *Op = NMD->getOperand(i);
+ assert(!isa<DIArgList>(Op) &&
+ "DIArgLists should not appear in NamedMDNodes");
+ if (auto *Expr = dyn_cast<DIExpression>(Op)) {
+ writeDIExpression(Out, Expr, AsmWriterContext::getEmpty());
+ continue;
+ }
+
+ int Slot = Machine.getMetadataSlot(Op);
+ if (Slot == -1)
+ Out << "<badref>";
+ else
+ Out << '!' << Slot;
+ }
+ Out << "}\n";
+}
+
+static void PrintVisibility(GlobalValue::VisibilityTypes Vis,
+ formatted_raw_ostream &Out) {
+ switch (Vis) {
+ case GlobalValue::DefaultVisibility: break;
+ case GlobalValue::HiddenVisibility: Out << "hidden "; break;
+ case GlobalValue::ProtectedVisibility: Out << "protected "; break;
+ }
+}
+
+static void PrintDSOLocation(const GlobalValue &GV,
+ formatted_raw_ostream &Out) {
+ if (GV.isDSOLocal() && !GV.isImplicitDSOLocal())
+ Out << "dso_local ";
+}
+
+static void PrintDLLStorageClass(GlobalValue::DLLStorageClassTypes SCT,
+ formatted_raw_ostream &Out) {
+ switch (SCT) {
+ case GlobalValue::DefaultStorageClass: break;
+ case GlobalValue::DLLImportStorageClass: Out << "dllimport "; break;
+ case GlobalValue::DLLExportStorageClass: Out << "dllexport "; break;
+ }
+}
+
+static void PrintThreadLocalModel(GlobalVariable::ThreadLocalMode TLM,
+ formatted_raw_ostream &Out) {
+ switch (TLM) {
+ case GlobalVariable::NotThreadLocal:
+ break;
+ case GlobalVariable::GeneralDynamicTLSModel:
+ Out << "thread_local ";
+ break;
+ case GlobalVariable::LocalDynamicTLSModel:
+ Out << "thread_local(localdynamic) ";
+ break;
+ case GlobalVariable::InitialExecTLSModel:
+ Out << "thread_local(initialexec) ";
+ break;
+ case GlobalVariable::LocalExecTLSModel:
+ Out << "thread_local(localexec) ";
+ break;
+ }
+}
+
+static StringRef getUnnamedAddrEncoding(GlobalVariable::UnnamedAddr UA) {
+ switch (UA) {
+ case GlobalVariable::UnnamedAddr::None:
+ return "";
+ case GlobalVariable::UnnamedAddr::Local:
+ return "local_unnamed_addr";
+ case GlobalVariable::UnnamedAddr::Global:
+ return "unnamed_addr";
+ }
+ llvm_unreachable("Unknown UnnamedAddr");
+}
+
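+// Append the comdat reference for a global object, e.g. " comdat" when the
+// comdat has the same name as the object, or " comdat($c)" otherwise; global
+// variables additionally get a leading comma.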
+static void maybePrintComdat(formatted_raw_ostream &Out,
+ const GlobalObject &GO) {
+ const Comdat *C = GO.getComdat();
+ if (!C)
+ return;
+
+ if (isa<GlobalVariable>(GO))
+ Out << ',';
+ Out << " comdat";
+
+ if (GO.getName() == C->getName())
+ return;
+
+ Out << '(';
+ PrintLLVMName(Out, C->getName(), ComdatPrefix);
+ Out << ')';
+}
+
+void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
+ if (GV->isMaterializable())
+ Out << "; Materializable\n";
+
+ AsmWriterContext WriterCtx(&TypePrinter, &Machine, GV->getParent());
+ WriteAsOperandInternal(Out, GV, WriterCtx);
+ Out << " = ";
+
+ if (!GV->hasInitializer() && GV->hasExternalLinkage())
+ Out << "external ";
+
+ Out << getLinkageNameWithSpace(GV->getLinkage());
+ PrintDSOLocation(*GV, Out);
+ PrintVisibility(GV->getVisibility(), Out);
+ PrintDLLStorageClass(GV->getDLLStorageClass(), Out);
+ PrintThreadLocalModel(GV->getThreadLocalMode(), Out);
+ StringRef UA = getUnnamedAddrEncoding(GV->getUnnamedAddr());
+ if (!UA.empty())
+ Out << UA << ' ';
+
+ if (unsigned AddressSpace = GV->getType()->getAddressSpace())
+ Out << "addrspace(" << AddressSpace << ") ";
+ if (GV->isExternallyInitialized()) Out << "externally_initialized ";
+ Out << (GV->isConstant() ? "constant " : "global ");
+ TypePrinter.print(GV->getValueType(), Out);
+
+ if (GV->hasInitializer()) {
+ Out << ' ';
+ writeOperand(GV->getInitializer(), false);
+ }
+
+ if (GV->hasSection()) {
+ Out << ", section \"";
+ printEscapedString(GV->getSection(), Out);
+ Out << '"';
+ }
+ if (GV->hasPartition()) {
+ Out << ", partition \"";
+ printEscapedString(GV->getPartition(), Out);
+ Out << '"';
+ }
+
+ using SanitizerMetadata = llvm::GlobalValue::SanitizerMetadata;
+ if (GV->hasSanitizerMetadata()) {
+ SanitizerMetadata MD = GV->getSanitizerMetadata();
+ if (MD.NoAddress)
+ Out << ", no_sanitize_address";
+ if (MD.NoHWAddress)
+ Out << ", no_sanitize_hwaddress";
+ if (MD.Memtag)
+ Out << ", sanitize_memtag";
+ if (MD.IsDynInit)
+ Out << ", sanitize_address_dyninit";
+ }
+
+ maybePrintComdat(Out, *GV);
+ if (MaybeAlign A = GV->getAlign())
+ Out << ", align " << A->value();
+
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ GV->getAllMetadata(MDs);
+ printMetadataAttachments(MDs, ", ");
+
+ auto Attrs = GV->getAttributes();
+ if (Attrs.hasAttributes())
+ Out << " #" << Machine.getAttributeGroupSlot(Attrs);
+
+ printInfoComment(*GV);
+}
+
+void AssemblyWriter::printAlias(const GlobalAlias *GA) {
+ if (GA->isMaterializable())
+ Out << "; Materializable\n";
+
+ AsmWriterContext WriterCtx(&TypePrinter, &Machine, GA->getParent());
+ WriteAsOperandInternal(Out, GA, WriterCtx);
+ Out << " = ";
+
+ Out << getLinkageNameWithSpace(GA->getLinkage());
+ PrintDSOLocation(*GA, Out);
+ PrintVisibility(GA->getVisibility(), Out);
+ PrintDLLStorageClass(GA->getDLLStorageClass(), Out);
+ PrintThreadLocalModel(GA->getThreadLocalMode(), Out);
+ StringRef UA = getUnnamedAddrEncoding(GA->getUnnamedAddr());
+ if (!UA.empty())
+ Out << UA << ' ';
+
+ Out << "alias ";
+
+ TypePrinter.print(GA->getValueType(), Out);
+ Out << ", ";
+
+ if (const Constant *Aliasee = GA->getAliasee()) {
+ writeOperand(Aliasee, !isa<ConstantExpr>(Aliasee));
+ } else {
+ TypePrinter.print(GA->getType(), Out);
+ Out << " <<NULL ALIASEE>>";
+ }
+
+ if (GA->hasPartition()) {
+ Out << ", partition \"";
+ printEscapedString(GA->getPartition(), Out);
+ Out << '"';
+ }
+
+ printInfoComment(*GA);
+ Out << '\n';
+}
+
+void AssemblyWriter::printIFunc(const GlobalIFunc *GI) {
+ if (GI->isMaterializable())
+ Out << "; Materializable\n";
+
+ AsmWriterContext WriterCtx(&TypePrinter, &Machine, GI->getParent());
+ WriteAsOperandInternal(Out, GI, WriterCtx);
+ Out << " = ";
+
+ Out << getLinkageNameWithSpace(GI->getLinkage());
+ PrintDSOLocation(*GI, Out);
+ PrintVisibility(GI->getVisibility(), Out);
+
+ Out << "ifunc ";
+
+ TypePrinter.print(GI->getValueType(), Out);
+ Out << ", ";
+
+ if (const Constant *Resolver = GI->getResolver()) {
+ writeOperand(Resolver, !isa<ConstantExpr>(Resolver));
+ } else {
+ TypePrinter.print(GI->getType(), Out);
+ Out << " <<NULL RESOLVER>>";
+ }
+
+ if (GI->hasPartition()) {
+ Out << ", partition \"";
+ printEscapedString(GI->getPartition(), Out);
+ Out << '"';
+ }
+
+ printInfoComment(*GI);
+ Out << '\n';
+}
+
+void AssemblyWriter::printComdat(const Comdat *C) {
+ C->print(Out);
+}
+
+void AssemblyWriter::printTypeIdentities() {
+ if (TypePrinter.empty())
+ return;
+
+ Out << '\n';
+
+ // Emit all numbered types.
+ auto &NumberedTypes = TypePrinter.getNumberedTypes();
+ for (unsigned I = 0, E = NumberedTypes.size(); I != E; ++I) {
+ Out << '%' << I << " = type ";
+
+ // Make sure we print out at least one level of the type structure, so
+ // that we do not get %2 = type %2.
+ TypePrinter.printStructBody(NumberedTypes[I], Out);
+ Out << '\n';
+ }
+
+ auto &NamedTypes = TypePrinter.getNamedTypes();
+ for (StructType *NamedType : NamedTypes) {
+ PrintLLVMName(Out, NamedType->getName(), LocalPrefix);
+ Out << " = type ";
+
+ // Make sure we print out at least one level of the type structure, so
+ // that we do not get %FILE = type %FILE.
+ TypePrinter.printStructBody(NamedType, Out);
+ Out << '\n';
+ }
+}
+
+/// printFunction - Print all aspects of a function.
+void AssemblyWriter::printFunction(const Function *F) {
+ if (AnnotationWriter) AnnotationWriter->emitFunctionAnnot(F, Out);
+
+ if (F->isMaterializable())
+ Out << "; Materializable\n";
+
+ const AttributeList &Attrs = F->getAttributes();
+ if (Attrs.hasFnAttrs()) {
+ AttributeSet AS = Attrs.getFnAttrs();
+ std::string AttrStr;
+
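+ // String attributes are omitted from this comment line; they still appear
+ // in the function's "#N" attribute group below.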
+ for (const Attribute &Attr : AS) {
+ if (!Attr.isStringAttribute()) {
+ if (!AttrStr.empty()) AttrStr += ' ';
+ AttrStr += Attr.getAsString();
+ }
+ }
+
+ if (!AttrStr.empty())
+ Out << "; Function Attrs: " << AttrStr << '\n';
+ }
+
+ Machine.incorporateFunction(F);
+
+ if (F->isDeclaration()) {
+ Out << "declare";
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ F->getAllMetadata(MDs);
+ printMetadataAttachments(MDs, " ");
+ Out << ' ';
+ } else
+ Out << "define ";
+
+ Out << getLinkageNameWithSpace(F->getLinkage());
+ PrintDSOLocation(*F, Out);
+ PrintVisibility(F->getVisibility(), Out);
+ PrintDLLStorageClass(F->getDLLStorageClass(), Out);
+
+ // Print the calling convention.
+ if (F->getCallingConv() != CallingConv::C) {
+ PrintCallingConv(F->getCallingConv(), Out);
+ Out << " ";
+ }
+
+ FunctionType *FT = F->getFunctionType();
+ if (Attrs.hasRetAttrs())
+ Out << Attrs.getAsString(AttributeList::ReturnIndex) << ' ';
+ TypePrinter.print(F->getReturnType(), Out);
+ AsmWriterContext WriterCtx(&TypePrinter, &Machine, F->getParent());
+ Out << ' ';
+ WriteAsOperandInternal(Out, F, WriterCtx);
+ Out << '(';
+
+ // Loop over the arguments, printing them...
+ if (F->isDeclaration() && !IsForDebug) {
+ // We're only interested in the type here - don't print argument names.
+ for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) {
+ // Insert commas as we go... the first arg doesn't get a comma
+ if (I)
+ Out << ", ";
+ // Output type...
+ TypePrinter.print(FT->getParamType(I), Out);
+
+ AttributeSet ArgAttrs = Attrs.getParamAttrs(I);
+ if (ArgAttrs.hasAttributes()) {
+ Out << ' ';
+ writeAttributeSet(ArgAttrs);
+ }
+ }
+ } else {
+ // The arguments are meaningful here, print them in detail.
+ for (const Argument &Arg : F->args()) {
+ // Insert commas as we go... the first arg doesn't get a comma
+ if (Arg.getArgNo() != 0)
+ Out << ", ";
+ printArgument(&Arg, Attrs.getParamAttrs(Arg.getArgNo()));
+ }
+ }
+
+ // Finish printing arguments...
+ if (FT->isVarArg()) {
+ if (FT->getNumParams()) Out << ", ";
+ Out << "..."; // Output varargs portion of signature!
+ }
+ Out << ')';
+ StringRef UA = getUnnamedAddrEncoding(F->getUnnamedAddr());
+ if (!UA.empty())
+ Out << ' ' << UA;
+ // We print the function address space if it is non-zero, if we are writing
+ // a module with a non-zero program address space, or if there is no valid
+ // Module* (so that the file can be parsed without the datalayout string).
+ const Module *Mod = F->getParent();
+ if (F->getAddressSpace() != 0 || !Mod ||
+ Mod->getDataLayout().getProgramAddressSpace() != 0)
+ Out << " addrspace(" << F->getAddressSpace() << ")";
+ if (Attrs.hasFnAttrs())
+ Out << " #" << Machine.getAttributeGroupSlot(Attrs.getFnAttrs());
+ if (F->hasSection()) {
+ Out << " section \"";
+ printEscapedString(F->getSection(), Out);
+ Out << '"';
+ }
+ if (F->hasPartition()) {
+ Out << " partition \"";
+ printEscapedString(F->getPartition(), Out);
+ Out << '"';
+ }
+ maybePrintComdat(Out, *F);
+ if (MaybeAlign A = F->getAlign())
+ Out << " align " << A->value();
+ if (F->hasGC())
+ Out << " gc \"" << F->getGC() << '"';
+ if (F->hasPrefixData()) {
+ Out << " prefix ";
+ writeOperand(F->getPrefixData(), true);
+ }
+ if (F->hasPrologueData()) {
+ Out << " prologue ";
+ writeOperand(F->getPrologueData(), true);
+ }
+ if (F->hasPersonalityFn()) {
+ Out << " personality ";
+ writeOperand(F->getPersonalityFn(), /*PrintType=*/true);
+ }
+
+ if (F->isDeclaration()) {
+ Out << '\n';
+ } else {
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ F->getAllMetadata(MDs);
+ printMetadataAttachments(MDs, " ");
+
+ Out << " {";
+ // Output all of the function's basic blocks.
+ for (const BasicBlock &BB : *F)
+ printBasicBlock(&BB);
+
+ // Output the function's use-lists.
+ printUseLists(F);
+
+ Out << "}\n";
+ }
+
+ Machine.purgeFunction();
+}
+
+/// printArgument - This member is called for every argument that is passed
+/// into the function. Simply print it out.
+void AssemblyWriter::printArgument(const Argument *Arg, AttributeSet Attrs) {
+ // Output type...
+ TypePrinter.print(Arg->getType(), Out);
+
+ // Output parameter attributes list
+ if (Attrs.hasAttributes()) {
+ Out << ' ';
+ writeAttributeSet(Attrs);
+ }
+
+ // Output name, if available...
+ if (Arg->hasName()) {
+ Out << ' ';
+ PrintLLVMName(Out, Arg);
+ } else {
+ int Slot = Machine.getLocalSlot(Arg);
+ assert(Slot != -1 && "expect argument in function here");
+ Out << " %" << Slot;
+ }
+}
+
+/// printBasicBlock - This member is called for each basic block in a method.
+void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
+ bool IsEntryBlock = BB->getParent() && BB->isEntryBlock();
+ if (BB->hasName()) { // Print out the label if it exists...
+ Out << "\n";
+ PrintLLVMName(Out, BB->getName(), LabelPrefix);
+ Out << ':';
+ } else if (!IsEntryBlock) {
+ Out << "\n";
+ int Slot = Machine.getLocalSlot(BB);
+ if (Slot != -1)
+ Out << Slot << ":";
+ else
+ Out << "<badref>:";
+ }
+
+ if (!IsEntryBlock) {
+ // Output predecessors for the block.
+ Out.PadToColumn(50);
+ Out << ";";
+ const_pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
+
+ if (PI == PE) {
+ Out << " No predecessors!";
+ } else {
+ Out << " preds = ";
+ writeOperand(*PI, false);
+ for (++PI; PI != PE; ++PI) {
+ Out << ", ";
+ writeOperand(*PI, false);
+ }
+ }
+ }
+
+ Out << "\n";
+
+ if (AnnotationWriter) AnnotationWriter->emitBasicBlockStartAnnot(BB, Out);
+
+ // Output all of the instructions in the basic block...
+ for (const Instruction &I : *BB) {
+ printInstructionLine(I);
+ }
+
+ if (AnnotationWriter) AnnotationWriter->emitBasicBlockEndAnnot(BB, Out);
+}
+
+/// printInstructionLine - Print an instruction and a newline character.
+void AssemblyWriter::printInstructionLine(const Instruction &I) {
+ printInstruction(I);
+ Out << '\n';
+}
+
+/// printGCRelocateComment - Print a comment after a call to the gc.relocate
+/// intrinsic indicating the base and derived pointer names.
+void AssemblyWriter::printGCRelocateComment(const GCRelocateInst &Relocate) {
+ Out << " ; (";
+ writeOperand(Relocate.getBasePtr(), false);
+ Out << ", ";
+ writeOperand(Relocate.getDerivedPtr(), false);
+ Out << ")";
+}
+
+/// printInfoComment - Print a little comment after the instruction indicating
+/// which slot it occupies.
+void AssemblyWriter::printInfoComment(const Value &V) {
+ if (const auto *Relocate = dyn_cast<GCRelocateInst>(&V))
+ printGCRelocateComment(*Relocate);
+
+ if (AnnotationWriter)
+ AnnotationWriter->printInfoComment(V, Out);
+}
+
+static void maybePrintCallAddrSpace(const Value *Operand, const Instruction *I,
+ raw_ostream &Out) {
+ // We print the address space of the call if it is non-zero.
+ unsigned CallAddrSpace = Operand->getType()->getPointerAddressSpace();
+ bool PrintAddrSpace = CallAddrSpace != 0;
+ if (!PrintAddrSpace) {
+ const Module *Mod = getModuleFromVal(I);
+ // We also print it if it is zero but differs from the module's program
+ // address space, or if we can't find a valid Module*, so that the
+ // resulting file can be parsed even without a datalayout string.
+ if (!Mod || Mod->getDataLayout().getProgramAddressSpace() != 0)
+ PrintAddrSpace = true;
+ }
+ if (PrintAddrSpace)
+ Out << " addrspace(" << CallAddrSpace << ")";
+}
+
+// This member is called for each Instruction in a function.
+void AssemblyWriter::printInstruction(const Instruction &I) {
+ if (AnnotationWriter) AnnotationWriter->emitInstructionAnnot(&I, Out);
+
+ // Print out indentation for an instruction.
+ Out << " ";
+
+ // Print out name if it exists...
+ if (I.hasName()) {
+ PrintLLVMName(Out, &I);
+ Out << " = ";
+ } else if (!I.getType()->isVoidTy()) {
+ // Print out the def slot taken.
+ int SlotNum = Machine.getLocalSlot(&I);
+ if (SlotNum == -1)
+ Out << "<badref> = ";
+ else
+ Out << '%' << SlotNum << " = ";
+ }
+
+ if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ if (CI->isMustTailCall())
+ Out << "musttail ";
+ else if (CI->isTailCall())
+ Out << "tail ";
+ else if (CI->isNoTailCall())
+ Out << "notail ";
+ }
+
+ // Print out the opcode...
+ Out << I.getOpcodeName();
+
+ // If this is an atomic load or store, print out the atomic marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+ Out << " atomic";
+
+ if (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isWeak())
+ Out << " weak";
+
+ // If this is a volatile operation, print out the volatile marker.
+ if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
+ (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) ||
+ (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||
+ (isa<AtomicRMWInst>(I) && cast<AtomicRMWInst>(I).isVolatile()))
+ Out << " volatile";
+
+ // Print out optimization information.
+ WriteOptimizationInfo(Out, &I);
+
+ // Print out the compare instruction predicates
+ if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
+ Out << ' ' << CmpInst::getPredicateName(CI->getPredicate());
+
+ // Print out the atomicrmw operation
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+ Out << ' ' << AtomicRMWInst::getOperationName(RMWI->getOperation());
+
+ // Print out the type of the operands...
+ const Value *Operand = I.getNumOperands() ? I.getOperand(0) : nullptr;
+
+ // Special case conditional branches to swizzle the condition out to the front
+ if (isa<BranchInst>(I) && cast<BranchInst>(I).isConditional()) {
+ const BranchInst &BI(cast<BranchInst>(I));
+ Out << ' ';
+ writeOperand(BI.getCondition(), true);
+ Out << ", ";
+ writeOperand(BI.getSuccessor(0), true);
+ Out << ", ";
+ writeOperand(BI.getSuccessor(1), true);
+
+ } else if (isa<SwitchInst>(I)) {
+ const SwitchInst& SI(cast<SwitchInst>(I));
+ // Special case switch instruction to get formatting nice and correct.
+ Out << ' ';
+ writeOperand(SI.getCondition(), true);
+ Out << ", ";
+ writeOperand(SI.getDefaultDest(), true);
+ Out << " [";
+ for (auto Case : SI.cases()) {
+ Out << "\n ";
+ writeOperand(Case.getCaseValue(), true);
+ Out << ", ";
+ writeOperand(Case.getCaseSuccessor(), true);
+ }
+ Out << "\n ]";
+ } else if (isa<IndirectBrInst>(I)) {
+ // Special case indirectbr instruction to get formatting nice and correct.
+ Out << ' ';
+ writeOperand(Operand, true);
+ Out << ", [";
+
+ for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
+ if (i != 1)
+ Out << ", ";
+ writeOperand(I.getOperand(i), true);
+ }
+ Out << ']';
+ } else if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ Out << ' ';
+ TypePrinter.print(I.getType(), Out);
+ Out << ' ';
+
+ for (unsigned op = 0, Eop = PN->getNumIncomingValues(); op < Eop; ++op) {
+ if (op) Out << ", ";
+ Out << "[ ";
+ writeOperand(PN->getIncomingValue(op), false); Out << ", ";
+ writeOperand(PN->getIncomingBlock(op), false); Out << " ]";
+ }
+ } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&I)) {
+ Out << ' ';
+ writeOperand(I.getOperand(0), true);
+ for (unsigned i : EVI->indices())
+ Out << ", " << i;
+ } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&I)) {
+ Out << ' ';
+ writeOperand(I.getOperand(0), true); Out << ", ";
+ writeOperand(I.getOperand(1), true);
+ for (unsigned i : IVI->indices())
+ Out << ", " << i;
+ } else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(I.getType(), Out);
+ if (LPI->isCleanup() || LPI->getNumClauses() != 0)
+ Out << '\n';
+
+ if (LPI->isCleanup())
+ Out << " cleanup";
+
+ for (unsigned i = 0, e = LPI->getNumClauses(); i != e; ++i) {
+ if (i != 0 || LPI->isCleanup()) Out << "\n";
+ if (LPI->isCatch(i))
+ Out << " catch ";
+ else
+ Out << " filter ";
+
+ writeOperand(LPI->getClause(i), true);
+ }
+ } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(&I)) {
+ Out << " within ";
+ writeOperand(CatchSwitch->getParentPad(), /*PrintType=*/false);
+ Out << " [";
+ unsigned Op = 0;
+ for (const BasicBlock *PadBB : CatchSwitch->handlers()) {
+ if (Op > 0)
+ Out << ", ";
+ writeOperand(PadBB, /*PrintType=*/true);
+ ++Op;
+ }
+ Out << "] unwind ";
+ if (const BasicBlock *UnwindDest = CatchSwitch->getUnwindDest())
+ writeOperand(UnwindDest, /*PrintType=*/true);
+ else
+ Out << "to caller";
+ } else if (const auto *FPI = dyn_cast<FuncletPadInst>(&I)) {
+ Out << " within ";
+ writeOperand(FPI->getParentPad(), /*PrintType=*/false);
+ Out << " [";
+ for (unsigned Op = 0, NumOps = FPI->getNumArgOperands(); Op < NumOps;
+ ++Op) {
+ if (Op > 0)
+ Out << ", ";
+ writeOperand(FPI->getArgOperand(Op), /*PrintType=*/true);
+ }
+ Out << ']';
+ } else if (isa<ReturnInst>(I) && !Operand) {
+ Out << " void";
+ } else if (const auto *CRI = dyn_cast<CatchReturnInst>(&I)) {
+ Out << " from ";
+ writeOperand(CRI->getOperand(0), /*PrintType=*/false);
+
+ Out << " to ";
+ writeOperand(CRI->getOperand(1), /*PrintType=*/true);
+ } else if (const auto *CRI = dyn_cast<CleanupReturnInst>(&I)) {
+ Out << " from ";
+ writeOperand(CRI->getOperand(0), /*PrintType=*/false);
+
+ Out << " unwind ";
+ if (CRI->hasUnwindDest())
+ writeOperand(CRI->getOperand(1), /*PrintType=*/true);
+ else
+ Out << "to caller";
+ } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+ // Print the calling convention being used.
+ if (CI->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(CI->getCallingConv(), Out);
+ }
+
+ Operand = CI->getCalledOperand();
+ FunctionType *FTy = CI->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+ const AttributeList &PAL = CI->getAttributes();
+
+ if (PAL.hasRetAttrs())
+ Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
+
+ // Only print addrspace(N) if necessary:
+ maybePrintCallAddrSpace(Operand, &I, Out);
+
+ // If possible, print out the short form of the call instruction. We can
+ // only do this if the first argument is a pointer to a nonvararg function,
+ // and if the return type is not a pointer to a function.
+ //
+ Out << ' ';
+ TypePrinter.print(FTy->isVarArg() ? FTy : RetTy, Out);
+ Out << ' ';
+ writeOperand(Operand, false);
+ Out << '(';
+ for (unsigned op = 0, Eop = CI->arg_size(); op < Eop; ++op) {
+ if (op > 0)
+ Out << ", ";
+ writeParamOperand(CI->getArgOperand(op), PAL.getParamAttrs(op));
+ }
+
+ // Emit an ellipsis if this is a musttail call in a vararg function. This
+ // is only to aid readability, since musttail calls forward varargs by
+ // default.
+ if (CI->isMustTailCall() && CI->getParent() &&
+ CI->getParent()->getParent() &&
+ CI->getParent()->getParent()->isVarArg())
+ Out << ", ...";
+
+ Out << ')';
+ if (PAL.hasFnAttrs())
+ Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttrs());
+
+ writeOperandBundles(CI);
+ } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+ Operand = II->getCalledOperand();
+ FunctionType *FTy = II->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+ const AttributeList &PAL = II->getAttributes();
+
+ // Print the calling convention being used.
+ if (II->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(II->getCallingConv(), Out);
+ }
+
+ if (PAL.hasRetAttrs())
+ Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
+
+ // Only print addrspace(N) if necessary:
+ maybePrintCallAddrSpace(Operand, &I, Out);
+
+ // If possible, print out the short form of the invoke instruction. We can
+ // only do this if the first argument is a pointer to a nonvararg function,
+ // and if the return type is not a pointer to a function.
+ //
+ Out << ' ';
+ TypePrinter.print(FTy->isVarArg() ? FTy : RetTy, Out);
+ Out << ' ';
+ writeOperand(Operand, false);
+ Out << '(';
+ for (unsigned op = 0, Eop = II->arg_size(); op < Eop; ++op) {
+ if (op)
+ Out << ", ";
+ writeParamOperand(II->getArgOperand(op), PAL.getParamAttrs(op));
+ }
+
+ Out << ')';
+ if (PAL.hasFnAttrs())
+ Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttrs());
+
+ writeOperandBundles(II);
+
+ Out << "\n to ";
+ writeOperand(II->getNormalDest(), true);
+ Out << " unwind ";
+ writeOperand(II->getUnwindDest(), true);
+ } else if (const CallBrInst *CBI = dyn_cast<CallBrInst>(&I)) {
+ Operand = CBI->getCalledOperand();
+ FunctionType *FTy = CBI->getFunctionType();
+ Type *RetTy = FTy->getReturnType();
+ const AttributeList &PAL = CBI->getAttributes();
+
+ // Print the calling convention being used.
+ if (CBI->getCallingConv() != CallingConv::C) {
+ Out << " ";
+ PrintCallingConv(CBI->getCallingConv(), Out);
+ }
+
+ if (PAL.hasRetAttrs())
+ Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
+
+ // If possible, print out the short form of the callbr instruction. We can
+ // only do this if the first argument is a pointer to a nonvararg function,
+ // and if the return type is not a pointer to a function.
+ //
+ Out << ' ';
+ TypePrinter.print(FTy->isVarArg() ? FTy : RetTy, Out);
+ Out << ' ';
+ writeOperand(Operand, false);
+ Out << '(';
+ for (unsigned op = 0, Eop = CBI->arg_size(); op < Eop; ++op) {
+ if (op)
+ Out << ", ";
+ writeParamOperand(CBI->getArgOperand(op), PAL.getParamAttrs(op));
+ }
+
+ Out << ')';
+ if (PAL.hasFnAttrs())
+ Out << " #" << Machine.getAttributeGroupSlot(PAL.getFnAttrs());
+
+ writeOperandBundles(CBI);
+
+ Out << "\n to ";
+ writeOperand(CBI->getDefaultDest(), true);
+ Out << " [";
+ for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i) {
+ if (i != 0)
+ Out << ", ";
+ writeOperand(CBI->getIndirectDest(i), true);
+ }
+ Out << ']';
+ } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+ Out << ' ';
+ if (AI->isUsedWithInAlloca())
+ Out << "inalloca ";
+ if (AI->isSwiftError())
+ Out << "swifterror ";
+ TypePrinter.print(AI->getAllocatedType(), Out);
+
+ // Explicitly write the array size if the code is broken, if it's an array
+ // allocation, or if the type is not canonical for scalar allocations. The
+ // latter case prevents the type from mutating when round-tripping through
+ // assembly.
+ if (!AI->getArraySize() || AI->isArrayAllocation() ||
+ !AI->getArraySize()->getType()->isIntegerTy(32)) {
+ Out << ", ";
+ writeOperand(AI->getArraySize(), true);
+ }
+ if (MaybeAlign A = AI->getAlign()) {
+ Out << ", align " << A->value();
+ }
+
+ unsigned AddrSpace = AI->getType()->getAddressSpace();
+ if (AddrSpace != 0) {
+ Out << ", addrspace(" << AddrSpace << ')';
+ }
+ } else if (isa<CastInst>(I)) {
+ if (Operand) {
+ Out << ' ';
+ writeOperand(Operand, true); // Work with broken code
+ }
+ Out << " to ";
+ TypePrinter.print(I.getType(), Out);
+ } else if (isa<VAArgInst>(I)) {
+ if (Operand) {
+ Out << ' ';
+ writeOperand(Operand, true); // Work with broken code
+ }
+ Out << ", ";
+ TypePrinter.print(I.getType(), Out);
+ } else if (Operand) { // Print the normal way.
+ if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(GEP->getSourceElementType(), Out);
+ Out << ',';
+ } else if (const auto *LI = dyn_cast<LoadInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(LI->getType(), Out);
+ Out << ',';
+ }
+
+ // PrintAllTypes - Instructions whose operands all have the same type omit
+ // the type from all but the first operand. If the instruction has operands
+ // of differing types (for example br), then all of them are printed.
+ bool PrintAllTypes = false;
+ Type *TheType = Operand->getType();
+
+ // Select, Store, ShuffleVector and CmpXchg always print all types.
+ if (isa<SelectInst>(I) || isa<StoreInst>(I) || isa<ShuffleVectorInst>(I) ||
+ isa<ReturnInst>(I) || isa<AtomicCmpXchgInst>(I)) {
+ PrintAllTypes = true;
+ } else {
+ for (unsigned i = 1, E = I.getNumOperands(); i != E; ++i) {
+ Operand = I.getOperand(i);
+ // Note that Operand shouldn't be null, but the test helps make dump()
+ // more tolerant of malformed IR.
+ if (Operand && Operand->getType() != TheType) {
+ PrintAllTypes = true; // We have differing types! Print them all!
+ break;
+ }
+ }
+ }
+
+ if (!PrintAllTypes) {
+ Out << ' ';
+ TypePrinter.print(TheType, Out);
+ }
+
+ Out << ' ';
+ for (unsigned i = 0, E = I.getNumOperands(); i != E; ++i) {
+ if (i) Out << ", ";
+ writeOperand(I.getOperand(i), PrintAllTypes);
+ }
+ }
+
+ // Print atomic ordering/alignment for memory operations
+ if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+ if (LI->isAtomic())
+ writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID());
+ if (MaybeAlign A = LI->getAlign())
+ Out << ", align " << A->value();
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+ if (SI->isAtomic())
+ writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID());
+ if (MaybeAlign A = SI->getAlign())
+ Out << ", align " << A->value();
+ } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+ writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(),
+ CXI->getFailureOrdering(), CXI->getSyncScopeID());
+ Out << ", align " << CXI->getAlign().value();
+ } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+ writeAtomic(RMWI->getContext(), RMWI->getOrdering(),
+ RMWI->getSyncScopeID());
+ Out << ", align " << RMWI->getAlign().value();
+ } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
+ writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID());
+ } else if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(&I)) {
+ PrintShuffleMask(Out, SVI->getType(), SVI->getShuffleMask());
+ }
+
+ // Print Metadata info.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> InstMD;
+ I.getAllMetadata(InstMD);
+ printMetadataAttachments(InstMD, ", ");
+
+ // Print a nice comment.
+ printInfoComment(I);
+}
+
+void AssemblyWriter::printMetadataAttachments(
+ const SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs,
+ StringRef Separator) {
+ if (MDs.empty())
+ return;
+
+ if (MDNames.empty())
+ MDs[0].second->getContext().getMDKindNames(MDNames);
+
+ auto WriterCtx = getContext();
+ for (const auto &I : MDs) {
+ unsigned Kind = I.first;
+ Out << Separator;
+ if (Kind < MDNames.size()) {
+ Out << "!";
+ printMetadataIdentifier(MDNames[Kind], Out);
+ } else
+ Out << "!<unknown kind #" << Kind << ">";
+ Out << ' ';
+ WriteAsOperandInternal(Out, I.second, WriterCtx);
+ }
+}
+
+void AssemblyWriter::writeMDNode(unsigned Slot, const MDNode *Node) {
+ Out << '!' << Slot << " = ";
+ printMDNodeBody(Node);
+ Out << "\n";
+}
+
+void AssemblyWriter::writeAllMDNodes() {
+ SmallVector<const MDNode *, 16> Nodes;
+ Nodes.resize(Machine.mdn_size());
+ for (auto &I : llvm::make_range(Machine.mdn_begin(), Machine.mdn_end()))
+ Nodes[I.second] = cast<MDNode>(I.first);
+
+ for (unsigned i = 0, e = Nodes.size(); i != e; ++i) {
+ writeMDNode(i, Nodes[i]);
+ }
+}
+
+void AssemblyWriter::printMDNodeBody(const MDNode *Node) {
+ auto WriterCtx = getContext();
+ WriteMDNodeBodyInternal(Out, Node, WriterCtx);
+}
+
+void AssemblyWriter::writeAttribute(const Attribute &Attr, bool InAttrGroup) {
+ if (!Attr.isTypeAttribute()) {
+ Out << Attr.getAsString(InAttrGroup);
+ return;
+ }
+
+ Out << Attribute::getNameFromAttrKind(Attr.getKindAsEnum());
+ if (Type *Ty = Attr.getValueAsType()) {
+ Out << '(';
+ TypePrinter.print(Ty, Out);
+ Out << ')';
+ }
+}
+
+void AssemblyWriter::writeAttributeSet(const AttributeSet &AttrSet,
+ bool InAttrGroup) {
+ bool FirstAttr = true;
+ for (const auto &Attr : AttrSet) {
+ if (!FirstAttr)
+ Out << ' ';
+ writeAttribute(Attr, InAttrGroup);
+ FirstAttr = false;
+ }
+}
+
+void AssemblyWriter::writeAllAttributeGroups() {
+ std::vector<std::pair<AttributeSet, unsigned>> asVec;
+ asVec.resize(Machine.as_size());
+
+ for (auto &I : llvm::make_range(Machine.as_begin(), Machine.as_end()))
+ asVec[I.second] = I;
+
+ for (const auto &I : asVec)
+ Out << "attributes #" << I.second << " = { "
+ << I.first.getAsString(true) << " }\n";
+}
+
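+// Print a single uselistorder directive, e.g.
+// "uselistorder i32 %x, { 1, 0 }", or the module-level
+// "uselistorder_bb @fn, %bb, { 1, 0 }" form used for basic block references.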
+void AssemblyWriter::printUseListOrder(const Value *V,
+ const std::vector<unsigned> &Shuffle) {
+ bool IsInFunction = Machine.getFunction();
+ if (IsInFunction)
+ Out << " ";
+
+ Out << "uselistorder";
+ if (const BasicBlock *BB = IsInFunction ? nullptr : dyn_cast<BasicBlock>(V)) {
+ Out << "_bb ";
+ writeOperand(BB->getParent(), false);
+ Out << ", ";
+ writeOperand(BB, false);
+ } else {
+ Out << " ";
+ writeOperand(V, true);
+ }
+ Out << ", { ";
+
+ assert(Shuffle.size() >= 2 && "Shuffle too small");
+ Out << Shuffle[0];
+ for (unsigned I = 1, E = Shuffle.size(); I != E; ++I)
+ Out << ", " << Shuffle[I];
+ Out << " }\n";
+}
+
+void AssemblyWriter::printUseLists(const Function *F) {
+ auto It = UseListOrders.find(F);
+ if (It == UseListOrders.end())
+ return;
+
+ Out << "\n; uselistorder directives\n";
+ for (const auto &Pair : It->second)
+ printUseListOrder(Pair.first, Pair.second);
+}
+
+//===----------------------------------------------------------------------===//
+// External Interface declarations
+//===----------------------------------------------------------------------===//
+
+void Function::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW,
+ bool ShouldPreserveUseListOrder,
+ bool IsForDebug) const {
+ SlotTracker SlotTable(this->getParent());
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, this->getParent(), AAW,
+ IsForDebug,
+ ShouldPreserveUseListOrder);
+ W.printFunction(this);
+}
+
+void BasicBlock::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW,
+ bool ShouldPreserveUseListOrder,
+ bool IsForDebug) const {
+ SlotTracker SlotTable(this->getParent());
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, this->getModule(), AAW,
+ IsForDebug,
+ ShouldPreserveUseListOrder);
+ W.printBasicBlock(this);
+}
+
+void Module::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW,
+ bool ShouldPreserveUseListOrder, bool IsForDebug) const {
+ SlotTracker SlotTable(this);
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, this, AAW, IsForDebug,
+ ShouldPreserveUseListOrder);
+ W.printModule(this);
+}
+
+void NamedMDNode::print(raw_ostream &ROS, bool IsForDebug) const {
+ SlotTracker SlotTable(getParent());
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, getParent(), nullptr, IsForDebug);
+ W.printNamedMDNode(this);
+}
+
+void NamedMDNode::print(raw_ostream &ROS, ModuleSlotTracker &MST,
+ bool IsForDebug) const {
+ Optional<SlotTracker> LocalST;
+ SlotTracker *SlotTable;
+ if (auto *ST = MST.getMachine())
+ SlotTable = ST;
+ else {
+ LocalST.emplace(getParent());
+ SlotTable = &*LocalST;
+ }
+
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, *SlotTable, getParent(), nullptr, IsForDebug);
+ W.printNamedMDNode(this);
+}
+
+void Comdat::print(raw_ostream &ROS, bool /*IsForDebug*/) const {
+ PrintLLVMName(ROS, getName(), ComdatPrefix);
+ ROS << " = comdat ";
+
+ switch (getSelectionKind()) {
+ case Comdat::Any:
+ ROS << "any";
+ break;
+ case Comdat::ExactMatch:
+ ROS << "exactmatch";
+ break;
+ case Comdat::Largest:
+ ROS << "largest";
+ break;
+ case Comdat::NoDeduplicate:
+ ROS << "nodeduplicate";
+ break;
+ case Comdat::SameSize:
+ ROS << "samesize";
+ break;
+ }
+
+ ROS << '\n';
+}
+
+void Type::print(raw_ostream &OS, bool /*IsForDebug*/, bool NoDetails) const {
+ TypePrinting TP;
+ TP.print(const_cast<Type*>(this), OS);
+
+ if (NoDetails)
+ return;
+
+ // If the type is a named struct type, print the body as well.
+ if (StructType *STy = dyn_cast<StructType>(const_cast<Type*>(this)))
+ if (!STy->isLiteral()) {
+ OS << " = type ";
+ TP.printStructBody(STy, OS);
+ }
+}
+
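+// Return true if \p I is an intrinsic call with an MDNode operand. Printing
+// such an instruction in isolation requires all metadata slots to be
+// initialized up front (see Value::print below).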
+static bool isReferencingMDNode(const Instruction &I) {
+ if (const auto *CI = dyn_cast<CallInst>(&I))
+ if (Function *F = CI->getCalledFunction())
+ if (F->isIntrinsic())
+ for (auto &Op : I.operands())
+ if (auto *V = dyn_cast_or_null<MetadataAsValue>(Op))
+ if (isa<MDNode>(V->getMetadata()))
+ return true;
+ return false;
+}
+
+void Value::print(raw_ostream &ROS, bool IsForDebug) const {
+ bool ShouldInitializeAllMetadata = false;
+ if (auto *I = dyn_cast<Instruction>(this))
+ ShouldInitializeAllMetadata = isReferencingMDNode(*I);
+ else if (isa<Function>(this) || isa<MetadataAsValue>(this))
+ ShouldInitializeAllMetadata = true;
+
+ ModuleSlotTracker MST(getModuleFromVal(this), ShouldInitializeAllMetadata);
+ print(ROS, MST, IsForDebug);
+}
+
+void Value::print(raw_ostream &ROS, ModuleSlotTracker &MST,
+ bool IsForDebug) const {
+ formatted_raw_ostream OS(ROS);
+ SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr));
+ SlotTracker &SlotTable =
+ MST.getMachine() ? *MST.getMachine() : EmptySlotTable;
+ auto incorporateFunction = [&](const Function *F) {
+ if (F)
+ MST.incorporateFunction(*F);
+ };
+
+ if (const Instruction *I = dyn_cast<Instruction>(this)) {
+ incorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr);
+ AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), nullptr, IsForDebug);
+ W.printInstruction(*I);
+ } else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) {
+ incorporateFunction(BB->getParent());
+ AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), nullptr, IsForDebug);
+ W.printBasicBlock(BB);
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) {
+ AssemblyWriter W(OS, SlotTable, GV->getParent(), nullptr, IsForDebug);
+ if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV))
+ W.printGlobal(V);
+ else if (const Function *F = dyn_cast<Function>(GV))
+ W.printFunction(F);
+ else if (const GlobalAlias *A = dyn_cast<GlobalAlias>(GV))
+ W.printAlias(A);
+ else if (const GlobalIFunc *I = dyn_cast<GlobalIFunc>(GV))
+ W.printIFunc(I);
+ else
+ llvm_unreachable("Unknown GlobalValue to print out!");
+ } else if (const MetadataAsValue *V = dyn_cast<MetadataAsValue>(this)) {
+ V->getMetadata()->print(ROS, MST, getModuleFromVal(V));
+ } else if (const Constant *C = dyn_cast<Constant>(this)) {
+ TypePrinting TypePrinter;
+ TypePrinter.print(C->getType(), OS);
+ OS << ' ';
+ AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine());
+ WriteConstantInternal(OS, C, WriterCtx);
+ } else if (isa<InlineAsm>(this) || isa<Argument>(this)) {
+ this->printAsOperand(OS, /* PrintType */ true, MST);
+ } else {
+ llvm_unreachable("Unknown value to print out!");
+ }
+}
+
+/// Print without a type, skipping the TypePrinting object.
+///
+/// \return \c true iff printing was successful.
+static bool printWithoutType(const Value &V, raw_ostream &O,
+ SlotTracker *Machine, const Module *M) {
+ if (V.hasName() || isa<GlobalValue>(V) ||
+ (!isa<Constant>(V) && !isa<MetadataAsValue>(V))) {
+ AsmWriterContext WriterCtx(nullptr, Machine, M);
+ WriteAsOperandInternal(O, &V, WriterCtx);
+ return true;
+ }
+ return false;
+}
+
+static void printAsOperandImpl(const Value &V, raw_ostream &O, bool PrintType,
+ ModuleSlotTracker &MST) {
+ TypePrinting TypePrinter(MST.getModule());
+ if (PrintType) {
+ TypePrinter.print(V.getType(), O);
+ O << ' ';
+ }
+
+ AsmWriterContext WriterCtx(&TypePrinter, MST.getMachine(), MST.getModule());
+ WriteAsOperandInternal(O, &V, WriterCtx);
+}
+
+void Value::printAsOperand(raw_ostream &O, bool PrintType,
+ const Module *M) const {
+ if (!M)
+ M = getModuleFromVal(this);
+
+ if (!PrintType)
+ if (printWithoutType(*this, O, nullptr, M))
+ return;
+
+ SlotTracker Machine(
+ M, /* ShouldInitializeAllMetadata */ isa<MetadataAsValue>(this));
+ ModuleSlotTracker MST(Machine, M);
+ printAsOperandImpl(*this, O, PrintType, MST);
+}
+
+void Value::printAsOperand(raw_ostream &O, bool PrintType,
+ ModuleSlotTracker &MST) const {
+ if (!PrintType)
+ if (printWithoutType(*this, O, MST.getMachine(), MST.getModule()))
+ return;
+
+ printAsOperandImpl(*this, O, PrintType, MST);
+}
+
+/// Recursive version of printMetadataImpl.
+static void printMetadataImplRec(raw_ostream &ROS, const Metadata &MD,
+ AsmWriterContext &WriterCtx) {
+ formatted_raw_ostream OS(ROS);
+ WriteAsOperandInternal(OS, &MD, WriterCtx, /* FromValue */ true);
+
+ auto *N = dyn_cast<MDNode>(&MD);
+ if (!N || isa<DIExpression>(MD) || isa<DIArgList>(MD))
+ return;
+
+ OS << " = ";
+ WriteMDNodeBodyInternal(OS, N, WriterCtx);
+}
+
+namespace {
+struct MDTreeAsmWriterContext : public AsmWriterContext {
+ unsigned Level;
+ // {Level, Printed string}
+ using EntryTy = std::pair<unsigned, std::string>;
+ SmallVector<EntryTy, 4> Buffer;
+
+ // Used to break the cycle in case there is any.
+ SmallPtrSet<const Metadata *, 4> Visited;
+
+ raw_ostream &MainOS;
+
+ MDTreeAsmWriterContext(TypePrinting *TP, SlotTracker *ST, const Module *M,
+ raw_ostream &OS, const Metadata *InitMD)
+ : AsmWriterContext(TP, ST, M), Level(0U), Visited({InitMD}), MainOS(OS) {}
+
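+ // Nested nodes are not written inline; each is buffered together with its
+ // nesting level, and the destructor emits the whole tree, indenting each
+ // entry by two spaces per level.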
+ void onWriteMetadataAsOperand(const Metadata *MD) override {
+ if (!Visited.insert(MD).second)
+ return;
+
+ std::string Str;
+ raw_string_ostream SS(Str);
+ ++Level;
+ // A placeholder entry to remember the correct position in the buffer.
+ Buffer.emplace_back(std::make_pair(Level, ""));
+ unsigned InsertIdx = Buffer.size() - 1;
+
+ printMetadataImplRec(SS, *MD, *this);
+ Buffer[InsertIdx].second = std::move(SS.str());
+ --Level;
+ }
+
+ ~MDTreeAsmWriterContext() {
+ for (const auto &Entry : Buffer) {
+ MainOS << "\n";
+ unsigned NumIndent = Entry.first * 2U;
+ MainOS.indent(NumIndent) << Entry.second;
+ }
+ }
+};
+} // end anonymous namespace
+
+static void printMetadataImpl(raw_ostream &ROS, const Metadata &MD,
+ ModuleSlotTracker &MST, const Module *M,
+ bool OnlyAsOperand, bool PrintAsTree = false) {
+ formatted_raw_ostream OS(ROS);
+
+ TypePrinting TypePrinter(M);
+
+ std::unique_ptr<AsmWriterContext> WriterCtx;
+ if (PrintAsTree && !OnlyAsOperand)
+ WriterCtx = std::make_unique<MDTreeAsmWriterContext>(
+ &TypePrinter, MST.getMachine(), M, OS, &MD);
+ else
+ WriterCtx =
+ std::make_unique<AsmWriterContext>(&TypePrinter, MST.getMachine(), M);
+
+ WriteAsOperandInternal(OS, &MD, *WriterCtx, /* FromValue */ true);
+
+ auto *N = dyn_cast<MDNode>(&MD);
+ if (OnlyAsOperand || !N || isa<DIExpression>(MD) || isa<DIArgList>(MD))
+ return;
+
+ OS << " = ";
+ WriteMDNodeBodyInternal(OS, N, *WriterCtx);
+}
+
+void Metadata::printAsOperand(raw_ostream &OS, const Module *M) const {
+ ModuleSlotTracker MST(M, isa<MDNode>(this));
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ true);
+}
+
+void Metadata::printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
+ const Module *M) const {
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ true);
+}
+
+void Metadata::print(raw_ostream &OS, const Module *M,
+ bool /*IsForDebug*/) const {
+ ModuleSlotTracker MST(M, isa<MDNode>(this));
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ false);
+}
+
+void Metadata::print(raw_ostream &OS, ModuleSlotTracker &MST,
+ const Module *M, bool /*IsForDebug*/) const {
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ false);
+}
+
+void MDNode::printTree(raw_ostream &OS, const Module *M) const {
+ ModuleSlotTracker MST(M, true);
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ false,
+ /*PrintAsTree=*/true);
+}
+
+void MDNode::printTree(raw_ostream &OS, ModuleSlotTracker &MST,
+ const Module *M) const {
+ printMetadataImpl(OS, *this, MST, M, /* OnlyAsOperand */ false,
+ /*PrintAsTree=*/true);
+}
+
+void ModuleSummaryIndex::print(raw_ostream &ROS, bool IsForDebug) const {
+ SlotTracker SlotTable(this);
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, this, IsForDebug);
+ W.printModuleSummaryIndex();
+}
+
+void ModuleSlotTracker::collectMDNodes(MachineMDNodeListType &L, unsigned LB,
+ unsigned UB) const {
+ SlotTracker *ST = MachineStorage.get();
+ if (!ST)
+ return;
+
+ for (auto &I : llvm::make_range(ST->mdn_begin(), ST->mdn_end()))
+ if (I.second >= LB && I.second < UB)
+ L.push_back(std::make_pair(I.second, I.first));
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+// Value::dump - allow easy printing of Values from the debugger.
+LLVM_DUMP_METHOD
+void Value::dump() const { print(dbgs(), /*IsForDebug=*/true); dbgs() << '\n'; }
+
+// Type::dump - allow easy printing of Types from the debugger.
+LLVM_DUMP_METHOD
+void Type::dump() const { print(dbgs(), /*IsForDebug=*/true); dbgs() << '\n'; }
+
+// Module::dump() - Allow printing of Modules from the debugger.
+LLVM_DUMP_METHOD
+void Module::dump() const {
+ print(dbgs(), nullptr,
+ /*ShouldPreserveUseListOrder=*/false, /*IsForDebug=*/true);
+}
+
+// Allow printing of Comdats from the debugger.
+LLVM_DUMP_METHOD
+void Comdat::dump() const { print(dbgs(), /*IsForDebug=*/true); }
+
+// NamedMDNode::dump() - Allow printing of NamedMDNodes from the debugger.
+LLVM_DUMP_METHOD
+void NamedMDNode::dump() const { print(dbgs(), /*IsForDebug=*/true); }
+
+LLVM_DUMP_METHOD
+void Metadata::dump() const { dump(nullptr); }
+
+LLVM_DUMP_METHOD
+void Metadata::dump(const Module *M) const {
+ print(dbgs(), M, /*IsForDebug=*/true);
+ dbgs() << '\n';
+}
+
+LLVM_DUMP_METHOD
+void MDNode::dumpTree() const { dumpTree(nullptr); }
+
+LLVM_DUMP_METHOD
+void MDNode::dumpTree(const Module *M) const {
+ printTree(dbgs(), M);
+ dbgs() << '\n';
+}
+
+// Allow printing of ModuleSummaryIndex from the debugger.
+LLVM_DUMP_METHOD
+void ModuleSummaryIndex::dump() const { print(dbgs(), /*IsForDebug=*/true); }
+#endif
diff --git a/contrib/llvm-project/llvm/lib/IR/Assumptions.cpp b/contrib/llvm-project/llvm/lib/IR/Assumptions.cpp
new file mode 100644
index 000000000000..27977d5d56b0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Assumptions.cpp
@@ -0,0 +1,111 @@
+//===- Assumptions.cpp ------ Collection of helpers for assumptions -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements helper functions for accessing assumption information
+// inside of the "llvm.assume" metadata.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Assumptions.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+
+using namespace llvm;
+
+namespace {
+bool hasAssumption(const Attribute &A,
+ const KnownAssumptionString &AssumptionStr) {
+ if (!A.isValid())
+ return false;
+ assert(A.isStringAttribute() && "Expected a string attribute!");
+
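+ // The attribute stores all assumptions as one comma-separated string, e.g.
+ // "omp_no_openmp,ompx_spmd_amenable".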
+ SmallVector<StringRef, 8> Strings;
+ A.getValueAsString().split(Strings, ",");
+
+ return llvm::is_contained(Strings, AssumptionStr);
+}
+
+DenseSet<StringRef> getAssumptions(const Attribute &A) {
+ if (!A.isValid())
+ return DenseSet<StringRef>();
+ assert(A.isStringAttribute() && "Expected a string attribute!");
+
+ DenseSet<StringRef> Assumptions;
+ SmallVector<StringRef, 8> Strings;
+ A.getValueAsString().split(Strings, ",");
+
+ for (StringRef Str : Strings)
+ Assumptions.insert(Str);
+ return Assumptions;
+}
+
+template <typename AttrSite>
+bool addAssumptionsImpl(AttrSite &Site,
+ const DenseSet<StringRef> &Assumptions) {
+ if (Assumptions.empty())
+ return false;
+
+ DenseSet<StringRef> CurAssumptions = getAssumptions(Site);
+
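+ // set_union returns true iff it inserted something new; bail out early
+ // when every assumption was already present.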
+ if (!set_union(CurAssumptions, Assumptions))
+ return false;
+
+ LLVMContext &Ctx = Site.getContext();
+ Site.addFnAttr(llvm::Attribute::get(
+ Ctx, llvm::AssumptionAttrKey,
+ llvm::join(CurAssumptions.begin(), CurAssumptions.end(), ",")));
+
+ return true;
+}
+} // namespace
+
+bool llvm::hasAssumption(const Function &F,
+ const KnownAssumptionString &AssumptionStr) {
+ const Attribute &A = F.getFnAttribute(AssumptionAttrKey);
+ return ::hasAssumption(A, AssumptionStr);
+}
+
+bool llvm::hasAssumption(const CallBase &CB,
+ const KnownAssumptionString &AssumptionStr) {
+ if (Function *F = CB.getCalledFunction())
+ if (hasAssumption(*F, AssumptionStr))
+ return true;
+
+ const Attribute &A = CB.getFnAttr(AssumptionAttrKey);
+ return ::hasAssumption(A, AssumptionStr);
+}
+
+DenseSet<StringRef> llvm::getAssumptions(const Function &F) {
+ const Attribute &A = F.getFnAttribute(AssumptionAttrKey);
+ return ::getAssumptions(A);
+}
+
+DenseSet<StringRef> llvm::getAssumptions(const CallBase &CB) {
+ const Attribute &A = CB.getFnAttr(AssumptionAttrKey);
+ return ::getAssumptions(A);
+}
+
+bool llvm::addAssumptions(Function &F, const DenseSet<StringRef> &Assumptions) {
+ return ::addAssumptionsImpl(F, Assumptions);
+}
+
+bool llvm::addAssumptions(CallBase &CB,
+ const DenseSet<StringRef> &Assumptions) {
+ return ::addAssumptionsImpl(CB, Assumptions);
+}
+
+StringSet<> llvm::KnownAssumptionStrings({
+ "omp_no_openmp", // OpenMP 5.1
+ "omp_no_openmp_routines", // OpenMP 5.1
+ "omp_no_parallelism", // OpenMP 5.1
+ "ompx_spmd_amenable", // OpenMPOpt extension
+ "ompx_no_call_asm", // OpenMPOpt extension
+});
diff --git a/contrib/llvm-project/llvm/lib/IR/AttributeImpl.h b/contrib/llvm-project/llvm/lib/IR/AttributeImpl.h
new file mode 100644
index 000000000000..5eb958f5786a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/AttributeImpl.h
@@ -0,0 +1,333 @@
+//===- AttributeImpl.h - Attribute Internals --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines various helper methods and classes used by
+/// LLVMContextImpl for creating and managing attributes.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_ATTRIBUTEIMPL_H
+#define LLVM_LIB_IR_ATTRIBUTEIMPL_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/Support/TrailingObjects.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class LLVMContext;
+class Type;
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// This class represents a single, uniqued attribute. That attribute
+/// could be a single enum, a tuple, or a string.
+class AttributeImpl : public FoldingSetNode {
+ unsigned char KindID; ///< Holds the AttrEntryKind of the attribute
+
+protected:
+ enum AttrEntryKind {
+ EnumAttrEntry,
+ IntAttrEntry,
+ StringAttrEntry,
+ TypeAttrEntry,
+ };
+
+ AttributeImpl(AttrEntryKind KindID) : KindID(KindID) {}
+
+public:
+ // AttributesImpl is uniqued, these should not be available.
+ AttributeImpl(const AttributeImpl &) = delete;
+ AttributeImpl &operator=(const AttributeImpl &) = delete;
+
+ bool isEnumAttribute() const { return KindID == EnumAttrEntry; }
+ bool isIntAttribute() const { return KindID == IntAttrEntry; }
+ bool isStringAttribute() const { return KindID == StringAttrEntry; }
+ bool isTypeAttribute() const { return KindID == TypeAttrEntry; }
+
+ bool hasAttribute(Attribute::AttrKind A) const;
+ bool hasAttribute(StringRef Kind) const;
+
+ Attribute::AttrKind getKindAsEnum() const;
+ uint64_t getValueAsInt() const;
+ bool getValueAsBool() const;
+
+ StringRef getKindAsString() const;
+ StringRef getValueAsString() const;
+
+ Type *getValueAsType() const;
+
+ /// Used when sorting the attributes.
+ bool operator<(const AttributeImpl &AI) const;
+
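+ /// Enum attributes profile as just (Kind): the shared helper below skips a
+ /// zero Val, so enum and int attributes can reuse one overload.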
+ void Profile(FoldingSetNodeID &ID) const {
+ if (isEnumAttribute())
+ Profile(ID, getKindAsEnum(), static_cast<uint64_t>(0));
+ else if (isIntAttribute())
+ Profile(ID, getKindAsEnum(), getValueAsInt());
+ else if (isStringAttribute())
+ Profile(ID, getKindAsString(), getValueAsString());
+ else
+ Profile(ID, getKindAsEnum(), getValueAsType());
+ }
+
+ static void Profile(FoldingSetNodeID &ID, Attribute::AttrKind Kind,
+ uint64_t Val) {
+ ID.AddInteger(Kind);
+ if (Val) ID.AddInteger(Val);
+ }
+
+ static void Profile(FoldingSetNodeID &ID, StringRef Kind, StringRef Values) {
+ ID.AddString(Kind);
+ if (!Values.empty()) ID.AddString(Values);
+ }
+
+ static void Profile(FoldingSetNodeID &ID, Attribute::AttrKind Kind,
+ Type *Ty) {
+ ID.AddInteger(Kind);
+ ID.AddPointer(Ty);
+ }
+};
+
+static_assert(std::is_trivially_destructible<AttributeImpl>::value,
+ "AttributeImpl should be trivially destructible");
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// A set of classes that contain the value of the
+/// attribute object. There are four main categories: enum attribute entries,
+/// represented by Attribute::AttrKind; integer (e.g. alignment) attribute
+/// entries; string attribute entries, which are for target-dependent
+/// attributes; and type attribute entries, which carry a Type operand.
+
+class EnumAttributeImpl : public AttributeImpl {
+ Attribute::AttrKind Kind;
+
+protected:
+ EnumAttributeImpl(AttrEntryKind ID, Attribute::AttrKind Kind)
+ : AttributeImpl(ID), Kind(Kind) {}
+
+public:
+ EnumAttributeImpl(Attribute::AttrKind Kind)
+ : AttributeImpl(EnumAttrEntry), Kind(Kind) {
+ assert(Kind != Attribute::AttrKind::None &&
+ "Can't create a None attribute!");
+ }
+
+ Attribute::AttrKind getEnumKind() const { return Kind; }
+};
+
+class IntAttributeImpl : public EnumAttributeImpl {
+ uint64_t Val;
+
+public:
+ IntAttributeImpl(Attribute::AttrKind Kind, uint64_t Val)
+ : EnumAttributeImpl(IntAttrEntry, Kind), Val(Val) {
+ assert(Attribute::isIntAttrKind(Kind) &&
+ "Wrong kind for int attribute!");
+ }
+
+ uint64_t getValue() const { return Val; }
+};
+
+class StringAttributeImpl final
+ : public AttributeImpl,
+ private TrailingObjects<StringAttributeImpl, char> {
+ friend TrailingObjects;
+
+ unsigned KindSize;
+ unsigned ValSize;
+ size_t numTrailingObjects(OverloadToken<char>) const {
+ return KindSize + 1 + ValSize + 1;
+ }
+
+public:
+ StringAttributeImpl(StringRef Kind, StringRef Val = StringRef())
+ : AttributeImpl(StringAttrEntry), KindSize(Kind.size()),
+ ValSize(Val.size()) {
+ char *TrailingString = getTrailingObjects<char>();
+ // Some users rely on zero-termination.
+ llvm::copy(Kind, TrailingString);
+ TrailingString[KindSize] = '\0';
+ llvm::copy(Val, &TrailingString[KindSize + 1]);
+ TrailingString[KindSize + 1 + ValSize] = '\0';
+ }
+
+ StringRef getStringKind() const {
+ return StringRef(getTrailingObjects<char>(), KindSize);
+ }
+ StringRef getStringValue() const {
+ return StringRef(getTrailingObjects<char>() + KindSize + 1, ValSize);
+ }
+
+ static size_t totalSizeToAlloc(StringRef Kind, StringRef Val) {
+ return TrailingObjects::totalSizeToAlloc<char>(Kind.size() + 1 +
+ Val.size() + 1);
+ }
+};
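+// For example, a StringAttributeImpl with Kind "frame-pointer" and Val "all"
+// lays out its trailing bytes as "frame-pointer\0all\0", so getStringKind()
+// and getStringValue() can return zero-terminated StringRefs without any
+// separate heap allocation.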
+
+class TypeAttributeImpl : public EnumAttributeImpl {
+ Type *Ty;
+
+public:
+ TypeAttributeImpl(Attribute::AttrKind Kind, Type *Ty)
+ : EnumAttributeImpl(TypeAttrEntry, Kind), Ty(Ty) {}
+
+ Type *getTypeValue() const { return Ty; }
+};
+
+class AttributeBitSet {
+ /// Bitset with a bit for each available attribute Attribute::AttrKind.
+ uint8_t AvailableAttrs[12] = {};
+ static_assert(Attribute::EndAttrKinds <= sizeof(AvailableAttrs) * CHAR_BIT,
+ "Too many attributes");
+
+public:
+ bool hasAttribute(Attribute::AttrKind Kind) const {
+ return AvailableAttrs[Kind / 8] & (1 << (Kind % 8));
+ }
+
+ void addAttribute(Attribute::AttrKind Kind) {
+ AvailableAttrs[Kind / 8] |= 1 << (Kind % 8);
+ }
+};
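+// For example, hasAttribute(Kind) for Kind == 10 tests bit 2 (10 % 8) of
+// AvailableAttrs[1] (10 / 8); the 12-byte array covers up to 96 enum kinds,
+// which the static_assert above checks against Attribute::EndAttrKinds.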
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// This class represents a group of attributes that apply to one
+/// element: function, return type, or parameter.
+class AttributeSetNode final
+ : public FoldingSetNode,
+ private TrailingObjects<AttributeSetNode, Attribute> {
+ friend TrailingObjects;
+
+ unsigned NumAttrs; ///< Number of attributes in this node.
+ AttributeBitSet AvailableAttrs; ///< Available enum attributes.
+
+ DenseMap<StringRef, Attribute> StringAttrs;
+
+ AttributeSetNode(ArrayRef<Attribute> Attrs);
+
+ static AttributeSetNode *getSorted(LLVMContext &C,
+ ArrayRef<Attribute> SortedAttrs);
+ Optional<Attribute> findEnumAttribute(Attribute::AttrKind Kind) const;
+
+public:
+ // AttributeSetNode is uniqued; these should not be available.
+ AttributeSetNode(const AttributeSetNode &) = delete;
+ AttributeSetNode &operator=(const AttributeSetNode &) = delete;
+
+ void operator delete(void *p) { ::operator delete(p); }
+
+ static AttributeSetNode *get(LLVMContext &C, const AttrBuilder &B);
+
+ static AttributeSetNode *get(LLVMContext &C, ArrayRef<Attribute> Attrs);
+
+ /// Return the number of attributes in this node.
+ unsigned getNumAttributes() const { return NumAttrs; }
+
+ bool hasAttribute(Attribute::AttrKind Kind) const {
+ return AvailableAttrs.hasAttribute(Kind);
+ }
+ bool hasAttribute(StringRef Kind) const;
+ bool hasAttributes() const { return NumAttrs != 0; }
+
+ Attribute getAttribute(Attribute::AttrKind Kind) const;
+ Attribute getAttribute(StringRef Kind) const;
+
+ MaybeAlign getAlignment() const;
+ MaybeAlign getStackAlignment() const;
+ uint64_t getDereferenceableBytes() const;
+ uint64_t getDereferenceableOrNullBytes() const;
+ std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
+ unsigned getVScaleRangeMin() const;
+ Optional<unsigned> getVScaleRangeMax() const;
+ UWTableKind getUWTableKind() const;
+ AllocFnKind getAllocKind() const;
+ std::string getAsString(bool InAttrGrp) const;
+ Type *getAttributeType(Attribute::AttrKind Kind) const;
+
+ using iterator = const Attribute *;
+
+ iterator begin() const { return getTrailingObjects<Attribute>(); }
+ iterator end() const { return begin() + NumAttrs; }
+
+ void Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, makeArrayRef(begin(), end()));
+ }
+
+ static void Profile(FoldingSetNodeID &ID, ArrayRef<Attribute> AttrList) {
+ for (const auto &Attr : AttrList)
+ Attr.Profile(ID);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// This class represents a set of attributes that apply to the function,
+/// return type, and parameters.
+class AttributeListImpl final
+ : public FoldingSetNode,
+ private TrailingObjects<AttributeListImpl, AttributeSet> {
+ friend class AttributeList;
+ friend TrailingObjects;
+
+private:
+ unsigned NumAttrSets; ///< Number of entries in this set.
+ /// Available enum function attributes.
+ AttributeBitSet AvailableFunctionAttrs;
+ /// Union of enum attributes available at any index.
+ AttributeBitSet AvailableSomewhereAttrs;
+
+ // Helper fn for TrailingObjects class.
+ size_t numTrailingObjects(OverloadToken<AttributeSet>) { return NumAttrSets; }
+
+public:
+ AttributeListImpl(ArrayRef<AttributeSet> Sets);
+
+ // AttributeListImpl is uniqued; these should not be available.
+ AttributeListImpl(const AttributeListImpl &) = delete;
+ AttributeListImpl &operator=(const AttributeListImpl &) = delete;
+
+ /// Return true if the attribute set at FunctionIndex has an enum
+ /// attribute of the given kind.
+ bool hasFnAttribute(Attribute::AttrKind Kind) const {
+ return AvailableFunctionAttrs.hasAttribute(Kind);
+ }
+
+ /// Return true if the specified attribute is set for at least one
+ /// parameter or for the return value. If Index is not nullptr, the index
+ /// of a parameter with the specified attribute is provided.
+ bool hasAttrSomewhere(Attribute::AttrKind Kind,
+ unsigned *Index = nullptr) const;
+
+ using iterator = const AttributeSet *;
+
+ iterator begin() const { return getTrailingObjects<AttributeSet>(); }
+ iterator end() const { return begin() + NumAttrSets; }
+
+ void Profile(FoldingSetNodeID &ID) const;
+ static void Profile(FoldingSetNodeID &ID, ArrayRef<AttributeSet> Nodes);
+
+ void dump() const;
+};
+
+static_assert(std::is_trivially_destructible<AttributeListImpl>::value,
+ "AttributeListImpl should be trivially destructible");
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_IR_ATTRIBUTEIMPL_H
diff --git a/contrib/llvm-project/llvm/lib/IR/Attributes.cpp b/contrib/llvm-project/llvm/lib/IR/Attributes.cpp
new file mode 100644
index 000000000000..6d9f94b5eefd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Attributes.cpp
@@ -0,0 +1,2077 @@
+//===- Attributes.cpp - Implement AttributeList ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// This file implements the Attribute, AttributeImpl, AttrBuilder,
+// AttributeListImpl, and AttributeList classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Attributes.h"
+#include "AttributeImpl.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <tuple>
+#include <utility>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Attribute Construction Methods
+//===----------------------------------------------------------------------===//
+
+// allocsize has two integer arguments, but because they're both 32 bits, we can
+// pack them into one 64-bit value, at the cost of making said value
+// nonsensical.
+//
+// In order to do this, we need to reserve one value of the second (optional)
+// allocsize argument to signify "not present."
+static const unsigned AllocSizeNumElemsNotPresent = -1;
+
+static uint64_t packAllocSizeArgs(unsigned ElemSizeArg,
+ const Optional<unsigned> &NumElemsArg) {
+ assert((!NumElemsArg || *NumElemsArg != AllocSizeNumElemsNotPresent) &&
+ "Attempting to pack a reserved value");
+
+ return uint64_t(ElemSizeArg) << 32 |
+ NumElemsArg.value_or(AllocSizeNumElemsNotPresent);
+}
+
+static std::pair<unsigned, Optional<unsigned>>
+unpackAllocSizeArgs(uint64_t Num) {
+ unsigned NumElems = Num & std::numeric_limits<unsigned>::max();
+ unsigned ElemSizeArg = Num >> 32;
+
+ Optional<unsigned> NumElemsArg;
+ if (NumElems != AllocSizeNumElemsNotPresent)
+ NumElemsArg = NumElems;
+ return std::make_pair(ElemSizeArg, NumElemsArg);
+}
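+// For example, allocsize(4, 8) packs to (uint64_t(4) << 32) | 8, while
+// allocsize(4) with no second argument packs to
+// (uint64_t(4) << 32) | 0xFFFFFFFF, because AllocSizeNumElemsNotPresent is
+// unsigned(-1). unpackAllocSizeArgs() inverts this transformation.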
+
+static uint64_t packVScaleRangeArgs(unsigned MinValue,
+ Optional<unsigned> MaxValue) {
+ return uint64_t(MinValue) << 32 | MaxValue.value_or(0);
+}
+
+static std::pair<unsigned, Optional<unsigned>>
+unpackVScaleRangeArgs(uint64_t Value) {
+ unsigned MaxValue = Value & std::numeric_limits<unsigned>::max();
+ unsigned MinValue = Value >> 32;
+
+ return std::make_pair(MinValue,
+ MaxValue > 0 ? MaxValue : Optional<unsigned>());
+}
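+// Similarly, vscale_range(2, 4) packs to (uint64_t(2) << 32) | 4. A packed
+// maximum of 0 is reserved to mean "no maximum" and unpacks to an empty
+// Optional.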
+
+Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind,
+ uint64_t Val) {
+ if (Val)
+ assert(Attribute::isIntAttrKind(Kind) && "Not an int attribute");
+ else
+ assert(Attribute::isEnumAttrKind(Kind) && "Not an enum attribute");
+
+ LLVMContextImpl *pImpl = Context.pImpl;
+ FoldingSetNodeID ID;
+ ID.AddInteger(Kind);
+ if (Val) ID.AddInteger(Val);
+
+ void *InsertPoint;
+ AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+ if (!PA) {
+ // If we didn't find any existing attributes of the same shape then create a
+ // new one and insert it.
+ if (!Val)
+ PA = new (pImpl->Alloc) EnumAttributeImpl(Kind);
+ else
+ PA = new (pImpl->Alloc) IntAttributeImpl(Kind, Val);
+ pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the Attribute that we found or created.
+ return Attribute(PA);
+}
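+// A minimal usage sketch (Ctx names an LLVMContext assumed in scope):
+//
+//   Attribute NI = Attribute::get(Ctx, Attribute::NoInline);
+//   Attribute AL = Attribute::getWithAlignment(Ctx, Align(16));
+//
+// Repeated calls with the same arguments hand back Attributes wrapping the
+// same uniqued AttributeImpl, which is what makes Attribute cheap to copy
+// and compare.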
+
+Attribute Attribute::get(LLVMContext &Context, StringRef Kind, StringRef Val) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ FoldingSetNodeID ID;
+ ID.AddString(Kind);
+ if (!Val.empty()) ID.AddString(Val);
+
+ void *InsertPoint;
+ AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+ if (!PA) {
+ // If we didn't find any existing attributes of the same shape then create a
+ // new one and insert it.
+ void *Mem =
+ pImpl->Alloc.Allocate(StringAttributeImpl::totalSizeToAlloc(Kind, Val),
+ alignof(StringAttributeImpl));
+ PA = new (Mem) StringAttributeImpl(Kind, Val);
+ pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the Attribute that we found or created.
+ return Attribute(PA);
+}
+
+Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind,
+ Type *Ty) {
+ assert(Attribute::isTypeAttrKind(Kind) && "Not a type attribute");
+ LLVMContextImpl *pImpl = Context.pImpl;
+ FoldingSetNodeID ID;
+ ID.AddInteger(Kind);
+ ID.AddPointer(Ty);
+
+ void *InsertPoint;
+ AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+ if (!PA) {
+ // If we didn't find any existing attributes of the same shape then create a
+ // new one and insert it.
+ PA = new (pImpl->Alloc) TypeAttributeImpl(Kind, Ty);
+ pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the Attribute that we found or created.
+ return Attribute(PA);
+}
+
+Attribute Attribute::getWithAlignment(LLVMContext &Context, Align A) {
+ assert(A <= llvm::Value::MaximumAlignment && "Alignment too large.");
+ return get(Context, Alignment, A.value());
+}
+
+Attribute Attribute::getWithStackAlignment(LLVMContext &Context, Align A) {
+ assert(A <= 0x100 && "Alignment too large.");
+ return get(Context, StackAlignment, A.value());
+}
+
+Attribute Attribute::getWithDereferenceableBytes(LLVMContext &Context,
+ uint64_t Bytes) {
+ assert(Bytes && "Bytes must be non-zero.");
+ return get(Context, Dereferenceable, Bytes);
+}
+
+Attribute Attribute::getWithDereferenceableOrNullBytes(LLVMContext &Context,
+ uint64_t Bytes) {
+ assert(Bytes && "Bytes must be non-zero.");
+ return get(Context, DereferenceableOrNull, Bytes);
+}
+
+Attribute Attribute::getWithByValType(LLVMContext &Context, Type *Ty) {
+ return get(Context, ByVal, Ty);
+}
+
+Attribute Attribute::getWithStructRetType(LLVMContext &Context, Type *Ty) {
+ return get(Context, StructRet, Ty);
+}
+
+Attribute Attribute::getWithByRefType(LLVMContext &Context, Type *Ty) {
+ return get(Context, ByRef, Ty);
+}
+
+Attribute Attribute::getWithPreallocatedType(LLVMContext &Context, Type *Ty) {
+ return get(Context, Preallocated, Ty);
+}
+
+Attribute Attribute::getWithInAllocaType(LLVMContext &Context, Type *Ty) {
+ return get(Context, InAlloca, Ty);
+}
+
+Attribute Attribute::getWithUWTableKind(LLVMContext &Context,
+ UWTableKind Kind) {
+ return get(Context, UWTable, uint64_t(Kind));
+}
+
+Attribute
+Attribute::getWithAllocSizeArgs(LLVMContext &Context, unsigned ElemSizeArg,
+ const Optional<unsigned> &NumElemsArg) {
+ assert(!(ElemSizeArg == 0 && NumElemsArg && *NumElemsArg == 0) &&
+ "Invalid allocsize arguments -- given allocsize(0, 0)");
+ return get(Context, AllocSize, packAllocSizeArgs(ElemSizeArg, NumElemsArg));
+}
+
+Attribute Attribute::getWithVScaleRangeArgs(LLVMContext &Context,
+ unsigned MinValue,
+ unsigned MaxValue) {
+ return get(Context, VScaleRange, packVScaleRangeArgs(MinValue, MaxValue));
+}
+
+Attribute::AttrKind Attribute::getAttrKindFromName(StringRef AttrName) {
+ return StringSwitch<Attribute::AttrKind>(AttrName)
+#define GET_ATTR_NAMES
+#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME) \
+ .Case(#DISPLAY_NAME, Attribute::ENUM_NAME)
+#include "llvm/IR/Attributes.inc"
+ .Default(Attribute::None);
+}
+
+StringRef Attribute::getNameFromAttrKind(Attribute::AttrKind AttrKind) {
+ switch (AttrKind) {
+#define GET_ATTR_NAMES
+#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME) \
+ case Attribute::ENUM_NAME: \
+ return #DISPLAY_NAME;
+#include "llvm/IR/Attributes.inc"
+ case Attribute::None:
+ return "none";
+ default:
+ llvm_unreachable("invalid Kind");
+ }
+}
+
+bool Attribute::isExistingAttribute(StringRef Name) {
+ return StringSwitch<bool>(Name)
+#define GET_ATTR_NAMES
+#define ATTRIBUTE_ALL(ENUM_NAME, DISPLAY_NAME) .Case(#DISPLAY_NAME, true)
+#include "llvm/IR/Attributes.inc"
+ .Default(false);
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Accessor Methods
+//===----------------------------------------------------------------------===//
+
+bool Attribute::isEnumAttribute() const {
+ return pImpl && pImpl->isEnumAttribute();
+}
+
+bool Attribute::isIntAttribute() const {
+ return pImpl && pImpl->isIntAttribute();
+}
+
+bool Attribute::isStringAttribute() const {
+ return pImpl && pImpl->isStringAttribute();
+}
+
+bool Attribute::isTypeAttribute() const {
+ return pImpl && pImpl->isTypeAttribute();
+}
+
+Attribute::AttrKind Attribute::getKindAsEnum() const {
+ if (!pImpl) return None;
+ assert((isEnumAttribute() || isIntAttribute() || isTypeAttribute()) &&
+ "Invalid attribute type to get the kind as an enum!");
+ return pImpl->getKindAsEnum();
+}
+
+uint64_t Attribute::getValueAsInt() const {
+ if (!pImpl) return 0;
+ assert(isIntAttribute() &&
+ "Expected the attribute to be an integer attribute!");
+ return pImpl->getValueAsInt();
+}
+
+bool Attribute::getValueAsBool() const {
+ if (!pImpl) return false;
+ assert(isStringAttribute() &&
+ "Expected the attribute to be a string attribute!");
+ return pImpl->getValueAsBool();
+}
+
+StringRef Attribute::getKindAsString() const {
+ if (!pImpl) return {};
+ assert(isStringAttribute() &&
+ "Invalid attribute type to get the kind as a string!");
+ return pImpl->getKindAsString();
+}
+
+StringRef Attribute::getValueAsString() const {
+ if (!pImpl) return {};
+ assert(isStringAttribute() &&
+ "Invalid attribute type to get the value as a string!");
+ return pImpl->getValueAsString();
+}
+
+Type *Attribute::getValueAsType() const {
+ if (!pImpl) return {};
+ assert(isTypeAttribute() &&
+ "Invalid attribute type to get the value as a type!");
+ return pImpl->getValueAsType();
+}
+
+bool Attribute::hasAttribute(AttrKind Kind) const {
+ return (pImpl && pImpl->hasAttribute(Kind)) || (!pImpl && Kind == None);
+}
+
+bool Attribute::hasAttribute(StringRef Kind) const {
+ if (!isStringAttribute()) return false;
+ return pImpl && pImpl->hasAttribute(Kind);
+}
+
+MaybeAlign Attribute::getAlignment() const {
+ assert(hasAttribute(Attribute::Alignment) &&
+ "Trying to get alignment from non-alignment attribute!");
+ return MaybeAlign(pImpl->getValueAsInt());
+}
+
+MaybeAlign Attribute::getStackAlignment() const {
+ assert(hasAttribute(Attribute::StackAlignment) &&
+ "Trying to get alignment from non-alignment attribute!");
+ return MaybeAlign(pImpl->getValueAsInt());
+}
+
+uint64_t Attribute::getDereferenceableBytes() const {
+ assert(hasAttribute(Attribute::Dereferenceable) &&
+ "Trying to get dereferenceable bytes from "
+ "non-dereferenceable attribute!");
+ return pImpl->getValueAsInt();
+}
+
+uint64_t Attribute::getDereferenceableOrNullBytes() const {
+ assert(hasAttribute(Attribute::DereferenceableOrNull) &&
+ "Trying to get dereferenceable bytes from "
+ "non-dereferenceable attribute!");
+ return pImpl->getValueAsInt();
+}
+
+std::pair<unsigned, Optional<unsigned>> Attribute::getAllocSizeArgs() const {
+ assert(hasAttribute(Attribute::AllocSize) &&
+ "Trying to get allocsize args from non-allocsize attribute");
+ return unpackAllocSizeArgs(pImpl->getValueAsInt());
+}
+
+unsigned Attribute::getVScaleRangeMin() const {
+ assert(hasAttribute(Attribute::VScaleRange) &&
+ "Trying to get vscale args from non-vscale attribute");
+ return unpackVScaleRangeArgs(pImpl->getValueAsInt()).first;
+}
+
+Optional<unsigned> Attribute::getVScaleRangeMax() const {
+ assert(hasAttribute(Attribute::VScaleRange) &&
+ "Trying to get vscale args from non-vscale attribute");
+ return unpackVScaleRangeArgs(pImpl->getValueAsInt()).second;
+}
+
+UWTableKind Attribute::getUWTableKind() const {
+ assert(hasAttribute(Attribute::UWTable) &&
+ "Trying to get unwind table kind from non-uwtable attribute");
+ return UWTableKind(pImpl->getValueAsInt());
+}
+
+AllocFnKind Attribute::getAllocKind() const {
+ assert(hasAttribute(Attribute::AllocKind) &&
+ "Trying to get allockind value from non-allockind attribute");
+ return AllocFnKind(pImpl->getValueAsInt());
+}
+
+std::string Attribute::getAsString(bool InAttrGrp) const {
+ if (!pImpl) return {};
+
+ if (isEnumAttribute())
+ return getNameFromAttrKind(getKindAsEnum()).str();
+
+ if (isTypeAttribute()) {
+ std::string Result = getNameFromAttrKind(getKindAsEnum()).str();
+ Result += '(';
+ raw_string_ostream OS(Result);
+ getValueAsType()->print(OS, false, true);
+ OS.flush();
+ Result += ')';
+ return Result;
+ }
+
+ // FIXME: These should be output like this:
+ //
+ // align=4
+ // alignstack=8
+ //
+ if (hasAttribute(Attribute::Alignment))
+ return (InAttrGrp ? "align=" + Twine(getValueAsInt())
+ : "align " + Twine(getValueAsInt()))
+ .str();
+
+ auto AttrWithBytesToString = [&](const char *Name) {
+ return (InAttrGrp ? Name + ("=" + Twine(getValueAsInt()))
+ : Name + ("(" + Twine(getValueAsInt())) + ")")
+ .str();
+ };
+
+ if (hasAttribute(Attribute::StackAlignment))
+ return AttrWithBytesToString("alignstack");
+
+ if (hasAttribute(Attribute::Dereferenceable))
+ return AttrWithBytesToString("dereferenceable");
+
+ if (hasAttribute(Attribute::DereferenceableOrNull))
+ return AttrWithBytesToString("dereferenceable_or_null");
+
+ if (hasAttribute(Attribute::AllocSize)) {
+ unsigned ElemSize;
+ Optional<unsigned> NumElems;
+ std::tie(ElemSize, NumElems) = getAllocSizeArgs();
+
+ return (NumElems
+ ? "allocsize(" + Twine(ElemSize) + "," + Twine(*NumElems) + ")"
+ : "allocsize(" + Twine(ElemSize) + ")")
+ .str();
+ }
+
+ if (hasAttribute(Attribute::VScaleRange)) {
+ unsigned MinValue = getVScaleRangeMin();
+ Optional<unsigned> MaxValue = getVScaleRangeMax();
+ return ("vscale_range(" + Twine(MinValue) + "," +
+ Twine(MaxValue.value_or(0)) + ")")
+ .str();
+ }
+
+ if (hasAttribute(Attribute::UWTable)) {
+ UWTableKind Kind = getUWTableKind();
+ if (Kind != UWTableKind::None) {
+ return Kind == UWTableKind::Default
+ ? "uwtable"
+ : ("uwtable(" +
+ Twine(Kind == UWTableKind::Sync ? "sync" : "async") + ")")
+ .str();
+ }
+ }
+
+ if (hasAttribute(Attribute::AllocKind)) {
+ AllocFnKind Kind = getAllocKind();
+ SmallVector<StringRef> parts;
+ if ((Kind & AllocFnKind::Alloc) != AllocFnKind::Unknown)
+ parts.push_back("alloc");
+ if ((Kind & AllocFnKind::Realloc) != AllocFnKind::Unknown)
+ parts.push_back("realloc");
+ if ((Kind & AllocFnKind::Free) != AllocFnKind::Unknown)
+ parts.push_back("free");
+ if ((Kind & AllocFnKind::Uninitialized) != AllocFnKind::Unknown)
+ parts.push_back("uninitialized");
+ if ((Kind & AllocFnKind::Zeroed) != AllocFnKind::Unknown)
+ parts.push_back("zeroed");
+ if ((Kind & AllocFnKind::Aligned) != AllocFnKind::Unknown)
+ parts.push_back("aligned");
+ return ("allockind(\"" +
+ Twine(llvm::join(parts.begin(), parts.end(), ",")) + "\")")
+ .str();
+ }
+
+ // Convert target-dependent attributes to strings of the form:
+ //
+ // "kind"
+ // "kind" = "value"
+ //
+ if (isStringAttribute()) {
+ std::string Result;
+ {
+ raw_string_ostream OS(Result);
+ OS << '"' << getKindAsString() << '"';
+
+ // Some attribute strings contain special characters that are not
+ // printable; escape those so the attribute value prints as-is,
+ // e.g. "\01__gnu_mcount_nc".
+ const auto &AttrVal = pImpl->getValueAsString();
+ if (!AttrVal.empty()) {
+ OS << "=\"";
+ printEscapedString(AttrVal, OS);
+ OS << "\"";
+ }
+ }
+ return Result;
+ }
+
+ llvm_unreachable("Unknown attribute");
+}
+
+bool Attribute::hasParentContext(LLVMContext &C) const {
+ assert(isValid() && "invalid Attribute doesn't refer to any context");
+ FoldingSetNodeID ID;
+ pImpl->Profile(ID);
+ void *Unused;
+ return C.pImpl->AttrsSet.FindNodeOrInsertPos(ID, Unused) == pImpl;
+}
+
+bool Attribute::operator<(Attribute A) const {
+ if (!pImpl && !A.pImpl) return false;
+ if (!pImpl) return true;
+ if (!A.pImpl) return false;
+ return *pImpl < *A.pImpl;
+}
+
+void Attribute::Profile(FoldingSetNodeID &ID) const {
+ ID.AddPointer(pImpl);
+}
+
+enum AttributeProperty {
+ FnAttr = (1 << 0),
+ ParamAttr = (1 << 1),
+ RetAttr = (1 << 2),
+};
+
+#define GET_ATTR_PROP_TABLE
+#include "llvm/IR/Attributes.inc"
+
+static bool hasAttributeProperty(Attribute::AttrKind Kind,
+ AttributeProperty Prop) {
+ unsigned Index = Kind - 1;
+ assert(Index < sizeof(AttrPropTable) / sizeof(AttrPropTable[0]) &&
+ "Invalid attribute kind");
+ return AttrPropTable[Index] & Prop;
+}
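+// AttrPropTable is emitted into the generated Attributes.inc; Kind - 1 skips
+// Attribute::None, which has no table entry.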
+
+bool Attribute::canUseAsFnAttr(AttrKind Kind) {
+ return hasAttributeProperty(Kind, AttributeProperty::FnAttr);
+}
+
+bool Attribute::canUseAsParamAttr(AttrKind Kind) {
+ return hasAttributeProperty(Kind, AttributeProperty::ParamAttr);
+}
+
+bool Attribute::canUseAsRetAttr(AttrKind Kind) {
+ return hasAttributeProperty(Kind, AttributeProperty::RetAttr);
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeImpl Definition
+//===----------------------------------------------------------------------===//
+
+bool AttributeImpl::hasAttribute(Attribute::AttrKind A) const {
+ if (isStringAttribute()) return false;
+ return getKindAsEnum() == A;
+}
+
+bool AttributeImpl::hasAttribute(StringRef Kind) const {
+ if (!isStringAttribute()) return false;
+ return getKindAsString() == Kind;
+}
+
+Attribute::AttrKind AttributeImpl::getKindAsEnum() const {
+ assert(isEnumAttribute() || isIntAttribute() || isTypeAttribute());
+ return static_cast<const EnumAttributeImpl *>(this)->getEnumKind();
+}
+
+uint64_t AttributeImpl::getValueAsInt() const {
+ assert(isIntAttribute());
+ return static_cast<const IntAttributeImpl *>(this)->getValue();
+}
+
+bool AttributeImpl::getValueAsBool() const {
+ assert(getValueAsString().empty() || getValueAsString() == "false" ||
+ getValueAsString() == "true");
+ return getValueAsString() == "true";
+}
+
+StringRef AttributeImpl::getKindAsString() const {
+ assert(isStringAttribute());
+ return static_cast<const StringAttributeImpl *>(this)->getStringKind();
+}
+
+StringRef AttributeImpl::getValueAsString() const {
+ assert(isStringAttribute());
+ return static_cast<const StringAttributeImpl *>(this)->getStringValue();
+}
+
+Type *AttributeImpl::getValueAsType() const {
+ assert(isTypeAttribute());
+ return static_cast<const TypeAttributeImpl *>(this)->getTypeValue();
+}
+
+bool AttributeImpl::operator<(const AttributeImpl &AI) const {
+ if (this == &AI)
+ return false;
+
+ // This sorts the attributes with Attribute::AttrKinds coming first (sorted
+ // relative to their enum value) and then strings.
+ if (!isStringAttribute()) {
+ if (AI.isStringAttribute())
+ return true;
+ if (getKindAsEnum() != AI.getKindAsEnum())
+ return getKindAsEnum() < AI.getKindAsEnum();
+ assert(!AI.isEnumAttribute() && "Non-unique attribute");
+ assert(!AI.isTypeAttribute() && "Comparison of types would be unstable");
+ // TODO: Is this actually needed?
+ assert(AI.isIntAttribute() && "Only possibility left");
+ return getValueAsInt() < AI.getValueAsInt();
+ }
+
+ if (!AI.isStringAttribute())
+ return false;
+ if (getKindAsString() == AI.getKindAsString())
+ return getValueAsString() < AI.getValueAsString();
+ return getKindAsString() < AI.getKindAsString();
+}
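+// The resulting order places all enum/int/type attributes first, sorted by
+// AttrKind (int attributes of equal kind fall back to their values), followed
+// by string attributes sorted by kind and then value.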
+
+//===----------------------------------------------------------------------===//
+// AttributeSet Definition
+//===----------------------------------------------------------------------===//
+
+AttributeSet AttributeSet::get(LLVMContext &C, const AttrBuilder &B) {
+ return AttributeSet(AttributeSetNode::get(C, B));
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<Attribute> Attrs) {
+ return AttributeSet(AttributeSetNode::get(C, Attrs));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C,
+ Attribute::AttrKind Kind) const {
+ if (hasAttribute(Kind)) return *this;
+ AttrBuilder B(C);
+ B.addAttribute(Kind);
+ return addAttributes(C, AttributeSet::get(C, B));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C, StringRef Kind,
+ StringRef Value) const {
+ AttrBuilder B(C);
+ B.addAttribute(Kind, Value);
+ return addAttributes(C, AttributeSet::get(C, B));
+}
+
+AttributeSet AttributeSet::addAttributes(LLVMContext &C,
+ const AttributeSet AS) const {
+ if (!hasAttributes())
+ return AS;
+
+ if (!AS.hasAttributes())
+ return *this;
+
+ AttrBuilder B(C, *this);
+ B.merge(AttrBuilder(C, AS));
+ return get(C, B);
+}
+
+AttributeSet AttributeSet::removeAttribute(LLVMContext &C,
+ Attribute::AttrKind Kind) const {
+ if (!hasAttribute(Kind)) return *this;
+ AttrBuilder B(C, *this);
+ B.removeAttribute(Kind);
+ return get(C, B);
+}
+
+AttributeSet AttributeSet::removeAttribute(LLVMContext &C,
+ StringRef Kind) const {
+ if (!hasAttribute(Kind)) return *this;
+ AttrBuilder B(C, *this);
+ B.removeAttribute(Kind);
+ return get(C, B);
+}
+
+AttributeSet AttributeSet::removeAttributes(LLVMContext &C,
+ const AttributeMask &Attrs) const {
+ AttrBuilder B(C, *this);
+ // If there is nothing to remove, directly return the original set.
+ if (!B.overlaps(Attrs))
+ return *this;
+
+ B.remove(Attrs);
+ return get(C, B);
+}
+
+unsigned AttributeSet::getNumAttributes() const {
+ return SetNode ? SetNode->getNumAttributes() : 0;
+}
+
+bool AttributeSet::hasAttribute(Attribute::AttrKind Kind) const {
+ return SetNode ? SetNode->hasAttribute(Kind) : false;
+}
+
+bool AttributeSet::hasAttribute(StringRef Kind) const {
+ return SetNode ? SetNode->hasAttribute(Kind) : false;
+}
+
+Attribute AttributeSet::getAttribute(Attribute::AttrKind Kind) const {
+ return SetNode ? SetNode->getAttribute(Kind) : Attribute();
+}
+
+Attribute AttributeSet::getAttribute(StringRef Kind) const {
+ return SetNode ? SetNode->getAttribute(Kind) : Attribute();
+}
+
+MaybeAlign AttributeSet::getAlignment() const {
+ return SetNode ? SetNode->getAlignment() : None;
+}
+
+MaybeAlign AttributeSet::getStackAlignment() const {
+ return SetNode ? SetNode->getStackAlignment() : None;
+}
+
+uint64_t AttributeSet::getDereferenceableBytes() const {
+ return SetNode ? SetNode->getDereferenceableBytes() : 0;
+}
+
+uint64_t AttributeSet::getDereferenceableOrNullBytes() const {
+ return SetNode ? SetNode->getDereferenceableOrNullBytes() : 0;
+}
+
+Type *AttributeSet::getByRefType() const {
+ return SetNode ? SetNode->getAttributeType(Attribute::ByRef) : nullptr;
+}
+
+Type *AttributeSet::getByValType() const {
+ return SetNode ? SetNode->getAttributeType(Attribute::ByVal) : nullptr;
+}
+
+Type *AttributeSet::getStructRetType() const {
+ return SetNode ? SetNode->getAttributeType(Attribute::StructRet) : nullptr;
+}
+
+Type *AttributeSet::getPreallocatedType() const {
+ return SetNode ? SetNode->getAttributeType(Attribute::Preallocated) : nullptr;
+}
+
+Type *AttributeSet::getInAllocaType() const {
+ return SetNode ? SetNode->getAttributeType(Attribute::InAlloca) : nullptr;
+}
+
+Type *AttributeSet::getElementType() const {
+ return SetNode ? SetNode->getAttributeType(Attribute::ElementType) : nullptr;
+}
+
+std::pair<unsigned, Optional<unsigned>> AttributeSet::getAllocSizeArgs() const {
+ return SetNode ? SetNode->getAllocSizeArgs()
+ : std::pair<unsigned, Optional<unsigned>>(0, 0);
+}
+
+unsigned AttributeSet::getVScaleRangeMin() const {
+ return SetNode ? SetNode->getVScaleRangeMin() : 1;
+}
+
+Optional<unsigned> AttributeSet::getVScaleRangeMax() const {
+ return SetNode ? SetNode->getVScaleRangeMax() : None;
+}
+
+UWTableKind AttributeSet::getUWTableKind() const {
+ return SetNode ? SetNode->getUWTableKind() : UWTableKind::None;
+}
+
+AllocFnKind AttributeSet::getAllocKind() const {
+ return SetNode ? SetNode->getAllocKind() : AllocFnKind::Unknown;
+}
+
+std::string AttributeSet::getAsString(bool InAttrGrp) const {
+ return SetNode ? SetNode->getAsString(InAttrGrp) : "";
+}
+
+bool AttributeSet::hasParentContext(LLVMContext &C) const {
+ assert(hasAttributes() && "empty AttributeSet doesn't refer to any context");
+ FoldingSetNodeID ID;
+ SetNode->Profile(ID);
+ void *Unused;
+ return C.pImpl->AttrsSetNodes.FindNodeOrInsertPos(ID, Unused) == SetNode;
+}
+
+AttributeSet::iterator AttributeSet::begin() const {
+ return SetNode ? SetNode->begin() : nullptr;
+}
+
+AttributeSet::iterator AttributeSet::end() const {
+ return SetNode ? SetNode->end() : nullptr;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void AttributeSet::dump() const {
+ dbgs() << "AS =\n";
+ dbgs() << " { ";
+ dbgs() << getAsString(true) << " }\n";
+}
+#endif
+
+//===----------------------------------------------------------------------===//
+// AttributeSetNode Definition
+//===----------------------------------------------------------------------===//
+
+AttributeSetNode::AttributeSetNode(ArrayRef<Attribute> Attrs)
+ : NumAttrs(Attrs.size()) {
+ // There's memory after the node where we can store the entries.
+ llvm::copy(Attrs, getTrailingObjects<Attribute>());
+
+ for (const auto &I : *this) {
+ if (I.isStringAttribute())
+ StringAttrs.insert({ I.getKindAsString(), I });
+ else
+ AvailableAttrs.addAttribute(I.getKindAsEnum());
+ }
+}
+
+AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
+ ArrayRef<Attribute> Attrs) {
+ SmallVector<Attribute, 8> SortedAttrs(Attrs.begin(), Attrs.end());
+ llvm::sort(SortedAttrs);
+ return getSorted(C, SortedAttrs);
+}
+
+AttributeSetNode *AttributeSetNode::getSorted(LLVMContext &C,
+ ArrayRef<Attribute> SortedAttrs) {
+ if (SortedAttrs.empty())
+ return nullptr;
+
+ // Build a key to look up the existing attributes.
+ LLVMContextImpl *pImpl = C.pImpl;
+ FoldingSetNodeID ID;
+
+ assert(llvm::is_sorted(SortedAttrs) && "Expected sorted attributes!");
+ for (const auto &Attr : SortedAttrs)
+ Attr.Profile(ID);
+
+ void *InsertPoint;
+ AttributeSetNode *PA =
+ pImpl->AttrsSetNodes.FindNodeOrInsertPos(ID, InsertPoint);
+
+ // If we didn't find any existing attributes of the same shape then create a
+ // new one and insert it.
+ if (!PA) {
+ // Co-allocate entries after the AttributeSetNode itself.
+ void *Mem = ::operator new(totalSizeToAlloc<Attribute>(SortedAttrs.size()));
+ PA = new (Mem) AttributeSetNode(SortedAttrs);
+ pImpl->AttrsSetNodes.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the AttributeSetNode that we found or created.
+ return PA;
+}
+
+AttributeSetNode *AttributeSetNode::get(LLVMContext &C, const AttrBuilder &B) {
+ return getSorted(C, B.attrs());
+}
+
+bool AttributeSetNode::hasAttribute(StringRef Kind) const {
+ return StringAttrs.count(Kind);
+}
+
+Optional<Attribute>
+AttributeSetNode::findEnumAttribute(Attribute::AttrKind Kind) const {
+ // Do a quick presence check.
+ if (!hasAttribute(Kind))
+ return None;
+
+ // Attributes in a set are sorted by enum value, followed by string
+ // attributes. Binary search the one we want.
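+ // Because strings sort last, excluding the StringAttrs.size() trailing
+ // entries keeps getKindAsEnum() valid for every element we probe.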
+ const Attribute *I =
+ std::lower_bound(begin(), end() - StringAttrs.size(), Kind,
+ [](Attribute A, Attribute::AttrKind Kind) {
+ return A.getKindAsEnum() < Kind;
+ });
+ assert(I != end() && I->hasAttribute(Kind) && "Presence check failed?");
+ return *I;
+}
+
+Attribute AttributeSetNode::getAttribute(Attribute::AttrKind Kind) const {
+ if (auto A = findEnumAttribute(Kind))
+ return *A;
+ return {};
+}
+
+Attribute AttributeSetNode::getAttribute(StringRef Kind) const {
+ return StringAttrs.lookup(Kind);
+}
+
+MaybeAlign AttributeSetNode::getAlignment() const {
+ if (auto A = findEnumAttribute(Attribute::Alignment))
+ return A->getAlignment();
+ return None;
+}
+
+MaybeAlign AttributeSetNode::getStackAlignment() const {
+ if (auto A = findEnumAttribute(Attribute::StackAlignment))
+ return A->getStackAlignment();
+ return None;
+}
+
+Type *AttributeSetNode::getAttributeType(Attribute::AttrKind Kind) const {
+ if (auto A = findEnumAttribute(Kind))
+ return A->getValueAsType();
+ return nullptr;
+}
+
+uint64_t AttributeSetNode::getDereferenceableBytes() const {
+ if (auto A = findEnumAttribute(Attribute::Dereferenceable))
+ return A->getDereferenceableBytes();
+ return 0;
+}
+
+uint64_t AttributeSetNode::getDereferenceableOrNullBytes() const {
+ if (auto A = findEnumAttribute(Attribute::DereferenceableOrNull))
+ return A->getDereferenceableOrNullBytes();
+ return 0;
+}
+
+std::pair<unsigned, Optional<unsigned>>
+AttributeSetNode::getAllocSizeArgs() const {
+ if (auto A = findEnumAttribute(Attribute::AllocSize))
+ return A->getAllocSizeArgs();
+ return std::make_pair(0, 0);
+}
+
+unsigned AttributeSetNode::getVScaleRangeMin() const {
+ if (auto A = findEnumAttribute(Attribute::VScaleRange))
+ return A->getVScaleRangeMin();
+ return 1;
+}
+
+Optional<unsigned> AttributeSetNode::getVScaleRangeMax() const {
+ if (auto A = findEnumAttribute(Attribute::VScaleRange))
+ return A->getVScaleRangeMax();
+ return None;
+}
+
+UWTableKind AttributeSetNode::getUWTableKind() const {
+ if (auto A = findEnumAttribute(Attribute::UWTable))
+ return A->getUWTableKind();
+ return UWTableKind::None;
+}
+
+AllocFnKind AttributeSetNode::getAllocKind() const {
+ if (auto A = findEnumAttribute(Attribute::AllocKind))
+ return A->getAllocKind();
+ return AllocFnKind::Unknown;
+}
+
+std::string AttributeSetNode::getAsString(bool InAttrGrp) const {
+ std::string Str;
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I != begin())
+ Str += ' ';
+ Str += I->getAsString(InAttrGrp);
+ }
+ return Str;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeListImpl Definition
+//===----------------------------------------------------------------------===//
+
+/// Map from AttributeList index to the internal array index. Adding one happens
+/// to work, because -1 wraps around to 0.
+static unsigned attrIdxToArrayIdx(unsigned Index) {
+ return Index + 1;
+}
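+// Concretely: AttributeList::FunctionIndex (~0U) maps to array index 0,
+// ReturnIndex (0) maps to 1, and argument N (FirstArgIndex + N) maps to
+// 2 + N.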
+
+AttributeListImpl::AttributeListImpl(ArrayRef<AttributeSet> Sets)
+ : NumAttrSets(Sets.size()) {
+ assert(!Sets.empty() && "pointless AttributeListImpl");
+
+ // There's memory after the node where we can store the entries.
+ llvm::copy(Sets, getTrailingObjects<AttributeSet>());
+
+ // Initialize AvailableFunctionAttrs and AvailableSomewhereAttrs
+ // summary bitsets.
+ for (const auto &I : Sets[attrIdxToArrayIdx(AttributeList::FunctionIndex)])
+ if (!I.isStringAttribute())
+ AvailableFunctionAttrs.addAttribute(I.getKindAsEnum());
+
+ for (const auto &Set : Sets)
+ for (const auto &I : Set)
+ if (!I.isStringAttribute())
+ AvailableSomewhereAttrs.addAttribute(I.getKindAsEnum());
+}
+
+void AttributeListImpl::Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, makeArrayRef(begin(), end()));
+}
+
+void AttributeListImpl::Profile(FoldingSetNodeID &ID,
+ ArrayRef<AttributeSet> Sets) {
+ for (const auto &Set : Sets)
+ ID.AddPointer(Set.SetNode);
+}
+
+bool AttributeListImpl::hasAttrSomewhere(Attribute::AttrKind Kind,
+ unsigned *Index) const {
+ if (!AvailableSomewhereAttrs.hasAttribute(Kind))
+ return false;
+
+ if (Index) {
+ for (unsigned I = 0, E = NumAttrSets; I != E; ++I) {
+ if (begin()[I].hasAttribute(Kind)) {
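+ // Convert the array index back to an AttributeList index, undoing
+ // attrIdxToArrayIdx() above (array slot 0 reports FunctionIndex).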
+ *Index = I - 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void AttributeListImpl::dump() const {
+ AttributeList(const_cast<AttributeListImpl *>(this)).dump();
+}
+#endif
+
+//===----------------------------------------------------------------------===//
+// AttributeList Construction and Mutation Methods
+//===----------------------------------------------------------------------===//
+
+AttributeList AttributeList::getImpl(LLVMContext &C,
+ ArrayRef<AttributeSet> AttrSets) {
+ assert(!AttrSets.empty() && "pointless AttributeListImpl");
+
+ LLVMContextImpl *pImpl = C.pImpl;
+ FoldingSetNodeID ID;
+ AttributeListImpl::Profile(ID, AttrSets);
+
+ void *InsertPoint;
+ AttributeListImpl *PA =
+ pImpl->AttrsLists.FindNodeOrInsertPos(ID, InsertPoint);
+
+ // If we didn't find any existing attributes of the same shape then
+ // create a new one and insert it.
+ if (!PA) {
+ // Co-allocate entries after the AttributeListImpl itself.
+ void *Mem = pImpl->Alloc.Allocate(
+ AttributeListImpl::totalSizeToAlloc<AttributeSet>(AttrSets.size()),
+ alignof(AttributeListImpl));
+ PA = new (Mem) AttributeListImpl(AttrSets);
+ pImpl->AttrsLists.InsertNode(PA, InsertPoint);
+ }
+
+ // Return the AttributeList that we found or created.
+ return AttributeList(PA);
+}
+
+AttributeList
+AttributeList::get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, Attribute>> Attrs) {
+ // If there are no attributes then return a null AttributeList pointer.
+ if (Attrs.empty())
+ return {};
+
+ assert(llvm::is_sorted(Attrs, llvm::less_first()) &&
+ "Misordered Attributes list!");
+ assert(llvm::all_of(Attrs,
+ [](const std::pair<unsigned, Attribute> &Pair) {
+ return Pair.second.isValid();
+ }) &&
+ "Pointless attribute!");
+
+ // Create a vector of (unsigned, AttributeSet) pairs from the attributes
+ // list.
+ SmallVector<std::pair<unsigned, AttributeSet>, 8> AttrPairVec;
+ for (ArrayRef<std::pair<unsigned, Attribute>>::iterator I = Attrs.begin(),
+ E = Attrs.end(); I != E; ) {
+ unsigned Index = I->first;
+ SmallVector<Attribute, 4> AttrVec;
+ while (I != E && I->first == Index) {
+ AttrVec.push_back(I->second);
+ ++I;
+ }
+
+ AttrPairVec.emplace_back(Index, AttributeSet::get(C, AttrVec));
+ }
+
+ return get(C, AttrPairVec);
+}
+
+AttributeList
+AttributeList::get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, AttributeSet>> Attrs) {
+ // If there are no attributes then return a null AttributeList pointer.
+ if (Attrs.empty())
+ return {};
+
+ assert(llvm::is_sorted(Attrs, llvm::less_first()) &&
+ "Misordered Attributes list!");
+ assert(llvm::none_of(Attrs,
+ [](const std::pair<unsigned, AttributeSet> &Pair) {
+ return !Pair.second.hasAttributes();
+ }) &&
+ "Pointless attribute!");
+
+ unsigned MaxIndex = Attrs.back().first;
+ // If the MaxIndex is FunctionIndex and there are other indices in front
+ // of it, we need to use the largest of those to get the right size.
+ if (MaxIndex == FunctionIndex && Attrs.size() > 1)
+ MaxIndex = Attrs[Attrs.size() - 2].first;
+
+ SmallVector<AttributeSet, 4> AttrVec(attrIdxToArrayIdx(MaxIndex) + 1);
+ for (const auto &Pair : Attrs)
+ AttrVec[attrIdxToArrayIdx(Pair.first)] = Pair.second;
+
+ return getImpl(C, AttrVec);
+}
+
+AttributeList AttributeList::get(LLVMContext &C, AttributeSet FnAttrs,
+ AttributeSet RetAttrs,
+ ArrayRef<AttributeSet> ArgAttrs) {
+ // Scan from the end to find the last argument with attributes. Most
+ // arguments don't have attributes, so it's nice if we can have fewer unique
+ // AttributeListImpls by dropping empty attribute sets at the end of the list.
+ unsigned NumSets = 0;
+ for (size_t I = ArgAttrs.size(); I != 0; --I) {
+ if (ArgAttrs[I - 1].hasAttributes()) {
+ NumSets = I + 2;
+ break;
+ }
+ }
+ if (NumSets == 0) {
+ // Check function and return attributes if we didn't have argument
+ // attributes.
+ if (RetAttrs.hasAttributes())
+ NumSets = 2;
+ else if (FnAttrs.hasAttributes())
+ NumSets = 1;
+ }
+
+ // If all attribute sets were empty, we can use the empty attribute list.
+ if (NumSets == 0)
+ return {};
+
+ SmallVector<AttributeSet, 8> AttrSets;
+ AttrSets.reserve(NumSets);
+ // If we have any attributes, we always have function attributes.
+ AttrSets.push_back(FnAttrs);
+ if (NumSets > 1)
+ AttrSets.push_back(RetAttrs);
+ if (NumSets > 2) {
+ // Drop the empty argument attribute sets at the end.
+ ArgAttrs = ArgAttrs.take_front(NumSets - 2);
+ llvm::append_range(AttrSets, ArgAttrs);
+ }
+
+ return getImpl(C, AttrSets);
+}
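+// For example, three arguments where only the second carries attributes give
+// NumSets == 4 and the sets {fn, ret, arg0, arg1}; the empty set for the
+// third argument is dropped so that equivalent lists share one
+// AttributeListImpl.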
+
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ AttributeSet Attrs) {
+ if (!Attrs.hasAttributes())
+ return {};
+ Index = attrIdxToArrayIdx(Index);
+ SmallVector<AttributeSet, 8> AttrSets(Index + 1);
+ AttrSets[Index] = Attrs;
+ return getImpl(C, AttrSets);
+}
+
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ const AttrBuilder &B) {
+ return get(C, Index, AttributeSet::get(C, B));
+}
+
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ ArrayRef<Attribute::AttrKind> Kinds) {
+ SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+ for (const auto K : Kinds)
+ Attrs.emplace_back(Index, Attribute::get(C, K));
+ return get(C, Attrs);
+}
+
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ ArrayRef<Attribute::AttrKind> Kinds,
+ ArrayRef<uint64_t> Values) {
+ assert(Kinds.size() == Values.size() && "Mismatched attribute values.");
+ SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+ auto VI = Values.begin();
+ for (const auto K : Kinds)
+ Attrs.emplace_back(Index, Attribute::get(C, K, *VI++));
+ return get(C, Attrs);
+}
+
+AttributeList AttributeList::get(LLVMContext &C, unsigned Index,
+ ArrayRef<StringRef> Kinds) {
+ SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+ for (const auto &K : Kinds)
+ Attrs.emplace_back(Index, Attribute::get(C, K));
+ return get(C, Attrs);
+}
+
+AttributeList AttributeList::get(LLVMContext &C,
+ ArrayRef<AttributeList> Attrs) {
+ if (Attrs.empty())
+ return {};
+ if (Attrs.size() == 1)
+ return Attrs[0];
+
+ unsigned MaxSize = 0;
+ for (const auto &List : Attrs)
+ MaxSize = std::max(MaxSize, List.getNumAttrSets());
+
+ // If every list was empty, there is no point in merging the lists.
+ if (MaxSize == 0)
+ return {};
+
+ SmallVector<AttributeSet, 8> NewAttrSets(MaxSize);
+ for (unsigned I = 0; I < MaxSize; ++I) {
+ AttrBuilder CurBuilder(C);
+ for (const auto &List : Attrs)
+ CurBuilder.merge(AttrBuilder(C, List.getAttributes(I - 1)));
+ NewAttrSets[I] = AttributeSet::get(C, CurBuilder);
+ }
+
+ return getImpl(C, NewAttrSets);
+}
+
+AttributeList
+AttributeList::addAttributeAtIndex(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Kind) const {
+ if (hasAttributeAtIndex(Index, Kind))
+ return *this;
+ AttributeSet Attrs = getAttributes(Index);
+ // TODO: Insert at correct position and avoid sort.
+ SmallVector<Attribute, 8> NewAttrs(Attrs.begin(), Attrs.end());
+ NewAttrs.push_back(Attribute::get(C, Kind));
+ return setAttributesAtIndex(C, Index, AttributeSet::get(C, NewAttrs));
+}
+
+AttributeList AttributeList::addAttributeAtIndex(LLVMContext &C, unsigned Index,
+ StringRef Kind,
+ StringRef Value) const {
+ AttrBuilder B(C);
+ B.addAttribute(Kind, Value);
+ return addAttributesAtIndex(C, Index, B);
+}
+
+AttributeList AttributeList::addAttributeAtIndex(LLVMContext &C, unsigned Index,
+ Attribute A) const {
+ AttrBuilder B(C);
+ B.addAttribute(A);
+ return addAttributesAtIndex(C, Index, B);
+}
+
+AttributeList AttributeList::setAttributesAtIndex(LLVMContext &C,
+ unsigned Index,
+ AttributeSet Attrs) const {
+ Index = attrIdxToArrayIdx(Index);
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ if (Index >= AttrSets.size())
+ AttrSets.resize(Index + 1);
+ AttrSets[Index] = Attrs;
+ return AttributeList::getImpl(C, AttrSets);
+}
+
+AttributeList AttributeList::addAttributesAtIndex(LLVMContext &C,
+ unsigned Index,
+ const AttrBuilder &B) const {
+ if (!B.hasAttributes())
+ return *this;
+
+ if (!pImpl)
+ return AttributeList::get(C, {{Index, AttributeSet::get(C, B)}});
+
+ AttrBuilder Merged(C, getAttributes(Index));
+ Merged.merge(B);
+ return setAttributesAtIndex(C, Index, AttributeSet::get(C, Merged));
+}
+
+AttributeList AttributeList::addParamAttribute(LLVMContext &C,
+ ArrayRef<unsigned> ArgNos,
+ Attribute A) const {
+ assert(llvm::is_sorted(ArgNos));
+
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ unsigned MaxIndex = attrIdxToArrayIdx(ArgNos.back() + FirstArgIndex);
+ if (MaxIndex >= AttrSets.size())
+ AttrSets.resize(MaxIndex + 1);
+
+ for (unsigned ArgNo : ArgNos) {
+ unsigned Index = attrIdxToArrayIdx(ArgNo + FirstArgIndex);
+ AttrBuilder B(C, AttrSets[Index]);
+ B.addAttribute(A);
+ AttrSets[Index] = AttributeSet::get(C, B);
+ }
+
+ return getImpl(C, AttrSets);
+}
+
+AttributeList
+AttributeList::removeAttributeAtIndex(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Kind) const {
+ if (!hasAttributeAtIndex(Index, Kind))
+ return *this;
+
+ Index = attrIdxToArrayIdx(Index);
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ assert(Index < AttrSets.size());
+
+ AttrSets[Index] = AttrSets[Index].removeAttribute(C, Kind);
+
+ return getImpl(C, AttrSets);
+}
+
+AttributeList AttributeList::removeAttributeAtIndex(LLVMContext &C,
+ unsigned Index,
+ StringRef Kind) const {
+ if (!hasAttributeAtIndex(Index, Kind))
+ return *this;
+
+ Index = attrIdxToArrayIdx(Index);
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ assert(Index < AttrSets.size());
+
+ AttrSets[Index] = AttrSets[Index].removeAttribute(C, Kind);
+
+ return getImpl(C, AttrSets);
+}
+
+AttributeList AttributeList::removeAttributesAtIndex(
+ LLVMContext &C, unsigned Index, const AttributeMask &AttrsToRemove) const {
+ AttributeSet Attrs = getAttributes(Index);
+ AttributeSet NewAttrs = Attrs.removeAttributes(C, AttrsToRemove);
+ // If nothing was removed, return the original list.
+ if (Attrs == NewAttrs)
+ return *this;
+ return setAttributesAtIndex(C, Index, NewAttrs);
+}
+
+AttributeList
+AttributeList::removeAttributesAtIndex(LLVMContext &C,
+ unsigned WithoutIndex) const {
+ if (!pImpl)
+ return {};
+ WithoutIndex = attrIdxToArrayIdx(WithoutIndex);
+ if (WithoutIndex >= getNumAttrSets())
+ return *this;
+ SmallVector<AttributeSet, 4> AttrSets(this->begin(), this->end());
+ AttrSets[WithoutIndex] = AttributeSet();
+ return getImpl(C, AttrSets);
+}
+
+AttributeList AttributeList::addDereferenceableRetAttr(LLVMContext &C,
+ uint64_t Bytes) const {
+ AttrBuilder B(C);
+ B.addDereferenceableAttr(Bytes);
+ return addRetAttributes(C, B);
+}
+
+AttributeList AttributeList::addDereferenceableParamAttr(LLVMContext &C,
+ unsigned Index,
+ uint64_t Bytes) const {
+ AttrBuilder B(C);
+ B.addDereferenceableAttr(Bytes);
+ return addParamAttributes(C, Index, B);
+}
+
+AttributeList
+AttributeList::addDereferenceableOrNullParamAttr(LLVMContext &C, unsigned Index,
+ uint64_t Bytes) const {
+ AttrBuilder B(C);
+ B.addDereferenceableOrNullAttr(Bytes);
+ return addParamAttributes(C, Index, B);
+}
+
+AttributeList
+AttributeList::addAllocSizeParamAttr(LLVMContext &C, unsigned Index,
+ unsigned ElemSizeArg,
+ const Optional<unsigned> &NumElemsArg) {
+ AttrBuilder B(C);
+ B.addAllocSizeAttr(ElemSizeArg, NumElemsArg);
+ return addParamAttributes(C, Index, B);
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeList Accessor Methods
+//===----------------------------------------------------------------------===//
+
+AttributeSet AttributeList::getParamAttrs(unsigned ArgNo) const {
+ return getAttributes(ArgNo + FirstArgIndex);
+}
+
+AttributeSet AttributeList::getRetAttrs() const {
+ return getAttributes(ReturnIndex);
+}
+
+AttributeSet AttributeList::getFnAttrs() const {
+ return getAttributes(FunctionIndex);
+}
+
+bool AttributeList::hasAttributeAtIndex(unsigned Index,
+ Attribute::AttrKind Kind) const {
+ return getAttributes(Index).hasAttribute(Kind);
+}
+
+bool AttributeList::hasAttributeAtIndex(unsigned Index, StringRef Kind) const {
+ return getAttributes(Index).hasAttribute(Kind);
+}
+
+bool AttributeList::hasAttributesAtIndex(unsigned Index) const {
+ return getAttributes(Index).hasAttributes();
+}
+
+bool AttributeList::hasFnAttr(Attribute::AttrKind Kind) const {
+ return pImpl && pImpl->hasFnAttribute(Kind);
+}
+
+bool AttributeList::hasFnAttr(StringRef Kind) const {
+ return hasAttributeAtIndex(AttributeList::FunctionIndex, Kind);
+}
+
+bool AttributeList::hasAttrSomewhere(Attribute::AttrKind Attr,
+ unsigned *Index) const {
+ return pImpl && pImpl->hasAttrSomewhere(Attr, Index);
+}
+
+Attribute AttributeList::getAttributeAtIndex(unsigned Index,
+ Attribute::AttrKind Kind) const {
+ return getAttributes(Index).getAttribute(Kind);
+}
+
+Attribute AttributeList::getAttributeAtIndex(unsigned Index,
+ StringRef Kind) const {
+ return getAttributes(Index).getAttribute(Kind);
+}
+
+MaybeAlign AttributeList::getRetAlignment() const {
+ return getAttributes(ReturnIndex).getAlignment();
+}
+
+MaybeAlign AttributeList::getParamAlignment(unsigned ArgNo) const {
+ return getAttributes(ArgNo + FirstArgIndex).getAlignment();
+}
+
+MaybeAlign AttributeList::getParamStackAlignment(unsigned ArgNo) const {
+ return getAttributes(ArgNo + FirstArgIndex).getStackAlignment();
+}
+
+Type *AttributeList::getParamByValType(unsigned Index) const {
+ return getAttributes(Index+FirstArgIndex).getByValType();
+}
+
+Type *AttributeList::getParamStructRetType(unsigned Index) const {
+ return getAttributes(Index + FirstArgIndex).getStructRetType();
+}
+
+Type *AttributeList::getParamByRefType(unsigned Index) const {
+ return getAttributes(Index + FirstArgIndex).getByRefType();
+}
+
+Type *AttributeList::getParamPreallocatedType(unsigned Index) const {
+ return getAttributes(Index + FirstArgIndex).getPreallocatedType();
+}
+
+Type *AttributeList::getParamInAllocaType(unsigned Index) const {
+ return getAttributes(Index + FirstArgIndex).getInAllocaType();
+}
+
+Type *AttributeList::getParamElementType(unsigned Index) const {
+ return getAttributes(Index + FirstArgIndex).getElementType();
+}
+
+MaybeAlign AttributeList::getFnStackAlignment() const {
+ return getFnAttrs().getStackAlignment();
+}
+
+MaybeAlign AttributeList::getRetStackAlignment() const {
+ return getRetAttrs().getStackAlignment();
+}
+
+uint64_t AttributeList::getRetDereferenceableBytes() const {
+ return getRetAttrs().getDereferenceableBytes();
+}
+
+uint64_t AttributeList::getParamDereferenceableBytes(unsigned Index) const {
+ return getParamAttrs(Index).getDereferenceableBytes();
+}
+
+uint64_t AttributeList::getRetDereferenceableOrNullBytes() const {
+ return getRetAttrs().getDereferenceableOrNullBytes();
+}
+
+uint64_t
+AttributeList::getParamDereferenceableOrNullBytes(unsigned Index) const {
+ return getParamAttrs(Index).getDereferenceableOrNullBytes();
+}
+
+UWTableKind AttributeList::getUWTableKind() const {
+ return getFnAttrs().getUWTableKind();
+}
+
+AllocFnKind AttributeList::getAllocKind() const {
+ return getFnAttrs().getAllocKind();
+}
+
+std::string AttributeList::getAsString(unsigned Index, bool InAttrGrp) const {
+ return getAttributes(Index).getAsString(InAttrGrp);
+}
+
+AttributeSet AttributeList::getAttributes(unsigned Index) const {
+ Index = attrIdxToArrayIdx(Index);
+ if (!pImpl || Index >= getNumAttrSets())
+ return {};
+ return pImpl->begin()[Index];
+}
+
+bool AttributeList::hasParentContext(LLVMContext &C) const {
+ assert(!isEmpty() && "an empty attribute list has no parent context");
+ FoldingSetNodeID ID;
+ pImpl->Profile(ID);
+ void *Unused;
+ return C.pImpl->AttrsLists.FindNodeOrInsertPos(ID, Unused) == pImpl;
+}
+
+AttributeList::iterator AttributeList::begin() const {
+ return pImpl ? pImpl->begin() : nullptr;
+}
+
+AttributeList::iterator AttributeList::end() const {
+ return pImpl ? pImpl->end() : nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeList Introspection Methods
+//===----------------------------------------------------------------------===//
+
+unsigned AttributeList::getNumAttrSets() const {
+ return pImpl ? pImpl->NumAttrSets : 0;
+}
+
+void AttributeList::print(raw_ostream &O) const {
+ O << "AttributeList[\n";
+
+ for (unsigned i : indexes()) {
+ if (!getAttributes(i).hasAttributes())
+ continue;
+ O << " { ";
+ switch (i) {
+ case AttrIndex::ReturnIndex:
+ O << "return";
+ break;
+ case AttrIndex::FunctionIndex:
+ O << "function";
+ break;
+ default:
+ O << "arg(" << i - AttrIndex::FirstArgIndex << ")";
+ }
+ O << " => " << getAsString(i) << " }\n";
+ }
+
+ O << "]\n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void AttributeList::dump() const { print(dbgs()); }
+#endif
+
+//===----------------------------------------------------------------------===//
+// AttrBuilder Method Implementations
+//===----------------------------------------------------------------------===//
+
+AttrBuilder::AttrBuilder(LLVMContext &Ctx, AttributeSet AS) : Ctx(Ctx) {
+ append_range(Attrs, AS);
+ assert(is_sorted(Attrs) && "AttributeSet should be sorted");
+}
+
+void AttrBuilder::clear() { Attrs.clear(); }
+
+/// Attribute comparator that only compares attribute keys. Enum attributes are
+/// sorted before string attributes.
+struct AttributeComparator {
+ bool operator()(Attribute A0, Attribute A1) const {
+ bool A0IsString = A0.isStringAttribute();
+ bool A1IsString = A1.isStringAttribute();
+ if (A0IsString) {
+ if (A1IsString)
+ return A0.getKindAsString() < A1.getKindAsString();
+ else
+ return false;
+ }
+ if (A1IsString)
+ return true;
+ return A0.getKindAsEnum() < A1.getKindAsEnum();
+ }
+ bool operator()(Attribute A0, Attribute::AttrKind Kind) const {
+ if (A0.isStringAttribute())
+ return false;
+ return A0.getKindAsEnum() < Kind;
+ }
+ bool operator()(Attribute A0, StringRef Kind) const {
+ if (A0.isStringAttribute())
+ return A0.getKindAsString() < Kind;
+ return true;
+ }
+};
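+// Because the comparator compares keys only, the lower_bound() in
+// addAttributeImpl() below either lands on an existing attribute with the
+// same key (which is then replaced in place) or yields the position that
+// keeps Attrs sorted.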
+
+template <typename K>
+static void addAttributeImpl(SmallVectorImpl<Attribute> &Attrs, K Kind,
+ Attribute Attr) {
+ auto It = lower_bound(Attrs, Kind, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(Kind))
+ std::swap(*It, Attr);
+ else
+ Attrs.insert(It, Attr);
+}
+
+AttrBuilder &AttrBuilder::addAttribute(Attribute Attr) {
+ if (Attr.isStringAttribute())
+ addAttributeImpl(Attrs, Attr.getKindAsString(), Attr);
+ else
+ addAttributeImpl(Attrs, Attr.getKindAsEnum(), Attr);
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Kind) {
+ addAttributeImpl(Attrs, Kind, Attribute::get(Ctx, Kind));
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(StringRef A, StringRef V) {
+ addAttributeImpl(Attrs, A, Attribute::get(Ctx, A, V));
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
+ assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!");
+ auto It = lower_bound(Attrs, Val, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(Val))
+ Attrs.erase(It);
+ return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttribute(StringRef A) {
+ auto It = lower_bound(Attrs, A, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(A))
+ Attrs.erase(It);
+ return *this;
+}
+
+uint64_t AttrBuilder::getRawIntAttr(Attribute::AttrKind Kind) const {
+ assert(Attribute::isIntAttrKind(Kind) && "Not an int attribute");
+ Attribute A = getAttribute(Kind);
+ return A.isValid() ? A.getValueAsInt() : 0;
+}
+
+AttrBuilder &AttrBuilder::addRawIntAttr(Attribute::AttrKind Kind,
+ uint64_t Value) {
+ return addAttribute(Attribute::get(Ctx, Kind, Value));
+}
+
+std::pair<unsigned, Optional<unsigned>> AttrBuilder::getAllocSizeArgs() const {
+ return unpackAllocSizeArgs(getRawIntAttr(Attribute::AllocSize));
+}
+
+unsigned AttrBuilder::getVScaleRangeMin() const {
+ return unpackVScaleRangeArgs(getRawIntAttr(Attribute::VScaleRange)).first;
+}
+
+Optional<unsigned> AttrBuilder::getVScaleRangeMax() const {
+ return unpackVScaleRangeArgs(getRawIntAttr(Attribute::VScaleRange)).second;
+}
+
+AttrBuilder &AttrBuilder::addAlignmentAttr(MaybeAlign Align) {
+ if (!Align)
+ return *this;
+
+ assert(*Align <= llvm::Value::MaximumAlignment && "Alignment too large.");
+ return addRawIntAttr(Attribute::Alignment, Align->value());
+}
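+
+// Usage sketch (illustrative): AttrBuilder(Ctx).addAlignmentAttr(Align(16))
+// records the align attribute through the raw integer encoding above, while
+// an empty MaybeAlign leaves the builder untouched.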
+
+AttrBuilder &AttrBuilder::addStackAlignmentAttr(MaybeAlign Align) {
+ // Default alignment, allow the target to define how to align it.
+ if (!Align)
+ return *this;
+
+ assert(*Align <= 0x100 && "Alignment too large.");
+ return addRawIntAttr(Attribute::StackAlignment, Align->value());
+}
+
+AttrBuilder &AttrBuilder::addDereferenceableAttr(uint64_t Bytes) {
+ if (Bytes == 0) return *this;
+
+ return addRawIntAttr(Attribute::Dereferenceable, Bytes);
+}
+
+AttrBuilder &AttrBuilder::addDereferenceableOrNullAttr(uint64_t Bytes) {
+ if (Bytes == 0)
+ return *this;
+
+ return addRawIntAttr(Attribute::DereferenceableOrNull, Bytes);
+}
+
+AttrBuilder &AttrBuilder::addAllocSizeAttr(unsigned ElemSize,
+ const Optional<unsigned> &NumElems) {
+ return addAllocSizeAttrFromRawRepr(packAllocSizeArgs(ElemSize, NumElems));
+}
+
+AttrBuilder &AttrBuilder::addAllocSizeAttrFromRawRepr(uint64_t RawArgs) {
+ // (0, 0) is our "not present" value, so we need to check for it here.
+ assert(RawArgs && "Invalid allocsize arguments -- given allocsize(0, 0)");
+ return addRawIntAttr(Attribute::AllocSize, RawArgs);
+}
+
+AttrBuilder &AttrBuilder::addVScaleRangeAttr(unsigned MinValue,
+ Optional<unsigned> MaxValue) {
+ return addVScaleRangeAttrFromRawRepr(packVScaleRangeArgs(MinValue, MaxValue));
+}
+
+AttrBuilder &AttrBuilder::addVScaleRangeAttrFromRawRepr(uint64_t RawArgs) {
+ // (0, 0) is the "not present" encoding, so ignore this case.
+ if (RawArgs == 0)
+ return *this;
+
+ return addRawIntAttr(Attribute::VScaleRange, RawArgs);
+}
+
+AttrBuilder &AttrBuilder::addUWTableAttr(UWTableKind Kind) {
+ if (Kind == UWTableKind::None)
+ return *this;
+ return addRawIntAttr(Attribute::UWTable, uint64_t(Kind));
+}
+
+AttrBuilder &AttrBuilder::addAllocKindAttr(AllocFnKind Kind) {
+ return addRawIntAttr(Attribute::AllocKind, static_cast<uint64_t>(Kind));
+}
+
+Type *AttrBuilder::getTypeAttr(Attribute::AttrKind Kind) const {
+ assert(Attribute::isTypeAttrKind(Kind) && "Not a type attribute");
+ Attribute A = getAttribute(Kind);
+ return A.isValid() ? A.getValueAsType() : nullptr;
+}
+
+AttrBuilder &AttrBuilder::addTypeAttr(Attribute::AttrKind Kind, Type *Ty) {
+ return addAttribute(Attribute::get(Ctx, Kind, Ty));
+}
+
+AttrBuilder &AttrBuilder::addByValAttr(Type *Ty) {
+ return addTypeAttr(Attribute::ByVal, Ty);
+}
+
+AttrBuilder &AttrBuilder::addStructRetAttr(Type *Ty) {
+ return addTypeAttr(Attribute::StructRet, Ty);
+}
+
+AttrBuilder &AttrBuilder::addByRefAttr(Type *Ty) {
+ return addTypeAttr(Attribute::ByRef, Ty);
+}
+
+AttrBuilder &AttrBuilder::addPreallocatedAttr(Type *Ty) {
+ return addTypeAttr(Attribute::Preallocated, Ty);
+}
+
+AttrBuilder &AttrBuilder::addInAllocaAttr(Type *Ty) {
+ return addTypeAttr(Attribute::InAlloca, Ty);
+}
+
+AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) {
+ // TODO: Could make this O(n) as we're merging two sorted lists.
+ for (const auto &I : B.attrs())
+ addAttribute(I);
+
+ return *this;
+}
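+
+// A possible O(n) variant of the TODO above, sketched as a comment only
+// (illustrative and untested):
+//   SmallVector<Attribute> Merged;
+//   std::merge(B.Attrs.begin(), B.Attrs.end(), Attrs.begin(), Attrs.end(),
+//              std::back_inserter(Merged), AttributeComparator());
+// std::merge keeps equal-keyed elements from both inputs, so a real
+// implementation would still need to drop this builder's copy whenever B
+// holds the same kind (matching addAttribute()'s overwrite semantics) before
+// doing Attrs = std::move(Merged).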
+
+AttrBuilder &AttrBuilder::remove(const AttributeMask &AM) {
+ erase_if(Attrs, [&](Attribute A) { return AM.contains(A); });
+ return *this;
+}
+
+bool AttrBuilder::overlaps(const AttributeMask &AM) const {
+ return any_of(Attrs, [&](Attribute A) { return AM.contains(A); });
+}
+
+Attribute AttrBuilder::getAttribute(Attribute::AttrKind A) const {
+ assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!");
+ auto It = lower_bound(Attrs, A, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(A))
+ return *It;
+ return {};
+}
+
+Attribute AttrBuilder::getAttribute(StringRef A) const {
+ auto It = lower_bound(Attrs, A, AttributeComparator());
+ if (It != Attrs.end() && It->hasAttribute(A))
+ return *It;
+ return {};
+}
+
+bool AttrBuilder::contains(Attribute::AttrKind A) const {
+ return getAttribute(A).isValid();
+}
+
+bool AttrBuilder::contains(StringRef A) const {
+ return getAttribute(A).isValid();
+}
+
+bool AttrBuilder::hasAlignmentAttr() const {
+ return getRawIntAttr(Attribute::Alignment) != 0;
+}
+
+bool AttrBuilder::operator==(const AttrBuilder &B) const {
+ return Attrs == B.Attrs;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeFuncs Function Definitions
+//===----------------------------------------------------------------------===//
+
+/// Which attributes cannot be applied to a type.
+AttributeMask AttributeFuncs::typeIncompatible(Type *Ty,
+ AttributeSafetyKind ASK) {
+ AttributeMask Incompatible;
+
+ if (!Ty->isIntegerTy()) {
+ // Attributes that only apply to integers.
+ if (ASK & ASK_SAFE_TO_DROP)
+ Incompatible.addAttribute(Attribute::AllocAlign);
+ if (ASK & ASK_UNSAFE_TO_DROP)
+ Incompatible.addAttribute(Attribute::SExt).addAttribute(Attribute::ZExt);
+ }
+
+ if (!Ty->isPointerTy()) {
+ // Attributes that only apply to pointers.
+ if (ASK & ASK_SAFE_TO_DROP)
+ Incompatible.addAttribute(Attribute::NoAlias)
+ .addAttribute(Attribute::NoCapture)
+ .addAttribute(Attribute::NonNull)
+ .addAttribute(Attribute::ReadNone)
+ .addAttribute(Attribute::ReadOnly)
+ .addAttribute(Attribute::Dereferenceable)
+ .addAttribute(Attribute::DereferenceableOrNull);
+ if (ASK & ASK_UNSAFE_TO_DROP)
+ Incompatible.addAttribute(Attribute::Nest)
+ .addAttribute(Attribute::SwiftError)
+ .addAttribute(Attribute::Preallocated)
+ .addAttribute(Attribute::InAlloca)
+ .addAttribute(Attribute::ByVal)
+ .addAttribute(Attribute::StructRet)
+ .addAttribute(Attribute::ByRef)
+ .addAttribute(Attribute::ElementType)
+ .addAttribute(Attribute::AllocatedPointer);
+ }
+
+ // Attributes that only apply to pointers or vectors of pointers.
+ if (!Ty->isPtrOrPtrVectorTy()) {
+ if (ASK & ASK_SAFE_TO_DROP)
+ Incompatible.addAttribute(Attribute::Alignment);
+ }
+
+ // Some attributes can apply to all "values" but there are no `void` values.
+ if (Ty->isVoidTy()) {
+ if (ASK & ASK_SAFE_TO_DROP)
+ Incompatible.addAttribute(Attribute::NoUndef);
+ }
+
+ return Incompatible;
+}
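+
+// Usage sketch (illustrative): a pass rewriting a parameter's type would
+// typically drop whatever no longer applies, e.g.
+//   AttrBuilder AB(Ctx, AL.getParamAttrs(ArgNo));
+//   AB.remove(AttributeFuncs::typeIncompatible(
+//       NewTy, AttributeFuncs::ASK_SAFE_TO_DROP));
+// where `AL`, `ArgNo`, and `NewTy` stand in for the caller's state.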
+
+AttributeMask AttributeFuncs::getUBImplyingAttributes() {
+ AttributeMask AM;
+ AM.addAttribute(Attribute::NoUndef);
+ AM.addAttribute(Attribute::Dereferenceable);
+ AM.addAttribute(Attribute::DereferenceableOrNull);
+ return AM;
+}
+
+template<typename AttrClass>
+static bool isEqual(const Function &Caller, const Function &Callee) {
+ return Caller.getFnAttribute(AttrClass::getKind()) ==
+ Callee.getFnAttribute(AttrClass::getKind());
+}
+
+/// Compute the logical AND of the attributes of the caller and the
+/// callee.
+///
+/// This function sets the caller's attribute to false if the callee's attribute
+/// is false.
+template<typename AttrClass>
+static void setAND(Function &Caller, const Function &Callee) {
+ if (AttrClass::isSet(Caller, AttrClass::getKind()) &&
+ !AttrClass::isSet(Callee, AttrClass::getKind()))
+ AttrClass::set(Caller, AttrClass::getKind(), false);
+}
+
+/// Compute the logical OR of the attributes of the caller and the
+/// callee.
+///
+/// This function sets the caller's attribute to true if the callee's attribute
+/// is true.
+template<typename AttrClass>
+static void setOR(Function &Caller, const Function &Callee) {
+ if (!AttrClass::isSet(Caller, AttrClass::getKind()) &&
+ AttrClass::isSet(Callee, AttrClass::getKind()))
+ AttrClass::set(Caller, AttrClass::getKind(), true);
+}
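+
+// For example (illustrative): applied to a "true"/"false" string attribute
+// such as "no-nans-fp-math", setAND clears the caller's flag when the callee
+// lacks it, whereas setOR propagates a flag that is set on the callee up to
+// the caller.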
+
+/// If the inlined function had a higher stack protection level than the
+/// calling function, then bump up the caller's stack protection level.
+static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
+ // If the calling function has *no* stack protection level (e.g. it was built
+ // with Clang's -fno-stack-protector or no_stack_protector attribute), don't
+ // change it as that could change the program's semantics.
+ if (!Caller.hasStackProtectorFnAttr())
+ return;
+
+ // If upgrading the SSP attribute, clear out the old SSP Attributes first.
+ // Having multiple SSP attributes doesn't actually hurt, but it adds useless
+ // clutter to the IR.
+ AttributeMask OldSSPAttr;
+ OldSSPAttr.addAttribute(Attribute::StackProtect)
+ .addAttribute(Attribute::StackProtectStrong)
+ .addAttribute(Attribute::StackProtectReq);
+
+ if (Callee.hasFnAttribute(Attribute::StackProtectReq)) {
+ Caller.removeFnAttrs(OldSSPAttr);
+ Caller.addFnAttr(Attribute::StackProtectReq);
+ } else if (Callee.hasFnAttribute(Attribute::StackProtectStrong) &&
+ !Caller.hasFnAttribute(Attribute::StackProtectReq)) {
+ Caller.removeFnAttrs(OldSSPAttr);
+ Caller.addFnAttr(Attribute::StackProtectStrong);
+ } else if (Callee.hasFnAttribute(Attribute::StackProtect) &&
+ !Caller.hasFnAttribute(Attribute::StackProtectReq) &&
+ !Caller.hasFnAttribute(Attribute::StackProtectStrong))
+ Caller.addFnAttr(Attribute::StackProtect);
+}
+
+/// If the inlined function required stack probes, then ensure that
+/// the calling function has those too.
+static void adjustCallerStackProbes(Function &Caller, const Function &Callee) {
+ if (!Caller.hasFnAttribute("probe-stack") &&
+ Callee.hasFnAttribute("probe-stack")) {
+ Caller.addFnAttr(Callee.getFnAttribute("probe-stack"));
+ }
+}
+
+/// If the inlined function defines the size of the guard region
+/// on the stack, then ensure that the calling function defines a guard region
+/// that is no larger.
+static void
+adjustCallerStackProbeSize(Function &Caller, const Function &Callee) {
+ Attribute CalleeAttr = Callee.getFnAttribute("stack-probe-size");
+ if (CalleeAttr.isValid()) {
+ Attribute CallerAttr = Caller.getFnAttribute("stack-probe-size");
+ if (CallerAttr.isValid()) {
+ uint64_t CallerStackProbeSize, CalleeStackProbeSize;
+ CallerAttr.getValueAsString().getAsInteger(0, CallerStackProbeSize);
+ CalleeAttr.getValueAsString().getAsInteger(0, CalleeStackProbeSize);
+
+ if (CallerStackProbeSize > CalleeStackProbeSize) {
+ Caller.addFnAttr(CalleeAttr);
+ }
+ } else {
+ Caller.addFnAttr(CalleeAttr);
+ }
+ }
+}
+
+/// If the inlined function defines a min legal vector width, then ensure
+/// the calling function has the same or larger min legal vector width. If the
+/// caller has the attribute, but the callee doesn't, we need to remove the
+/// attribute from the caller since we can't make any guarantees about the
+/// caller's requirements.
+/// This function is called after the inlining decision has been made so we have
+/// to merge the attribute this way. Heuristics that would use
+/// min-legal-vector-width to determine inline compatibility would need to be
+/// handled as part of inline cost analysis.
+static void
+adjustMinLegalVectorWidth(Function &Caller, const Function &Callee) {
+ Attribute CallerAttr = Caller.getFnAttribute("min-legal-vector-width");
+ if (CallerAttr.isValid()) {
+ Attribute CalleeAttr = Callee.getFnAttribute("min-legal-vector-width");
+ if (CalleeAttr.isValid()) {
+ uint64_t CallerVectorWidth, CalleeVectorWidth;
+ CallerAttr.getValueAsString().getAsInteger(0, CallerVectorWidth);
+ CalleeAttr.getValueAsString().getAsInteger(0, CalleeVectorWidth);
+ if (CallerVectorWidth < CalleeVectorWidth)
+ Caller.addFnAttr(CalleeAttr);
+ } else {
+ // If the callee doesn't have the attribute then we don't know anything
+ // and must drop the attribute from the caller.
+ Caller.removeFnAttr("min-legal-vector-width");
+ }
+ }
+}
+
+/// If the inlined function has null_pointer_is_valid attribute,
+/// set this attribute in the caller post inlining.
+static void
+adjustNullPointerValidAttr(Function &Caller, const Function &Callee) {
+ if (Callee.nullPointerIsDefined() && !Caller.nullPointerIsDefined()) {
+ Caller.addFnAttr(Attribute::NullPointerIsValid);
+ }
+}
+
+struct EnumAttr {
+ static bool isSet(const Function &Fn,
+ Attribute::AttrKind Kind) {
+ return Fn.hasFnAttribute(Kind);
+ }
+
+ static void set(Function &Fn,
+ Attribute::AttrKind Kind, bool Val) {
+ if (Val)
+ Fn.addFnAttr(Kind);
+ else
+ Fn.removeFnAttr(Kind);
+ }
+};
+
+struct StrBoolAttr {
+ static bool isSet(const Function &Fn,
+ StringRef Kind) {
+ auto A = Fn.getFnAttribute(Kind);
+ return A.getValueAsString().equals("true");
+ }
+
+ static void set(Function &Fn,
+ StringRef Kind, bool Val) {
+ Fn.addFnAttr(Kind, Val ? "true" : "false");
+ }
+};
+
+#define GET_ATTR_NAMES
+#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME) \
+ struct ENUM_NAME##Attr : EnumAttr { \
+ static enum Attribute::AttrKind getKind() { \
+ return llvm::Attribute::ENUM_NAME; \
+ } \
+ };
+#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
+ struct ENUM_NAME##Attr : StrBoolAttr { \
+ static StringRef getKind() { return #DISPLAY_NAME; } \
+ };
+#include "llvm/IR/Attributes.inc"
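+
+// For illustration only: an ATTRIBUTE_STRBOOL entry for a hypothetical
+// attribute NoFoo with display name "no-foo" would expand to roughly
+//   struct NoFooAttr : StrBoolAttr {
+//     static StringRef getKind() { return "no-foo"; }
+//   };
+// giving the setAND/setOR templates above a uniform isSet/set/getKind
+// interface per attribute.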
+
+#define GET_ATTR_COMPAT_FUNC
+#include "llvm/IR/Attributes.inc"
+
+bool AttributeFuncs::areInlineCompatible(const Function &Caller,
+ const Function &Callee) {
+ return hasCompatibleFnAttrs(Caller, Callee);
+}
+
+bool AttributeFuncs::areOutlineCompatible(const Function &A,
+ const Function &B) {
+ return hasCompatibleFnAttrs(A, B);
+}
+
+void AttributeFuncs::mergeAttributesForInlining(Function &Caller,
+ const Function &Callee) {
+ mergeFnAttrs(Caller, Callee);
+}
+
+void AttributeFuncs::mergeAttributesForOutlining(Function &Base,
+ const Function &ToMerge) {
+
+ // We merge functions so that they meet the most general case.
+ // For example, if the NoNansFPMathAttr is set in one function, but not in
+ // the other, in the merged function we can say that the NoNansFPMathAttr
+ // is not set.
+ // However if we have the SpeculativeLoadHardeningAttr set true in one
+ // function, but not the other, we make sure that the function retains
+ // that aspect in the merged function.
+ mergeFnAttrs(Base, ToMerge);
+}
+
+void AttributeFuncs::updateMinLegalVectorWidthAttr(Function &Fn,
+ uint64_t Width) {
+ Attribute Attr = Fn.getFnAttribute("min-legal-vector-width");
+ if (Attr.isValid()) {
+ uint64_t OldWidth;
+ Attr.getValueAsString().getAsInteger(0, OldWidth);
+ if (Width > OldWidth)
+ Fn.addFnAttr("min-legal-vector-width", llvm::utostr(Width));
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
new file mode 100644
index 000000000000..75594f90c926
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/AutoUpgrade.cpp
@@ -0,0 +1,4706 @@
+//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the auto-upgrade helper functions.
+// This is where deprecated IR intrinsics and other IR features are updated to
+// current specifications.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsARM.h"
+#include "llvm/IR/IntrinsicsX86.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Regex.h"
+#include <cstring>
+using namespace llvm;
+
+static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
+
+// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
+// changed their type from v4f32 to v2i64.
+static bool UpgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
+ Function *&NewFn) {
+ // Check whether this is an old version of the function, which received
+ // v4f32 arguments.
+ Type *Arg0Type = F->getFunctionType()->getParamType(0);
+ if (Arg0Type != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
+ return false;
+
+ // Yes, it's old, replace it with new version.
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ return true;
+}
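+
+// Sketch of the effect (IR shown for illustration): an old declaration such as
+//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
+// is renamed with an ".old" suffix and redeclared with the current signature
+//   declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)
+// while the call sites themselves are rewritten later, in
+// UpgradeIntrinsicCall.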
+
+// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
+// arguments have changed their type from i32 to i8.
+static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
+ Function *&NewFn) {
+ // Check that the last argument is an i32.
+ Type *LastArgType = F->getFunctionType()->getParamType(
+ F->getFunctionType()->getNumParams() - 1);
+ if (!LastArgType->isIntegerTy(32))
+ return false;
+
+ // Move this function aside and map down.
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ return true;
+}
+
+// Upgrade the declarations of fp compare intrinsics whose return type changed
+// from a scalar to a vXi1 mask.
+static bool UpgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
+ Function *&NewFn) {
+ // Check if the return type is a vector.
+ if (F->getReturnType()->isVectorTy())
+ return false;
+
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ return true;
+}
+
+static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
+ // All of the intrinsic matches below should be annotated with the LLVM
+ // version that started auto-upgrading them. At some point in the future we
+ // would like to use this information to remove upgrade code for some older
+ // intrinsics. It is currently undecided how we will determine that future
+ // point.
+ if (Name == "addcarryx.u32" || // Added in 8.0
+ Name == "addcarryx.u64" || // Added in 8.0
+ Name == "addcarry.u32" || // Added in 8.0
+ Name == "addcarry.u64" || // Added in 8.0
+ Name == "subborrow.u32" || // Added in 8.0
+ Name == "subborrow.u64" || // Added in 8.0
+ Name.startswith("sse2.padds.") || // Added in 8.0
+ Name.startswith("sse2.psubs.") || // Added in 8.0
+ Name.startswith("sse2.paddus.") || // Added in 8.0
+ Name.startswith("sse2.psubus.") || // Added in 8.0
+ Name.startswith("avx2.padds.") || // Added in 8.0
+ Name.startswith("avx2.psubs.") || // Added in 8.0
+ Name.startswith("avx2.paddus.") || // Added in 8.0
+ Name.startswith("avx2.psubus.") || // Added in 8.0
+ Name.startswith("avx512.padds.") || // Added in 8.0
+ Name.startswith("avx512.psubs.") || // Added in 8.0
+ Name.startswith("avx512.mask.padds.") || // Added in 8.0
+ Name.startswith("avx512.mask.psubs.") || // Added in 8.0
+ Name.startswith("avx512.mask.paddus.") || // Added in 8.0
+ Name.startswith("avx512.mask.psubus.") || // Added in 8.0
+ Name=="ssse3.pabs.b.128" || // Added in 6.0
+ Name=="ssse3.pabs.w.128" || // Added in 6.0
+ Name=="ssse3.pabs.d.128" || // Added in 6.0
+ Name.startswith("fma4.vfmadd.s") || // Added in 7.0
+ Name.startswith("fma.vfmadd.") || // Added in 7.0
+ Name.startswith("fma.vfmsub.") || // Added in 7.0
+ Name.startswith("fma.vfmsubadd.") || // Added in 7.0
+ Name.startswith("fma.vfnmadd.") || // Added in 7.0
+ Name.startswith("fma.vfnmsub.") || // Added in 7.0
+ Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
+ Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
+ Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
+ Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
+ Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
+ Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
+ Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
+ Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
+ Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
+ Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
+ Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
+ Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
+ Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
+ Name.startswith("avx512.kunpck") || //added in 6.0
+ Name.startswith("avx2.pabs.") || // Added in 6.0
+ Name.startswith("avx512.mask.pabs.") || // Added in 6.0
+ Name.startswith("avx512.broadcastm") || // Added in 6.0
+ Name == "sse.sqrt.ss" || // Added in 7.0
+ Name == "sse2.sqrt.sd" || // Added in 7.0
+ Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
+ Name.startswith("avx.sqrt.p") || // Added in 7.0
+ Name.startswith("sse2.sqrt.p") || // Added in 7.0
+ Name.startswith("sse.sqrt.p") || // Added in 7.0
+ Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
+ Name.startswith("sse2.pcmpeq.") || // Added in 3.1
+ Name.startswith("sse2.pcmpgt.") || // Added in 3.1
+ Name.startswith("avx2.pcmpeq.") || // Added in 3.1
+ Name.startswith("avx2.pcmpgt.") || // Added in 3.1
+ Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
+ Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
+ Name.startswith("avx.vperm2f128.") || // Added in 6.0
+ Name == "avx2.vperm2i128" || // Added in 6.0
+ Name == "sse.add.ss" || // Added in 4.0
+ Name == "sse2.add.sd" || // Added in 4.0
+ Name == "sse.sub.ss" || // Added in 4.0
+ Name == "sse2.sub.sd" || // Added in 4.0
+ Name == "sse.mul.ss" || // Added in 4.0
+ Name == "sse2.mul.sd" || // Added in 4.0
+ Name == "sse.div.ss" || // Added in 4.0
+ Name == "sse2.div.sd" || // Added in 4.0
+ Name == "sse41.pmaxsb" || // Added in 3.9
+ Name == "sse2.pmaxs.w" || // Added in 3.9
+ Name == "sse41.pmaxsd" || // Added in 3.9
+ Name == "sse2.pmaxu.b" || // Added in 3.9
+ Name == "sse41.pmaxuw" || // Added in 3.9
+ Name == "sse41.pmaxud" || // Added in 3.9
+ Name == "sse41.pminsb" || // Added in 3.9
+ Name == "sse2.pmins.w" || // Added in 3.9
+ Name == "sse41.pminsd" || // Added in 3.9
+ Name == "sse2.pminu.b" || // Added in 3.9
+ Name == "sse41.pminuw" || // Added in 3.9
+ Name == "sse41.pminud" || // Added in 3.9
+ Name == "avx512.kand.w" || // Added in 7.0
+ Name == "avx512.kandn.w" || // Added in 7.0
+ Name == "avx512.knot.w" || // Added in 7.0
+ Name == "avx512.kor.w" || // Added in 7.0
+ Name == "avx512.kxor.w" || // Added in 7.0
+ Name == "avx512.kxnor.w" || // Added in 7.0
+ Name == "avx512.kortestc.w" || // Added in 7.0
+ Name == "avx512.kortestz.w" || // Added in 7.0
+ Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
+ Name.startswith("avx2.pmax") || // Added in 3.9
+ Name.startswith("avx2.pmin") || // Added in 3.9
+ Name.startswith("avx512.mask.pmax") || // Added in 4.0
+ Name.startswith("avx512.mask.pmin") || // Added in 4.0
+ Name.startswith("avx2.vbroadcast") || // Added in 3.8
+ Name.startswith("avx2.pbroadcast") || // Added in 3.8
+ Name.startswith("avx.vpermil.") || // Added in 3.1
+ Name.startswith("sse2.pshuf") || // Added in 3.9
+ Name.startswith("avx512.pbroadcast") || // Added in 3.9
+ Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
+ Name.startswith("avx512.mask.movddup") || // Added in 3.9
+ Name.startswith("avx512.mask.movshdup") || // Added in 3.9
+ Name.startswith("avx512.mask.movsldup") || // Added in 3.9
+ Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
+ Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
+ Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
+ Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
+ Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
+ Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
+ Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
+ Name.startswith("avx512.mask.punpckl") || // Added in 3.9
+ Name.startswith("avx512.mask.punpckh") || // Added in 3.9
+ Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
+ Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
+ Name.startswith("avx512.mask.pand.") || // Added in 3.9
+ Name.startswith("avx512.mask.pandn.") || // Added in 3.9
+ Name.startswith("avx512.mask.por.") || // Added in 3.9
+ Name.startswith("avx512.mask.pxor.") || // Added in 3.9
+ Name.startswith("avx512.mask.and.") || // Added in 3.9
+ Name.startswith("avx512.mask.andn.") || // Added in 3.9
+ Name.startswith("avx512.mask.or.") || // Added in 3.9
+ Name.startswith("avx512.mask.xor.") || // Added in 3.9
+ Name.startswith("avx512.mask.padd.") || // Added in 4.0
+ Name.startswith("avx512.mask.psub.") || // Added in 4.0
+ Name.startswith("avx512.mask.pmull.") || // Added in 4.0
+ Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
+ Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
+ Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
+ Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
+ Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
+ Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
+ Name == "avx512.mask.vcvtph2ps.128" || // Added in 11.0
+ Name == "avx512.mask.vcvtph2ps.256" || // Added in 11.0
+ Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
+ Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
+ Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
+ Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
+ Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
+ Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
+ Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
+ Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
+ Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
+ Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
+ Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
+ Name == "avx512.cvtusi2sd" || // Added in 7.0
+ Name.startswith("avx512.mask.permvar.") || // Added in 7.0
+ Name == "sse2.pmulu.dq" || // Added in 7.0
+ Name == "sse41.pmuldq" || // Added in 7.0
+ Name == "avx2.pmulu.dq" || // Added in 7.0
+ Name == "avx2.pmul.dq" || // Added in 7.0
+ Name == "avx512.pmulu.dq.512" || // Added in 7.0
+ Name == "avx512.pmul.dq.512" || // Added in 7.0
+ Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
+ Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
+ Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
+ Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
+ Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
+ Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
+ Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
+ Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
+ Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
+ Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
+ Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
+ Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
+ Name.startswith("avx512.cmp.p") || // Added in 12.0
+ Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
+ Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
+ Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
+ Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
+ Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
+ Name.startswith("avx512.mask.psll.d") || // Added in 4.0
+ Name.startswith("avx512.mask.psll.q") || // Added in 4.0
+ Name.startswith("avx512.mask.psll.w") || // Added in 4.0
+ Name.startswith("avx512.mask.psra.d") || // Added in 4.0
+ Name.startswith("avx512.mask.psra.q") || // Added in 4.0
+ Name.startswith("avx512.mask.psra.w") || // Added in 4.0
+ Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
+ Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
+ Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
+ Name.startswith("avx512.mask.pslli") || // Added in 4.0
+ Name.startswith("avx512.mask.psrai") || // Added in 4.0
+ Name.startswith("avx512.mask.psrli") || // Added in 4.0
+ Name.startswith("avx512.mask.psllv") || // Added in 4.0
+ Name.startswith("avx512.mask.psrav") || // Added in 4.0
+ Name.startswith("avx512.mask.psrlv") || // Added in 4.0
+ Name.startswith("sse41.pmovsx") || // Added in 3.8
+ Name.startswith("sse41.pmovzx") || // Added in 3.9
+ Name.startswith("avx2.pmovsx") || // Added in 3.9
+ Name.startswith("avx2.pmovzx") || // Added in 3.9
+ Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
+ Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
+ Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
+ Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
+ Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
+ Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
+ Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
+ Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
+ Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
+ Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
+ Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
+ Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
+ Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
+ Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
+ Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
+ Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
+ Name.startswith("avx512.vpshld.") || // Added in 8.0
+ Name.startswith("avx512.vpshrd.") || // Added in 8.0
+ Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
+ Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
+ Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
+ Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
+ Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
+ Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
+ Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
+ Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
+ Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
+ Name.startswith("avx512.mask.conflict.") || // Added in 9.0
+ Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
+ Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
+ Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
+ Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
+ Name == "sse.cvtsi2ss" || // Added in 7.0
+ Name == "sse.cvtsi642ss" || // Added in 7.0
+ Name == "sse2.cvtsi2sd" || // Added in 7.0
+ Name == "sse2.cvtsi642sd" || // Added in 7.0
+ Name == "sse2.cvtss2sd" || // Added in 7.0
+ Name == "sse2.cvtdq2pd" || // Added in 3.9
+ Name == "sse2.cvtdq2ps" || // Added in 7.0
+ Name == "sse2.cvtps2pd" || // Added in 3.9
+ Name == "avx.cvtdq2.pd.256" || // Added in 3.9
+ Name == "avx.cvtdq2.ps.256" || // Added in 7.0
+ Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
+ Name.startswith("vcvtph2ps.") || // Added in 11.0
+ Name.startswith("avx.vinsertf128.") || // Added in 3.7
+ Name == "avx2.vinserti128" || // Added in 3.7
+ Name.startswith("avx512.mask.insert") || // Added in 4.0
+ Name.startswith("avx.vextractf128.") || // Added in 3.7
+ Name == "avx2.vextracti128" || // Added in 3.7
+ Name.startswith("avx512.mask.vextract") || // Added in 4.0
+ Name.startswith("sse4a.movnt.") || // Added in 3.9
+ Name.startswith("avx.movnt.") || // Added in 3.2
+ Name.startswith("avx512.storent.") || // Added in 3.9
+ Name == "sse41.movntdqa" || // Added in 5.0
+ Name == "avx2.movntdqa" || // Added in 5.0
+ Name == "avx512.movntdqa" || // Added in 5.0
+ Name == "sse2.storel.dq" || // Added in 3.9
+ Name.startswith("sse.storeu.") || // Added in 3.9
+ Name.startswith("sse2.storeu.") || // Added in 3.9
+ Name.startswith("avx.storeu.") || // Added in 3.9
+ Name.startswith("avx512.mask.storeu.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.p") || // Added in 3.9
+ Name.startswith("avx512.mask.store.b.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.w.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.d.") || // Added in 3.9
+ Name.startswith("avx512.mask.store.q.") || // Added in 3.9
+ Name == "avx512.mask.store.ss" || // Added in 7.0
+ Name.startswith("avx512.mask.loadu.") || // Added in 3.9
+ Name.startswith("avx512.mask.load.") || // Added in 3.9
+ Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
+ Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
+ Name.startswith("avx512.mask.expand.b") || // Added in 9.0
+ Name.startswith("avx512.mask.expand.w") || // Added in 9.0
+ Name.startswith("avx512.mask.expand.d") || // Added in 9.0
+ Name.startswith("avx512.mask.expand.q") || // Added in 9.0
+ Name.startswith("avx512.mask.expand.p") || // Added in 9.0
+ Name.startswith("avx512.mask.compress.b") || // Added in 9.0
+ Name.startswith("avx512.mask.compress.w") || // Added in 9.0
+ Name.startswith("avx512.mask.compress.d") || // Added in 9.0
+ Name.startswith("avx512.mask.compress.q") || // Added in 9.0
+ Name.startswith("avx512.mask.compress.p") || // Added in 9.0
+ Name == "sse42.crc32.64.8" || // Added in 3.4
+ Name.startswith("avx.vbroadcast.s") || // Added in 3.5
+ Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
+ Name.startswith("avx512.mask.palignr.") || // Added in 3.9
+ Name.startswith("avx512.mask.valign.") || // Added in 4.0
+ Name.startswith("sse2.psll.dq") || // Added in 3.7
+ Name.startswith("sse2.psrl.dq") || // Added in 3.7
+ Name.startswith("avx2.psll.dq") || // Added in 3.7
+ Name.startswith("avx2.psrl.dq") || // Added in 3.7
+ Name.startswith("avx512.psll.dq") || // Added in 3.9
+ Name.startswith("avx512.psrl.dq") || // Added in 3.9
+ Name == "sse41.pblendw" || // Added in 3.7
+ Name.startswith("sse41.blendp") || // Added in 3.7
+ Name.startswith("avx.blend.p") || // Added in 3.7
+ Name == "avx2.pblendw" || // Added in 3.7
+ Name.startswith("avx2.pblendd.") || // Added in 3.7
+ Name.startswith("avx.vbroadcastf128") || // Added in 4.0
+ Name == "avx2.vbroadcasti128" || // Added in 3.7
+ Name.startswith("avx512.mask.broadcastf32x4.") || // Added in 6.0
+ Name.startswith("avx512.mask.broadcastf64x2.") || // Added in 6.0
+ Name.startswith("avx512.mask.broadcastf32x8.") || // Added in 6.0
+ Name.startswith("avx512.mask.broadcastf64x4.") || // Added in 6.0
+ Name.startswith("avx512.mask.broadcasti32x4.") || // Added in 6.0
+ Name.startswith("avx512.mask.broadcasti64x2.") || // Added in 6.0
+ Name.startswith("avx512.mask.broadcasti32x8.") || // Added in 6.0
+ Name.startswith("avx512.mask.broadcasti64x4.") || // Added in 6.0
+ Name == "xop.vpcmov" || // Added in 3.8
+ Name == "xop.vpcmov.256" || // Added in 5.0
+ Name.startswith("avx512.mask.move.s") || // Added in 4.0
+ Name.startswith("avx512.cvtmask2") || // Added in 5.0
+ Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0
+ Name.startswith("xop.vprot") || // Added in 8.0
+ Name.startswith("avx512.prol") || // Added in 8.0
+ Name.startswith("avx512.pror") || // Added in 8.0
+ Name.startswith("avx512.mask.prorv.") || // Added in 8.0
+ Name.startswith("avx512.mask.pror.") || // Added in 8.0
+ Name.startswith("avx512.mask.prolv.") || // Added in 8.0
+ Name.startswith("avx512.mask.prol.") || // Added in 8.0
+ Name.startswith("avx512.ptestm") || //Added in 6.0
+ Name.startswith("avx512.ptestnm") || //Added in 6.0
+ Name.startswith("avx512.mask.pavg")) // Added in 6.0
+ return true;
+
+ return false;
+}
+
+static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
+ Function *&NewFn) {
+ // Only handle intrinsics that start with "x86.".
+ if (!Name.startswith("x86."))
+ return false;
+ // Remove "x86." prefix.
+ Name = Name.substr(4);
+
+ if (ShouldUpgradeX86Intrinsic(F, Name)) {
+ NewFn = nullptr;
+ return true;
+ }
+
+ if (Name == "rdtscp") { // Added in 8.0
+ // If this intrinsic has 0 operands, it's the new version.
+ if (F->getFunctionType()->getNumParams() == 0)
+ return false;
+
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_rdtscp);
+ return true;
+ }
+
+ // SSE4.1 ptest functions may have an old signature.
+ if (Name.startswith("sse41.ptest")) { // Added in 3.2
+ if (Name.substr(11) == "c")
+ return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
+ if (Name.substr(11) == "z")
+ return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
+ if (Name.substr(11) == "nzc")
+ return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
+ }
+ // Several blend and other instructions with masks used the wrong number of
+ // bits.
+ if (Name == "sse41.insertps") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
+ NewFn);
+ if (Name == "sse41.dppd") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
+ NewFn);
+ if (Name == "sse41.dpps") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
+ NewFn);
+ if (Name == "sse41.mpsadbw") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
+ NewFn);
+ if (Name == "avx.dp.ps.256") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
+ NewFn);
+ if (Name == "avx2.mpsadbw") // Added in 3.6
+ return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
+ NewFn);
+ if (Name == "avx512.mask.cmp.pd.128") // Added in 7.0
+ return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_128,
+ NewFn);
+ if (Name == "avx512.mask.cmp.pd.256") // Added in 7.0
+ return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_256,
+ NewFn);
+ if (Name == "avx512.mask.cmp.pd.512") // Added in 7.0
+ return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_pd_512,
+ NewFn);
+ if (Name == "avx512.mask.cmp.ps.128") // Added in 7.0
+ return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_128,
+ NewFn);
+ if (Name == "avx512.mask.cmp.ps.256") // Added in 7.0
+ return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_256,
+ NewFn);
+ if (Name == "avx512.mask.cmp.ps.512") // Added in 7.0
+ return UpgradeX86MaskedFPCompare(F, Intrinsic::x86_avx512_mask_cmp_ps_512,
+ NewFn);
+
+ // frcz.ss/sd may need to have an argument dropped. Added in 3.2
+ if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_ss);
+ return true;
+ }
+ if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_xop_vfrcz_sd);
+ return true;
+ }
+ // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
+ if (Name.startswith("xop.vpermil2")) { // Added in 3.9
+ auto Idx = F->getFunctionType()->getParamType(2);
+ if (Idx->isFPOrFPVectorTy()) {
+ rename(F);
+ unsigned IdxSize = Idx->getPrimitiveSizeInBits();
+ unsigned EltSize = Idx->getScalarSizeInBits();
+ Intrinsic::ID Permil2ID;
+ if (EltSize == 64 && IdxSize == 128)
+ Permil2ID = Intrinsic::x86_xop_vpermil2pd;
+ else if (EltSize == 32 && IdxSize == 128)
+ Permil2ID = Intrinsic::x86_xop_vpermil2ps;
+ else if (EltSize == 64 && IdxSize == 256)
+ Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
+ else
+ Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
+ return true;
+ }
+ }
+
+ if (Name == "seh.recoverfp") {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
+ return true;
+ }
+
+ return false;
+}
+
+static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
+ assert(F && "Illegal to upgrade a non-existent Function.");
+
+ // Quickly eliminate it if it's not a candidate.
+ StringRef Name = F->getName();
+ if (Name.size() <= 8 || !Name.startswith("llvm."))
+ return false;
+ Name = Name.substr(5); // Strip off "llvm."
+
+ switch (Name[0]) {
+ default: break;
+ case 'a': {
+ if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("aarch64.neon.frintn")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::roundeven,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("aarch64.neon.rbit")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("arm.neon.vclz")) {
+ Type* args[2] = {
+ F->arg_begin()->getType(),
+ Type::getInt1Ty(F->getContext())
+ };
+ // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
+ // the end of the name. Change name from llvm.arm.neon.vclz.* to
+ // llvm.ctlz.*
+ FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
+ NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
+ "llvm.ctlz." + Name.substr(14), F->getParent());
+ return true;
+ }
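+ // For instance (illustrative), llvm.arm.neon.vclz.v4i32 becomes
+ // llvm.ctlz.v4i32; the second i1 operand that llvm.ctlz expects is
+ // supplied when the call itself is upgraded.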
+ if (Name.startswith("arm.neon.vcnt")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
+ F->arg_begin()->getType());
+ return true;
+ }
+ static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
+ if (vstRegex.match(Name)) {
+ static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
+ Intrinsic::arm_neon_vst2,
+ Intrinsic::arm_neon_vst3,
+ Intrinsic::arm_neon_vst4};
+
+ static const Intrinsic::ID StoreLaneInts[] = {
+ Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
+ Intrinsic::arm_neon_vst4lane
+ };
+
+ auto fArgs = F->getFunctionType()->params();
+ Type *Tys[] = {fArgs[0], fArgs[1]};
+ if (!Name.contains("lane"))
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ StoreInts[fArgs.size() - 3], Tys);
+ else
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ StoreLaneInts[fArgs.size() - 5], Tys);
+ return true;
+ }
+ if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
+ return true;
+ }
+ if (Name.startswith("arm.neon.vqadds.")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("arm.neon.vqaddu.")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("arm.neon.vqsubs.")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("arm.neon.vqsubu.")) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("aarch64.neon.addp")) {
+ if (F->arg_size() != 2)
+ break; // Invalid IR.
+ VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
+ if (Ty && Ty->getElementType()->isFloatingPointTy()) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::aarch64_neon_faddp, Ty);
+ return true;
+ }
+ }
+
+ // Changed in 12.0: bfdot accepts v4bf16 and v8bf16 instead of v8i8 and
+ // v16i8, respectively.
+ if ((Name.startswith("arm.neon.bfdot.") ||
+ Name.startswith("aarch64.neon.bfdot.")) &&
+ Name.endswith("i8")) {
+ Intrinsic::ID IID =
+ StringSwitch<Intrinsic::ID>(Name)
+ .Cases("arm.neon.bfdot.v2f32.v8i8",
+ "arm.neon.bfdot.v4f32.v16i8",
+ Intrinsic::arm_neon_bfdot)
+ .Cases("aarch64.neon.bfdot.v2f32.v8i8",
+ "aarch64.neon.bfdot.v4f32.v16i8",
+ Intrinsic::aarch64_neon_bfdot)
+ .Default(Intrinsic::not_intrinsic);
+ if (IID == Intrinsic::not_intrinsic)
+ break;
+
+ size_t OperandWidth = F->getReturnType()->getPrimitiveSizeInBits();
+ assert((OperandWidth == 64 || OperandWidth == 128) &&
+ "Unexpected operand width");
+ LLVMContext &Ctx = F->getParent()->getContext();
+ std::array<Type *, 2> Tys {{
+ F->getReturnType(),
+ FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)
+ }};
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
+ return true;
+ }
+
+ // Changed in 12.0: bfmmla, bfmlalb and bfmlalt are not polymorphic anymore
+ // and accept v8bf16 instead of v16i8
+ if ((Name.startswith("arm.neon.bfm") ||
+ Name.startswith("aarch64.neon.bfm")) &&
+ Name.endswith(".v4f32.v16i8")) {
+ Intrinsic::ID IID =
+ StringSwitch<Intrinsic::ID>(Name)
+ .Case("arm.neon.bfmmla.v4f32.v16i8",
+ Intrinsic::arm_neon_bfmmla)
+ .Case("arm.neon.bfmlalb.v4f32.v16i8",
+ Intrinsic::arm_neon_bfmlalb)
+ .Case("arm.neon.bfmlalt.v4f32.v16i8",
+ Intrinsic::arm_neon_bfmlalt)
+ .Case("aarch64.neon.bfmmla.v4f32.v16i8",
+ Intrinsic::aarch64_neon_bfmmla)
+ .Case("aarch64.neon.bfmlalb.v4f32.v16i8",
+ Intrinsic::aarch64_neon_bfmlalb)
+ .Case("aarch64.neon.bfmlalt.v4f32.v16i8",
+ Intrinsic::aarch64_neon_bfmlalt)
+ .Default(Intrinsic::not_intrinsic);
+ if (IID == Intrinsic::not_intrinsic)
+ break;
+
+ std::array<Type *, 0> Tys;
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID, Tys);
+ return true;
+ }
+
+ if (Name == "arm.mve.vctp64" &&
+ cast<FixedVectorType>(F->getReturnType())->getNumElements() == 4) {
+ // A vctp64 returning a v4i1 is converted to return a v2i1. Rename the
+ // function and deal with it below in UpgradeIntrinsicCall.
+ rename(F);
+ return true;
+ }
+ // These too are changed to accept a v2i1 instead of the old v4i1.
+ if (Name == "arm.mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
+ Name == "arm.mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
+ Name == "arm.mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
+ Name == "arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
+ Name == "arm.mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
+ Name == "arm.mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
+ Name == "arm.mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
+ Name == "arm.mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
+ Name == "arm.cde.vcx1q.predicated.v2i64.v4i1" ||
+ Name == "arm.cde.vcx1qa.predicated.v2i64.v4i1" ||
+ Name == "arm.cde.vcx2q.predicated.v2i64.v4i1" ||
+ Name == "arm.cde.vcx2qa.predicated.v2i64.v4i1" ||
+ Name == "arm.cde.vcx3q.predicated.v2i64.v4i1" ||
+ Name == "arm.cde.vcx3qa.predicated.v2i64.v4i1")
+ return true;
+
+ if (Name == "amdgcn.alignbit") {
+ // Target-specific intrinsic became redundant.
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
+ {F->getReturnType()});
+ return true;
+ }
+
+ break;
+ }
+
+ case 'c': {
+ if (Name.startswith("ctlz.") && F->arg_size() == 1) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ F->arg_begin()->getType());
+ return true;
+ }
+ if (Name.startswith("cttz.") && F->arg_size() == 1) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
+ F->arg_begin()->getType());
+ return true;
+ }
+ break;
+ }
+ case 'd': {
+ if (Name == "dbg.value" && F->arg_size() == 4) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
+ return true;
+ }
+ break;
+ }
+ case 'e': {
+ if (Name.startswith("experimental.vector.extract.")) {
+ rename(F);
+ Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::vector_extract, Tys);
+ return true;
+ }
+
+ if (Name.startswith("experimental.vector.insert.")) {
+ rename(F);
+ auto Args = F->getFunctionType()->params();
+ Type *Tys[] = {Args[0], Args[1]};
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::vector_insert, Tys);
+ return true;
+ }
+
+ SmallVector<StringRef, 2> Groups;
+ static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[a-z][0-9]+");
+ if (R.match(Name, &Groups)) {
+ Intrinsic::ID ID;
+ ID = StringSwitch<Intrinsic::ID>(Groups[1])
+ .Case("add", Intrinsic::vector_reduce_add)
+ .Case("mul", Intrinsic::vector_reduce_mul)
+ .Case("and", Intrinsic::vector_reduce_and)
+ .Case("or", Intrinsic::vector_reduce_or)
+ .Case("xor", Intrinsic::vector_reduce_xor)
+ .Case("smax", Intrinsic::vector_reduce_smax)
+ .Case("smin", Intrinsic::vector_reduce_smin)
+ .Case("umax", Intrinsic::vector_reduce_umax)
+ .Case("umin", Intrinsic::vector_reduce_umin)
+ .Case("fmax", Intrinsic::vector_reduce_fmax)
+ .Case("fmin", Intrinsic::vector_reduce_fmin)
+ .Default(Intrinsic::not_intrinsic);
+ if (ID != Intrinsic::not_intrinsic) {
+ rename(F);
+ auto Args = F->getFunctionType()->params();
+ NewFn = Intrinsic::getDeclaration(F->getParent(), ID, {Args[0]});
+ return true;
+ }
+ }
+ static const Regex R2(
+ "^experimental.vector.reduce.v2.([a-z]+)\\.[fi][0-9]+");
+ Groups.clear();
+ if (R2.match(Name, &Groups)) {
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ if (Groups[1] == "fadd")
+ ID = Intrinsic::vector_reduce_fadd;
+ if (Groups[1] == "fmul")
+ ID = Intrinsic::vector_reduce_fmul;
+ if (ID != Intrinsic::not_intrinsic) {
+ rename(F);
+ auto Args = F->getFunctionType()->params();
+ Type *Tys[] = {Args[1]};
+ NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
+ return true;
+ }
+ }
+ break;
+ }
+ case 'i':
+ case 'l': {
+ bool IsLifetimeStart = Name.startswith("lifetime.start");
+ if (IsLifetimeStart || Name.startswith("invariant.start")) {
+ Intrinsic::ID ID = IsLifetimeStart ?
+ Intrinsic::lifetime_start : Intrinsic::invariant_start;
+ auto Args = F->getFunctionType()->params();
+ Type* ObjectPtr[1] = {Args[1]};
+ if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
+ return true;
+ }
+ }
+
+ bool IsLifetimeEnd = Name.startswith("lifetime.end");
+ if (IsLifetimeEnd || Name.startswith("invariant.end")) {
+ Intrinsic::ID ID = IsLifetimeEnd ?
+ Intrinsic::lifetime_end : Intrinsic::invariant_end;
+
+ auto Args = F->getFunctionType()->params();
+ Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
+ if (F->getName() != Intrinsic::getName(ID, ObjectPtr, F->getParent())) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
+ return true;
+ }
+ }
+ if (Name.startswith("invariant.group.barrier")) {
+ // Rename invariant.group.barrier to launder.invariant.group
+ auto Args = F->getFunctionType()->params();
+ Type* ObjectPtr[1] = {Args[0]};
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::launder_invariant_group, ObjectPtr);
+ return true;
+ }
+
+ break;
+ }
+ case 'm': {
+ if (Name.startswith("masked.load.")) {
+ Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
+ if (F->getName() !=
+ Intrinsic::getName(Intrinsic::masked_load, Tys, F->getParent())) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_load,
+ Tys);
+ return true;
+ }
+ }
+ if (Name.startswith("masked.store.")) {
+ auto Args = F->getFunctionType()->params();
+ Type *Tys[] = { Args[0], Args[1] };
+ if (F->getName() !=
+ Intrinsic::getName(Intrinsic::masked_store, Tys, F->getParent())) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_store,
+ Tys);
+ return true;
+ }
+ }
+ // Rename gather/scatter intrinsics that lack address space overloading
+ // to the new overload, which includes an address space.
+ if (Name.startswith("masked.gather.")) {
+ Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
+ if (F->getName() !=
+ Intrinsic::getName(Intrinsic::masked_gather, Tys, F->getParent())) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_gather, Tys);
+ return true;
+ }
+ }
+ if (Name.startswith("masked.scatter.")) {
+ auto Args = F->getFunctionType()->params();
+ Type *Tys[] = {Args[0], Args[1]};
+ if (F->getName() !=
+ Intrinsic::getName(Intrinsic::masked_scatter, Tys, F->getParent())) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_scatter, Tys);
+ return true;
+ }
+ }
+ // Upgrade the memory intrinsics (memcpy/memmove/memset) that take an
+ // alignment parameter by embedding the alignment as an attribute of
+ // the pointer args.
+ if (Name.startswith("memcpy.") && F->arg_size() == 5) {
+ rename(F);
+ // Get the types of dest, src, and len
+ ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
+ ParamTypes);
+ return true;
+ }
+ if (Name.startswith("memmove.") && F->arg_size() == 5) {
+ rename(F);
+ // Get the types of dest, src, and len
+ ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
+ ParamTypes);
+ return true;
+ }
+ if (Name.startswith("memset.") && F->arg_size() == 5) {
+ rename(F);
+ // Get the types of dest, and len
+ const auto *FT = F->getFunctionType();
+ Type *ParamTypes[2] = {
+ FT->getParamType(0), // Dest
+ FT->getParamType(2) // len
+ };
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
+ ParamTypes);
+ return true;
+ }
+ break;
+ }
+ case 'n': {
+ if (Name.startswith("nvvm.")) {
+ Name = Name.substr(5);
+
+ // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
+ Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
+ .Cases("brev32", "brev64", Intrinsic::bitreverse)
+ .Case("clz.i", Intrinsic::ctlz)
+ .Case("popc.i", Intrinsic::ctpop)
+ .Default(Intrinsic::not_intrinsic);
+ if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
+ {F->getReturnType()});
+ return true;
+ }
+
+ // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
+ // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
+ //
+ // TODO: We could add lohi.i2d.
+ bool Expand = StringSwitch<bool>(Name)
+ .Cases("abs.i", "abs.ll", true)
+ .Cases("clz.ll", "popc.ll", "h2f", true)
+ .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
+ .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
+ .StartsWith("atomic.load.add.f32.p", true)
+ .StartsWith("atomic.load.add.f64.p", true)
+ .Default(false);
+ if (Expand) {
+ NewFn = nullptr;
+ return true;
+ }
+ }
+ break;
+ }
+ case 'o':
+ // We only need to change the name to match the mangling including the
+ // address space.
+ if (Name.startswith("objectsize.")) {
+ Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
+ if (F->arg_size() == 2 || F->arg_size() == 3 ||
+ F->getName() !=
+ Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
+ Tys);
+ return true;
+ }
+ }
+ break;
+
+ case 'p':
+ if (Name == "prefetch") {
+ // Handle address space overloading.
+ Type *Tys[] = {F->arg_begin()->getType()};
+ if (F->getName() !=
+ Intrinsic::getName(Intrinsic::prefetch, Tys, F->getParent())) {
+ rename(F);
+ NewFn =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
+ return true;
+ }
+ } else if (Name.startswith("ptr.annotation.") && F->arg_size() == 4) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::ptr_annotation,
+ F->arg_begin()->getType());
+ return true;
+ }
+ break;
+
+ case 's':
+ if (Name == "stackprotectorcheck") {
+ NewFn = nullptr;
+ return true;
+ }
+ break;
+
+ case 'v': {
+ if (Name == "var.annotation" && F->arg_size() == 4) {
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::var_annotation);
+ return true;
+ }
+ break;
+ }
+
+ case 'x':
+ if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
+ return true;
+ }
+
+ auto *ST = dyn_cast<StructType>(F->getReturnType());
+ if (ST && (!ST->isLiteral() || ST->isPacked())) {
+ // Replace return type with literal non-packed struct. Only do this for
+ // intrinsics declared to return a struct, not for intrinsics with
+ // overloaded return type, in which case the exact struct type will be
+ // mangled into the name.
+ SmallVector<Intrinsic::IITDescriptor> Desc;
+ Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Desc);
+ if (Desc.front().Kind == Intrinsic::IITDescriptor::Struct) {
+ auto *FT = F->getFunctionType();
+ auto *NewST = StructType::get(ST->getContext(), ST->elements());
+ auto *NewFT = FunctionType::get(NewST, FT->params(), FT->isVarArg());
+ std::string Name = F->getName().str();
+ rename(F);
+ NewFn = Function::Create(NewFT, F->getLinkage(), F->getAddressSpace(),
+ Name, F->getParent());
+
+ // The new function may also need remangling.
+ if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F))
+ NewFn = *Result;
+ return true;
+ }
+ }
+
+ // Remangle our intrinsic since we upgrade the mangling
+ auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
+ if (Result != None) {
+ NewFn = *Result;
+ return true;
+ }
+
+ // This may not belong here. This function is effectively being overloaded
+ // to both detect an intrinsic which needs upgrading, and to provide the
+ // upgraded form of the intrinsic. We should perhaps have two separate
+ // functions for this.
+ return false;
+}
+
+bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
+ NewFn = nullptr;
+ bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
+ assert(F != NewFn && "Intrinsic function upgraded to the same function");
+
+ // Upgrade intrinsic attributes. This does not change the function.
+ if (NewFn)
+ F = NewFn;
+ if (Intrinsic::ID id = F->getIntrinsicID())
+ F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
+ return Upgraded;
+}
+
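+// Upgrade llvm.global_ctors/llvm.global_dtors from the old two-field entry
+// { i32 priority, void ()* fn } to the three-field form, filling in the new
+// associated-data field with a null i8*.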
+GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
+ if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
+ GV->getName() == "llvm.global_dtors")) ||
+ !GV->hasInitializer())
+ return nullptr;
+ ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
+ if (!ATy)
+ return nullptr;
+ StructType *STy = dyn_cast<StructType>(ATy->getElementType());
+ if (!STy || STy->getNumElements() != 2)
+ return nullptr;
+
+ LLVMContext &C = GV->getContext();
+ IRBuilder<> IRB(C);
+ auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
+ IRB.getInt8PtrTy());
+ Constant *Init = GV->getInitializer();
+ unsigned N = Init->getNumOperands();
+ std::vector<Constant *> NewCtors(N);
+ for (unsigned i = 0; i != N; ++i) {
+ auto Ctor = cast<Constant>(Init->getOperand(i));
+ NewCtors[i] = ConstantStruct::get(
+ EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
+ Constant::getNullValue(IRB.getInt8PtrTy()));
+ }
+ Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);
+
+ return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
+ NewInit, GV->getName());
+}
+
+// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
+// to byte shuffles.
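+// e.g. for a 128-bit vector and Shift == 4 the shuffle mask is
+// <12,13,14,15,16,...,27>: lanes 0-3 read zeros from Res and lanes 4-15 read
+// bytes 0-11 of Op, i.e. a byte shift left by four.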
+static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
+ Value *Op, unsigned Shift) {
+ auto *ResultTy = cast<FixedVectorType>(Op->getType());
+ unsigned NumElts = ResultTy->getNumElements() * 8;
+
+ // Bitcast from a 64-bit element type to a byte element type.
+ Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
+ Op = Builder.CreateBitCast(Op, VecTy, "cast");
+
+ // We'll be shuffling in zeroes.
+ Value *Res = Constant::getNullValue(VecTy);
+
+ // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
+ // we'll just return the zero vector.
+ if (Shift < 16) {
+ int Idxs[64];
+ // 256/512-bit version is split into 2/4 16-byte lanes.
+ for (unsigned l = 0; l != NumElts; l += 16)
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = NumElts + i - Shift;
+ if (Idx < NumElts)
+ Idx -= NumElts - 16; // end of lane, switch operand.
+ Idxs[l + i] = Idx + l;
+ }
+
+ Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
+ }
+
+ // Bitcast back to a 64-bit element type.
+ return Builder.CreateBitCast(Res, ResultTy, "cast");
+}
+
+// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
+// to byte shuffles.
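+// e.g. for a 128-bit vector and Shift == 4 the shuffle mask is
+// <4,5,...,15,16,17,18,19>: lanes 0-11 read bytes 4-15 of Op and the last
+// four lanes read zeros from Res, i.e. a byte shift right by four.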
+static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
+ unsigned Shift) {
+ auto *ResultTy = cast<FixedVectorType>(Op->getType());
+ unsigned NumElts = ResultTy->getNumElements() * 8;
+
+ // Bitcast from a 64-bit element type to a byte element type.
+ Type *VecTy = FixedVectorType::get(Builder.getInt8Ty(), NumElts);
+ Op = Builder.CreateBitCast(Op, VecTy, "cast");
+
+ // We'll be shuffling in zeroes.
+ Value *Res = Constant::getNullValue(VecTy);
+
+ // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
+ // we'll just return the zero vector.
+ if (Shift < 16) {
+ int Idxs[64];
+ // 256/512-bit version is split into 2/4 16-byte lanes.
+ for (unsigned l = 0; l != NumElts; l += 16)
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = i + Shift;
+ if (Idx >= 16)
+ Idx += NumElts - 16; // end of lane, switch operand.
+ Idxs[l + i] = Idx + l;
+ }
+
+ Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
+ }
+
+ // Bitcast back to a 64-bit element type.
+ return Builder.CreateBitCast(Res, ResultTy, "cast");
+}
+
+static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
+ unsigned NumElts) {
+ assert(isPowerOf2_32(NumElts) && "Expected power-of-2 mask elements");
+ llvm::VectorType *MaskTy = FixedVectorType::get(
+ Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
+ Mask = Builder.CreateBitCast(Mask, MaskTy);
+
+  // If we have fewer than 8 elements (1, 2 or 4), then the starting mask was
+  // an i8 and we need to extract down to the right number of elements.
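+  // e.g. an i8 mask with value 0b0101 and NumElts == 4 becomes
+  // <i1 1, i1 0, i1 1, i1 0>, since bit i of the mask maps to element i.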
+ if (NumElts <= 4) {
+ int Indices[4];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i;
+ Mask = Builder.CreateShuffleVector(
+ Mask, Mask, makeArrayRef(Indices, NumElts), "extract");
+ }
+
+ return Mask;
+}
+
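+// Select between Op0 and Op1 lane-wise using an x86 integer mask: bit i of
+// the mask picks lane i of Op0 when set and lane i of Op1 when clear.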
+static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
+ Value *Op0, Value *Op1) {
+ // If the mask is all ones just emit the first operation.
+ if (const auto *C = dyn_cast<Constant>(Mask))
+ if (C->isAllOnesValue())
+ return Op0;
+
+ Mask = getX86MaskVec(Builder, Mask,
+ cast<FixedVectorType>(Op0->getType())->getNumElements());
+ return Builder.CreateSelect(Mask, Op0, Op1);
+}
+
+static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
+ Value *Op0, Value *Op1) {
+ // If the mask is all ones just emit the first operation.
+ if (const auto *C = dyn_cast<Constant>(Mask))
+ if (C->isAllOnesValue())
+ return Op0;
+
+ auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(),
+ Mask->getType()->getIntegerBitWidth());
+ Mask = Builder.CreateBitCast(Mask, MaskTy);
+ Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
+ return Builder.CreateSelect(Mask, Op0, Op1);
+}
+
+// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
+// PALIGNR handles large immediates by shifting, while VALIGN masks the
+// immediate, so we need to handle both cases. VALIGN also doesn't have
+// 128-bit lanes.
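+// e.g. a 128-bit palignr with ShiftVal == 4 produces
+// <Op1[4..15], Op0[0..3]>, i.e. the concatenation Op0:Op1 shifted right by
+// four bytes.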
+static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
+ Value *Op1, Value *Shift,
+ Value *Passthru, Value *Mask,
+ bool IsVALIGN) {
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();
+
+ unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
+ assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
+ assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
+ assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");
+
+ // Mask the immediate for VALIGN.
+ if (IsVALIGN)
+ ShiftVal &= (NumElts - 1);
+
+ // If palignr is shifting the pair of vectors more than the size of two
+ // lanes, emit zero.
+ if (ShiftVal >= 32)
+ return llvm::Constant::getNullValue(Op0->getType());
+
+ // If palignr is shifting the pair of input vectors more than one lane,
+ // but less than two lanes, convert to shifting in zeroes.
+ if (ShiftVal > 16) {
+ ShiftVal -= 16;
+ Op1 = Op0;
+ Op0 = llvm::Constant::getNullValue(Op0->getType());
+ }
+
+ int Indices[64];
+  // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
+ for (unsigned l = 0; l < NumElts; l += 16) {
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = ShiftVal + i;
+ if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
+ Idx += NumElts - 16; // End of lane, switch operand.
+ Indices[l + i] = Idx + l;
+ }
+ }
+
+ Value *Align = Builder.CreateShuffleVector(Op1, Op0,
+ makeArrayRef(Indices, NumElts),
+ "palignr");
+
+ return EmitX86Select(Builder, Mask, Align, Passthru);
+}
+
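+// Upgrade X86 VPERMT2/VPERMI2 variable-permute intrinsics to the unified
+// vpermi2var intrinsics, picking the IID from the vector width and element
+// type; the non-index form swaps the first two operands to match the
+// vpermi2var operand order.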
+static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
+ bool ZeroMask, bool IndexForm) {
+ Type *Ty = CI.getType();
+ unsigned VecWidth = Ty->getPrimitiveSizeInBits();
+ unsigned EltWidth = Ty->getScalarSizeInBits();
+ bool IsFloat = Ty->isFPOrFPVectorTy();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
+ else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_d_128;
+ else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
+ else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_q_128;
+ else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
+ else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_d_256;
+ else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
+ else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_q_256;
+ else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
+ else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_d_512;
+ else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
+ else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_q_512;
+ else if (VecWidth == 128 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
+ else if (VecWidth == 256 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
+ else if (VecWidth == 512 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
+ else if (VecWidth == 128 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
+ else if (VecWidth == 256 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
+ else if (VecWidth == 512 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
+ CI.getArgOperand(2) };
+
+ // If this isn't index form we need to swap operand 0 and 1.
+ if (!IndexForm)
+ std::swap(Args[0], Args[1]);
+
+ Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
+ Args);
+ Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
+ : Builder.CreateBitCast(CI.getArgOperand(1),
+ Ty);
+ return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
+}
+
+static Value *UpgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
+ Intrinsic::ID IID) {
+ Type *Ty = CI.getType();
+ Value *Op0 = CI.getOperand(0);
+ Value *Op1 = CI.getOperand(1);
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
+
+ if (CI.arg_size() == 4) { // For masked intrinsics.
+ Value *VecSrc = CI.getOperand(2);
+ Value *Mask = CI.getOperand(3);
+ Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ }
+ return Res;
+}
+
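+// Upgrade X86 rotate intrinsics to the generic funnel shift intrinsics:
+// rotl(x, a) == fshl(x, x, a) and rotr(x, a) == fshr(x, x, a).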
+static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
+ bool IsRotateRight) {
+ Type *Ty = CI.getType();
+ Value *Src = CI.getArgOperand(0);
+ Value *Amt = CI.getArgOperand(1);
+
+  // Amount may be a scalar immediate, in which case create a splat vector.
+  // Funnel shift amounts are treated modulo the bit width, and the types are
+  // all powers of 2, so we only care about the lowest log2(BitWidth) bits.
+ if (Amt->getType() != Ty) {
+ unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
+ Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
+ Amt = Builder.CreateVectorSplat(NumElts, Amt);
+ }
+
+ Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
+
+ if (CI.arg_size() == 4) { // For masked intrinsics.
+ Value *VecSrc = CI.getOperand(2);
+ Value *Mask = CI.getOperand(3);
+ Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ }
+ return Res;
+}
+
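+// Upgrade XOP vpcom/vpcomu intrinsics: Imm selects the predicate (0=lt,
+// 1=le, 2=gt, 3=ge, 4=eq, 5=ne, 6=false, 7=true) and the i1 compare result
+// is sign-extended to the original lane width.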
+static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
+ bool IsSigned) {
+ Type *Ty = CI.getType();
+ Value *LHS = CI.getArgOperand(0);
+ Value *RHS = CI.getArgOperand(1);
+
+ CmpInst::Predicate Pred;
+ switch (Imm) {
+ case 0x0:
+ Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ break;
+ case 0x1:
+ Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
+ break;
+ case 0x2:
+ Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ break;
+ case 0x3:
+ Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
+ break;
+ case 0x4:
+ Pred = ICmpInst::ICMP_EQ;
+ break;
+ case 0x5:
+ Pred = ICmpInst::ICMP_NE;
+ break;
+ case 0x6:
+ return Constant::getNullValue(Ty); // FALSE
+ case 0x7:
+ return Constant::getAllOnesValue(Ty); // TRUE
+ default:
+ llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
+ }
+
+ Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
+ Value *Ext = Builder.CreateSExt(Cmp, Ty);
+ return Ext;
+}
+
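+// Upgrade X86 VPSHLD/VPSHRD concat-shift intrinsics to funnel shifts:
+// fshl(a, b, amt) shifts the concatenation a:b left by amt and returns the
+// high half, so the shift-right form just swaps the two vector operands and
+// uses fshr.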
+static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
+ bool IsShiftRight, bool ZeroMask) {
+ Type *Ty = CI.getType();
+ Value *Op0 = CI.getArgOperand(0);
+ Value *Op1 = CI.getArgOperand(1);
+ Value *Amt = CI.getArgOperand(2);
+
+ if (IsShiftRight)
+ std::swap(Op0, Op1);
+
+  // Amount may be a scalar immediate, in which case create a splat vector.
+  // Funnel shift amounts are treated modulo the bit width, and the types are
+  // all powers of 2, so we only care about the lowest log2(BitWidth) bits.
+ if (Amt->getType() != Ty) {
+ unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
+ Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
+ Amt = Builder.CreateVectorSplat(NumElts, Amt);
+ }
+
+ Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
+
+ unsigned NumArgs = CI.arg_size();
+ if (NumArgs >= 4) { // For masked intrinsics.
+ Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
+ ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
+ CI.getArgOperand(0);
+ Value *Mask = CI.getOperand(NumArgs - 1);
+ Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ }
+ return Res;
+}
+
+static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
+ Value *Ptr, Value *Data, Value *Mask,
+ bool Aligned) {
+ // Cast the pointer to the right type.
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(Data->getType()));
+ const Align Alignment =
+ Aligned
+ ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedSize() / 8)
+ : Align(1);
+
+ // If the mask is all ones just emit a regular store.
+ if (const auto *C = dyn_cast<Constant>(Mask))
+ if (C->isAllOnesValue())
+ return Builder.CreateAlignedStore(Data, Ptr, Alignment);
+
+ // Convert the mask from an integer type to a vector of i1.
+ unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
+ Mask = getX86MaskVec(Builder, Mask, NumElts);
+ return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
+}
+
+static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
+ Value *Ptr, Value *Passthru, Value *Mask,
+ bool Aligned) {
+ Type *ValTy = Passthru->getType();
+ // Cast the pointer to the right type.
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
+ const Align Alignment =
+ Aligned
+ ? Align(Passthru->getType()->getPrimitiveSizeInBits().getFixedSize() /
+ 8)
+ : Align(1);
+
+  // If the mask is all ones just emit a regular load.
+ if (const auto *C = dyn_cast<Constant>(Mask))
+ if (C->isAllOnesValue())
+ return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
+
+ // Convert the mask from an integer type to a vector of i1.
+ unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
+ Mask = getX86MaskVec(Builder, Mask, NumElts);
+ return Builder.CreateMaskedLoad(ValTy, Ptr, Alignment, Mask, Passthru);
+}
+
+static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
+ Type *Ty = CI.getType();
+ Value *Op0 = CI.getArgOperand(0);
+ Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
+ Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
+ if (CI.arg_size() == 3)
+ Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
+ return Res;
+}
+
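+// Upgrade PMULDQ/PMULUDQ: the vXi32 operands are reinterpreted as vXi64 and
+// the low 32 bits of each lane are sign-extended (shl+ashr) or zero-extended
+// (mask) in place before the full 64-bit multiply.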
+static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {
+ Type *Ty = CI.getType();
+
+ // Arguments have a vXi32 type so cast to vXi64.
+ Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
+ Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);
+
+ if (IsSigned) {
+ // Shift left then arithmetic shift right.
+ Constant *ShiftAmt = ConstantInt::get(Ty, 32);
+ LHS = Builder.CreateShl(LHS, ShiftAmt);
+ LHS = Builder.CreateAShr(LHS, ShiftAmt);
+ RHS = Builder.CreateShl(RHS, ShiftAmt);
+ RHS = Builder.CreateAShr(RHS, ShiftAmt);
+ } else {
+ // Clear the upper bits.
+ Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
+ LHS = Builder.CreateAnd(LHS, Mask);
+ RHS = Builder.CreateAnd(RHS, Mask);
+ }
+
+ Value *Res = Builder.CreateMul(LHS, RHS);
+
+ if (CI.arg_size() == 4)
+ Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
+
+ return Res;
+}
+
+// Apply a mask to a vector of i1's and make sure the result is at least 8 bits wide.
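+// e.g. a <4 x i1> result is widened to <8 x i1> by shuffling in zero lanes
+// before the bitcast to an i8.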
+static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
+ Value *Mask) {
+ unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
+ if (Mask) {
+ const auto *C = dyn_cast<Constant>(Mask);
+ if (!C || !C->isAllOnesValue())
+ Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
+ }
+
+ if (NumElts < 8) {
+ int Indices[8];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i;
+ for (unsigned i = NumElts; i != 8; ++i)
+ Indices[i] = NumElts + i % NumElts;
+ Vec = Builder.CreateShuffleVector(Vec,
+ Constant::getNullValue(Vec->getType()),
+ Indices);
+ }
+ return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
+}
+
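+// Upgrade AVX512 masked integer compares: CC selects the predicate (0=eq,
+// 1=lt, 2=le, 3=false, 4=ne, 5=ge, 6=gt, 7=true) and the compare result is
+// masked and widened to at least 8 bits by ApplyX86MaskOn1BitsVec.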
+static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
+ unsigned CC, bool Signed) {
+ Value *Op0 = CI.getArgOperand(0);
+ unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
+
+ Value *Cmp;
+ if (CC == 3) {
+ Cmp = Constant::getNullValue(
+ FixedVectorType::get(Builder.getInt1Ty(), NumElts));
+ } else if (CC == 7) {
+ Cmp = Constant::getAllOnesValue(
+ FixedVectorType::get(Builder.getInt1Ty(), NumElts));
+ } else {
+ ICmpInst::Predicate Pred;
+ switch (CC) {
+ default: llvm_unreachable("Unknown condition code");
+ case 0: Pred = ICmpInst::ICMP_EQ; break;
+ case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
+ case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
+ case 4: Pred = ICmpInst::ICMP_NE; break;
+ case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
+ case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
+ }
+ Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
+ }
+
+ Value *Mask = CI.getArgOperand(CI.arg_size() - 1);
+
+ return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
+}
+
+// Replace a masked intrinsic with an older unmasked intrinsic.
+static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
+ Intrinsic::ID IID) {
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
+ Value *Rep = Builder.CreateCall(Intrin,
+ { CI.getArgOperand(0), CI.getArgOperand(1) });
+ return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
+}
+
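+// Upgrade masked scalar moves: bit 0 of the i8 mask selects element 0 of
+// either B or Src, and the chosen scalar is inserted into element 0 of A.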
+static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {
+ Value* A = CI.getArgOperand(0);
+ Value* B = CI.getArgOperand(1);
+ Value* Src = CI.getArgOperand(2);
+ Value* Mask = CI.getArgOperand(3);
+
+ Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
+ Value* Cmp = Builder.CreateIsNotNull(AndNode);
+ Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
+ Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
+ Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
+ return Builder.CreateInsertElement(A, Select, (uint64_t)0);
+}
+
+static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {
+ Value* Op = CI.getArgOperand(0);
+ Type* ReturnOp = CI.getType();
+ unsigned NumElts = cast<FixedVectorType>(CI.getType())->getNumElements();
+ Value *Mask = getX86MaskVec(Builder, Op, NumElts);
+ return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
+}
+
+// Replace intrinsic with unmasked version and a select.
+static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
+ CallBase &CI, Value *&Rep) {
+ Name = Name.substr(12); // Remove avx512.mask.
+
+ unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
+ unsigned EltWidth = CI.getType()->getScalarSizeInBits();
+ Intrinsic::ID IID;
+ if (Name.startswith("max.p")) {
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_sse_max_ps;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_sse2_max_pd;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx_max_ps_256;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx_max_pd_256;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("min.p")) {
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_sse_min_ps;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_sse2_min_pd;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx_min_ps_256;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx_min_pd_256;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pshuf.b.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_ssse3_pshuf_b_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pshuf_b;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pshuf_b_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pmul.hr.sw.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pmul_hr_sw;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pmulh.w.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_sse2_pmulh_w;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pmulh_w;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pmulh_w_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pmulhu.w.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_sse2_pmulhu_w;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pmulhu_w;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pmulhu_w_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pmaddw.d.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_sse2_pmadd_wd;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pmadd_wd;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pmaddw_d_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pmaddubs.w.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pmadd_ub_sw;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pmaddubs_w_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("packsswb.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_sse2_packsswb_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_packsswb;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_packsswb_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("packssdw.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_sse2_packssdw_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_packssdw;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_packssdw_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("packuswb.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_sse2_packuswb_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_packuswb;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_packuswb_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("packusdw.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_sse41_packusdw;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx2_packusdw;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_packusdw_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("vpermilvar.")) {
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx_vpermilvar_ps;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx_vpermilvar_pd;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx_vpermilvar_ps_256;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx_vpermilvar_pd_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name == "cvtpd2dq.256") {
+ IID = Intrinsic::x86_avx_cvt_pd2dq_256;
+ } else if (Name == "cvtpd2ps.256") {
+ IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
+ } else if (Name == "cvttpd2dq.256") {
+ IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
+ } else if (Name == "cvttps2dq.128") {
+ IID = Intrinsic::x86_sse2_cvttps2dq;
+ } else if (Name == "cvttps2dq.256") {
+ IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
+ } else if (Name.startswith("permvar.")) {
+ bool IsFloat = CI.getType()->isFPOrFPVectorTy();
+ if (VecWidth == 256 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx2_permps;
+ else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx2_permd;
+ else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_permvar_df_256;
+ else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_permvar_di_256;
+ else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx512_permvar_sf_512;
+ else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx512_permvar_si_512;
+ else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_permvar_df_512;
+ else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_permvar_di_512;
+ else if (VecWidth == 128 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_permvar_hi_128;
+ else if (VecWidth == 256 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_permvar_hi_256;
+ else if (VecWidth == 512 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_permvar_hi_512;
+ else if (VecWidth == 128 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_permvar_qi_128;
+ else if (VecWidth == 256 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_permvar_qi_256;
+ else if (VecWidth == 512 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_permvar_qi_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("dbpsadbw.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_avx512_dbpsadbw_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx512_dbpsadbw_256;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_dbpsadbw_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pmultishift.qb.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_avx512_pmultishift_qb_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx512_pmultishift_qb_256;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pmultishift_qb_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("conflict.")) {
+ if (Name[9] == 'd' && VecWidth == 128)
+ IID = Intrinsic::x86_avx512_conflict_d_128;
+ else if (Name[9] == 'd' && VecWidth == 256)
+ IID = Intrinsic::x86_avx512_conflict_d_256;
+ else if (Name[9] == 'd' && VecWidth == 512)
+ IID = Intrinsic::x86_avx512_conflict_d_512;
+ else if (Name[9] == 'q' && VecWidth == 128)
+ IID = Intrinsic::x86_avx512_conflict_q_128;
+ else if (Name[9] == 'q' && VecWidth == 256)
+ IID = Intrinsic::x86_avx512_conflict_q_256;
+ else if (Name[9] == 'q' && VecWidth == 512)
+ IID = Intrinsic::x86_avx512_conflict_q_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pavg.")) {
+ if (Name[5] == 'b' && VecWidth == 128)
+ IID = Intrinsic::x86_sse2_pavg_b;
+ else if (Name[5] == 'b' && VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pavg_b;
+ else if (Name[5] == 'b' && VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pavg_b_512;
+ else if (Name[5] == 'w' && VecWidth == 128)
+ IID = Intrinsic::x86_sse2_pavg_w;
+ else if (Name[5] == 'w' && VecWidth == 256)
+ IID = Intrinsic::x86_avx2_pavg_w;
+ else if (Name[5] == 'w' && VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pavg_w_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else
+ return false;
+
+ SmallVector<Value *, 4> Args(CI.args());
+ Args.pop_back();
+ Args.pop_back();
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
+ Args);
+ unsigned NumArgs = CI.arg_size();
+ Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
+ CI.getArgOperand(NumArgs - 2));
+ return true;
+}
+
+/// Upgrade the comment in a call to inline asm that represents an ObjC
+/// retain/release marker.
+void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
+ size_t Pos;
+ if (AsmStr->find("mov\tfp") == 0 &&
+ AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
+ (Pos = AsmStr->find("# marker")) != std::string::npos) {
+ AsmStr->replace(Pos, 1, ";");
+ }
+}
+
+static Value *UpgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
+ IRBuilder<> &Builder) {
+ if (Name == "mve.vctp64.old") {
+ // Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
+ // correct type.
+ Value *VCTP = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
+ CI->getArgOperand(0), CI->getName());
+ Value *C1 = Builder.CreateCall(
+ Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::arm_mve_pred_v2i,
+ {VectorType::get(Builder.getInt1Ty(), 2, false)}),
+ VCTP);
+ return Builder.CreateCall(
+ Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::arm_mve_pred_i2v,
+ {VectorType::get(Builder.getInt1Ty(), 4, false)}),
+ C1);
+ } else if (Name == "mve.mull.int.predicated.v2i64.v4i32.v4i1" ||
+ Name == "mve.vqdmull.predicated.v2i64.v4i32.v4i1" ||
+ Name == "mve.vldr.gather.base.predicated.v2i64.v2i64.v4i1" ||
+ Name == "mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1" ||
+ Name == "mve.vldr.gather.offset.predicated.v2i64.p0i64.v2i64.v4i1" ||
+ Name == "mve.vstr.scatter.base.predicated.v2i64.v2i64.v4i1" ||
+ Name == "mve.vstr.scatter.base.wb.predicated.v2i64.v2i64.v4i1" ||
+ Name == "mve.vstr.scatter.offset.predicated.p0i64.v2i64.v2i64.v4i1" ||
+ Name == "cde.vcx1q.predicated.v2i64.v4i1" ||
+ Name == "cde.vcx1qa.predicated.v2i64.v4i1" ||
+ Name == "cde.vcx2q.predicated.v2i64.v4i1" ||
+ Name == "cde.vcx2qa.predicated.v2i64.v4i1" ||
+ Name == "cde.vcx3q.predicated.v2i64.v4i1" ||
+ Name == "cde.vcx3qa.predicated.v2i64.v4i1") {
+ std::vector<Type *> Tys;
+ unsigned ID = CI->getIntrinsicID();
+ Type *V2I1Ty = FixedVectorType::get(Builder.getInt1Ty(), 2);
+ switch (ID) {
+ case Intrinsic::arm_mve_mull_int_predicated:
+ case Intrinsic::arm_mve_vqdmull_predicated:
+ case Intrinsic::arm_mve_vldr_gather_base_predicated:
+ Tys = {CI->getType(), CI->getOperand(0)->getType(), V2I1Ty};
+ break;
+ case Intrinsic::arm_mve_vldr_gather_base_wb_predicated:
+ case Intrinsic::arm_mve_vstr_scatter_base_predicated:
+ case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated:
+ Tys = {CI->getOperand(0)->getType(), CI->getOperand(0)->getType(),
+ V2I1Ty};
+ break;
+ case Intrinsic::arm_mve_vldr_gather_offset_predicated:
+ Tys = {CI->getType(), CI->getOperand(0)->getType(),
+ CI->getOperand(1)->getType(), V2I1Ty};
+ break;
+ case Intrinsic::arm_mve_vstr_scatter_offset_predicated:
+ Tys = {CI->getOperand(0)->getType(), CI->getOperand(1)->getType(),
+ CI->getOperand(2)->getType(), V2I1Ty};
+ break;
+ case Intrinsic::arm_cde_vcx1q_predicated:
+ case Intrinsic::arm_cde_vcx1qa_predicated:
+ case Intrinsic::arm_cde_vcx2q_predicated:
+ case Intrinsic::arm_cde_vcx2qa_predicated:
+ case Intrinsic::arm_cde_vcx3q_predicated:
+ case Intrinsic::arm_cde_vcx3qa_predicated:
+ Tys = {CI->getOperand(1)->getType(), V2I1Ty};
+ break;
+ default:
+ llvm_unreachable("Unhandled Intrinsic!");
+ }
+
+ std::vector<Value *> Ops;
+ for (Value *Op : CI->args()) {
+ Type *Ty = Op->getType();
+ if (Ty->getScalarSizeInBits() == 1) {
+ Value *C1 = Builder.CreateCall(
+ Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::arm_mve_pred_v2i,
+ {VectorType::get(Builder.getInt1Ty(), 4, false)}),
+ Op);
+ Op = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
+ C1);
+ }
+ Ops.push_back(Op);
+ }
+
+ Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
+ return Builder.CreateCall(Fn, Ops, CI->getName());
+ }
+ llvm_unreachable("Unknown function for ARM CallBase upgrade.");
+}
+
+/// Upgrade a call to an old intrinsic. All argument and return casting must be
+/// provided to seamlessly integrate with existing context.
+void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
+ Function *F = CI->getCalledFunction();
+ LLVMContext &C = CI->getContext();
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
+
+ assert(F && "Intrinsic call is not direct?");
+
+ if (!NewFn) {
+ // Get the Function's name.
+ StringRef Name = F->getName();
+
+ assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
+ Name = Name.substr(5);
+
+ bool IsX86 = Name.startswith("x86.");
+ if (IsX86)
+ Name = Name.substr(4);
+ bool IsNVVM = Name.startswith("nvvm.");
+ if (IsNVVM)
+ Name = Name.substr(5);
+ bool IsARM = Name.startswith("arm.");
+ if (IsARM)
+ Name = Name.substr(4);
+
+ if (IsX86 && Name.startswith("sse4a.movnt.")) {
+ Module *M = F->getParent();
+ SmallVector<Metadata *, 1> Elts;
+ Elts.push_back(
+ ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
+ MDNode *Node = MDNode::get(C, Elts);
+
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+
+ // Nontemporal (unaligned) store of the 0'th element of the float/double
+ // vector.
+ Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
+ PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
+ Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
+ Value *Extract =
+ Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
+
+ StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
+ SI->setMetadata(M->getMDKindID("nontemporal"), Node);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ }
+
+ if (IsX86 && (Name.startswith("avx.movnt.") ||
+ Name.startswith("avx512.storent."))) {
+ Module *M = F->getParent();
+ SmallVector<Metadata *, 1> Elts;
+ Elts.push_back(
+ ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
+ MDNode *Node = MDNode::get(C, Elts);
+
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC = Builder.CreateBitCast(Arg0,
+ PointerType::getUnqual(Arg1->getType()),
+ "cast");
+ StoreInst *SI = Builder.CreateAlignedStore(
+ Arg1, BC,
+ Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
+ SI->setMetadata(M->getMDKindID("nontemporal"), Node);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ }
+
+ if (IsX86 && Name == "sse2.storel.dq") {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+
+ auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
+ Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
+ Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
+ Value *BC = Builder.CreateBitCast(Arg0,
+ PointerType::getUnqual(Elt->getType()),
+ "cast");
+ Builder.CreateAlignedStore(Elt, BC, Align(1));
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ }
+
+ if (IsX86 && (Name.startswith("sse.storeu.") ||
+ Name.startswith("sse2.storeu.") ||
+ Name.startswith("avx.storeu."))) {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+
+ Arg0 = Builder.CreateBitCast(Arg0,
+ PointerType::getUnqual(Arg1->getType()),
+ "cast");
+ Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ }
+
+ if (IsX86 && Name == "avx512.mask.store.ss") {
+ Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
+ UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+ Mask, false);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ }
+
+ if (IsX86 && (Name.startswith("avx512.mask.store"))) {
+ // "avx512.mask.storeu." or "avx512.mask.store."
+ bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
+ UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), Aligned);
+
+ // Remove intrinsic.
+ CI->eraseFromParent();
+ return;
+ }
+
+ Value *Rep;
+ // Upgrade packed integer vector compare intrinsics to compare instructions.
+ if (IsX86 && (Name.startswith("sse2.pcmp") ||
+ Name.startswith("avx2.pcmp"))) {
+ // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
+ bool CmpEq = Name[9] == 'e';
+ Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
+ CI->getArgOperand(0), CI->getArgOperand(1));
+ Rep = Builder.CreateSExt(Rep, CI->getType(), "");
+ } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
+ Type *ExtTy = Type::getInt32Ty(C);
+ if (CI->getOperand(0)->getType()->isIntegerTy(8))
+ ExtTy = Type::getInt64Ty(C);
+ unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
+ ExtTy->getPrimitiveSizeInBits();
+ Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
+ Rep = Builder.CreateVectorSplat(NumElts, Rep);
+ } else if (IsX86 && (Name == "sse.sqrt.ss" ||
+ Name == "sse2.sqrt.sd")) {
+ Value *Vec = CI->getArgOperand(0);
+ Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
+ Function *Intr = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::sqrt, Elt0->getType());
+ Elt0 = Builder.CreateCall(Intr, Elt0);
+ Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
+ } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
+ Name.startswith("sse2.sqrt.p") ||
+ Name.startswith("sse.sqrt.p"))) {
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::sqrt,
+ CI->getType()),
+ {CI->getArgOperand(0)});
+ } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
+ if (CI->arg_size() == 4 &&
+ (!isa<ConstantInt>(CI->getArgOperand(3)) ||
+ cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
+ Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
+ : Intrinsic::x86_avx512_sqrt_pd_512;
+
+ Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
+ IID), Args);
+ } else {
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::sqrt,
+ CI->getType()),
+ {CI->getArgOperand(0)});
+ }
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
+ Name.startswith("avx512.ptestnm"))) {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+ Value *Mask = CI->getArgOperand(2);
+ Rep = Builder.CreateAnd(Op0, Op1);
+ llvm::Type *Ty = Op0->getType();
+ Value *Zero = llvm::Constant::getNullValue(Ty);
+ ICmpInst::Predicate Pred =
+ Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
+ Rep = Builder.CreateICmp(Pred, Rep, Zero);
+ Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
+ } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){
+ unsigned NumElts = cast<FixedVectorType>(CI->getArgOperand(1)->getType())
+ ->getNumElements();
+ Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
+ unsigned NumElts = CI->getType()->getScalarSizeInBits();
+ Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
+ Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
+ int Indices[64];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i;
+
+ // First extract half of each vector. This gives better codegen than
+ // doing it in a single shuffle.
+ LHS = Builder.CreateShuffleVector(LHS, LHS,
+ makeArrayRef(Indices, NumElts / 2));
+ RHS = Builder.CreateShuffleVector(RHS, RHS,
+ makeArrayRef(Indices, NumElts / 2));
+ // Concat the vectors.
+ // NOTE: Operands have to be swapped to match intrinsic definition.
+ Rep = Builder.CreateShuffleVector(RHS, LHS,
+ makeArrayRef(Indices, NumElts));
+ Rep = Builder.CreateBitCast(Rep, CI->getType());
+ } else if (IsX86 && Name == "avx512.kand.w") {
+ Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
+ Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
+ Rep = Builder.CreateAnd(LHS, RHS);
+ Rep = Builder.CreateBitCast(Rep, CI->getType());
+ } else if (IsX86 && Name == "avx512.kandn.w") {
+ Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
+ Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
+ LHS = Builder.CreateNot(LHS);
+ Rep = Builder.CreateAnd(LHS, RHS);
+ Rep = Builder.CreateBitCast(Rep, CI->getType());
+ } else if (IsX86 && Name == "avx512.kor.w") {
+ Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
+ Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
+ Rep = Builder.CreateOr(LHS, RHS);
+ Rep = Builder.CreateBitCast(Rep, CI->getType());
+ } else if (IsX86 && Name == "avx512.kxor.w") {
+ Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
+ Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
+ Rep = Builder.CreateXor(LHS, RHS);
+ Rep = Builder.CreateBitCast(Rep, CI->getType());
+ } else if (IsX86 && Name == "avx512.kxnor.w") {
+ Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
+ Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
+ LHS = Builder.CreateNot(LHS);
+ Rep = Builder.CreateXor(LHS, RHS);
+ Rep = Builder.CreateBitCast(Rep, CI->getType());
+ } else if (IsX86 && Name == "avx512.knot.w") {
+ Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
+ Rep = Builder.CreateNot(Rep);
+ Rep = Builder.CreateBitCast(Rep, CI->getType());
+ } else if (IsX86 &&
+ (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
+ Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
+ Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
+ Rep = Builder.CreateOr(LHS, RHS);
+ Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
+ Value *C;
+ if (Name[14] == 'c')
+ C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
+ else
+ C = ConstantInt::getNullValue(Builder.getInt16Ty());
+ Rep = Builder.CreateICmpEQ(Rep, C);
+ Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
+ } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
+ Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
+ Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
+ Name == "sse.div.ss" || Name == "sse2.div.sd")) {
+ Type *I32Ty = Type::getInt32Ty(C);
+ Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
+ ConstantInt::get(I32Ty, 0));
+ Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
+ ConstantInt::get(I32Ty, 0));
+ Value *EltOp;
+ if (Name.contains(".add."))
+ EltOp = Builder.CreateFAdd(Elt0, Elt1);
+ else if (Name.contains(".sub."))
+ EltOp = Builder.CreateFSub(Elt0, Elt1);
+ else if (Name.contains(".mul."))
+ EltOp = Builder.CreateFMul(Elt0, Elt1);
+ else
+ EltOp = Builder.CreateFDiv(Elt0, Elt1);
+ Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
+ ConstantInt::get(I32Ty, 0));
+ } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
+ // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
+ bool CmpEq = Name[16] == 'e';
+ Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
+ } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
+ Type *OpTy = CI->getArgOperand(0)->getType();
+ unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
+ Intrinsic::ID IID;
+ switch (VecWidth) {
+ default: llvm_unreachable("Unexpected intrinsic");
+ case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
+ case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
+ case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
+ }
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getOperand(0), CI->getArgOperand(1) });
+ Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
+ Type *OpTy = CI->getArgOperand(0)->getType();
+ unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
+ unsigned EltWidth = OpTy->getScalarSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_fpclass_ps_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_fpclass_ps_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_fpclass_ps_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_fpclass_pd_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_fpclass_pd_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_fpclass_pd_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getOperand(0), CI->getArgOperand(1) });
+ Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.cmp.p")) {
+ SmallVector<Value *, 4> Args(CI->args());
+ Type *OpTy = Args[0]->getType();
+ unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
+ unsigned EltWidth = OpTy->getScalarSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Mask = Constant::getAllOnesValue(CI->getType());
+ if (VecWidth == 512)
+ std::swap(Mask, Args.back());
+ Args.push_back(Mask);
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ Args);
+ } else if (IsX86 && Name.startswith("avx512.mask.cmp.")) {
+ // Integer compare intrinsics.
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
+ } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) {
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
+ } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
+ Name.startswith("avx512.cvtw2mask.") ||
+ Name.startswith("avx512.cvtd2mask.") ||
+ Name.startswith("avx512.cvtq2mask."))) {
+ Value *Op = CI->getArgOperand(0);
+ Value *Zero = llvm::Constant::getNullValue(Op->getType());
+ Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
+ Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
+    } else if (IsX86 && (Name == "ssse3.pabs.b.128" ||
+ Name == "ssse3.pabs.w.128" ||
+ Name == "ssse3.pabs.d.128" ||
+ Name.startswith("avx2.pabs") ||
+ Name.startswith("avx512.mask.pabs"))) {
+ Rep = upgradeAbs(Builder, *CI);
+ } else if (IsX86 && (Name == "sse41.pmaxsb" ||
+ Name == "sse2.pmaxs.w" ||
+ Name == "sse41.pmaxsd" ||
+ Name.startswith("avx2.pmaxs") ||
+ Name.startswith("avx512.mask.pmaxs"))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smax);
+ } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
+ Name == "sse41.pmaxuw" ||
+ Name == "sse41.pmaxud" ||
+ Name.startswith("avx2.pmaxu") ||
+ Name.startswith("avx512.mask.pmaxu"))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umax);
+ } else if (IsX86 && (Name == "sse41.pminsb" ||
+ Name == "sse2.pmins.w" ||
+ Name == "sse41.pminsd" ||
+ Name.startswith("avx2.pmins") ||
+ Name.startswith("avx512.mask.pmins"))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::smin);
+ } else if (IsX86 && (Name == "sse2.pminu.b" ||
+ Name == "sse41.pminuw" ||
+ Name == "sse41.pminud" ||
+ Name.startswith("avx2.pminu") ||
+ Name.startswith("avx512.mask.pminu"))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::umin);
+ } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
+ Name == "avx2.pmulu.dq" ||
+ Name == "avx512.pmulu.dq.512" ||
+ Name.startswith("avx512.mask.pmulu.dq."))) {
+ Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
+ } else if (IsX86 && (Name == "sse41.pmuldq" ||
+ Name == "avx2.pmul.dq" ||
+ Name == "avx512.pmul.dq.512" ||
+ Name.startswith("avx512.mask.pmul.dq."))) {
+ Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
+ } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
+ Name == "sse2.cvtsi2sd" ||
+ Name == "sse.cvtsi642ss" ||
+ Name == "sse2.cvtsi642sd")) {
+ Rep = Builder.CreateSIToFP(
+ CI->getArgOperand(1),
+ cast<VectorType>(CI->getType())->getElementType());
+ Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
+ } else if (IsX86 && Name == "avx512.cvtusi2sd") {
+ Rep = Builder.CreateUIToFP(
+ CI->getArgOperand(1),
+ cast<VectorType>(CI->getType())->getElementType());
+ Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
+ } else if (IsX86 && Name == "sse2.cvtss2sd") {
+ Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
+ Rep = Builder.CreateFPExt(
+ Rep, cast<VectorType>(CI->getType())->getElementType());
+ Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
+ } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
+ Name == "sse2.cvtdq2ps" ||
+ Name == "avx.cvtdq2.pd.256" ||
+ Name == "avx.cvtdq2.ps.256" ||
+ Name.startswith("avx512.mask.cvtdq2pd.") ||
+ Name.startswith("avx512.mask.cvtudq2pd.") ||
+ Name.startswith("avx512.mask.cvtdq2ps.") ||
+ Name.startswith("avx512.mask.cvtudq2ps.") ||
+ Name.startswith("avx512.mask.cvtqq2pd.") ||
+ Name.startswith("avx512.mask.cvtuqq2pd.") ||
+ Name == "avx512.mask.cvtqq2ps.256" ||
+ Name == "avx512.mask.cvtqq2ps.512" ||
+ Name == "avx512.mask.cvtuqq2ps.256" ||
+ Name == "avx512.mask.cvtuqq2ps.512" ||
+ Name == "sse2.cvtps2pd" ||
+ Name == "avx.cvt.ps2.pd.256" ||
+ Name == "avx512.mask.cvtps2pd.128" ||
+ Name == "avx512.mask.cvtps2pd.256")) {
+ auto *DstTy = cast<FixedVectorType>(CI->getType());
+ Rep = CI->getArgOperand(0);
+ auto *SrcTy = cast<FixedVectorType>(Rep->getType());
+
+ unsigned NumDstElts = DstTy->getNumElements();
+ if (NumDstElts < SrcTy->getNumElements()) {
+ assert(NumDstElts == 2 && "Unexpected vector size");
+ Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1});
+ }
+
+ bool IsPS2PD = SrcTy->getElementType()->isFloatTy();
+ bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
+ if (IsPS2PD)
+ Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
+ else if (CI->arg_size() == 4 &&
+ (!isa<ConstantInt>(CI->getArgOperand(3)) ||
+ cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
+ Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
+ : Intrinsic::x86_avx512_sitofp_round;
+ Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
+ { DstTy, SrcTy });
+ Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
+ } else {
+ Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
+ : Builder.CreateSIToFP(Rep, DstTy, "cvt");
+ }
+
+ if (CI->arg_size() >= 3)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") ||
+ Name.startswith("vcvtph2ps."))) {
+ auto *DstTy = cast<FixedVectorType>(CI->getType());
+ Rep = CI->getArgOperand(0);
+ auto *SrcTy = cast<FixedVectorType>(Rep->getType());
+ unsigned NumDstElts = DstTy->getNumElements();
+ if (NumDstElts != SrcTy->getNumElements()) {
+ assert(NumDstElts == 4 && "Unexpected vector size");
+ Rep = Builder.CreateShuffleVector(Rep, Rep, ArrayRef<int>{0, 1, 2, 3});
+ }
+ Rep = Builder.CreateBitCast(
+ Rep, FixedVectorType::get(Type::getHalfTy(C), NumDstElts));
+ Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps");
+ if (CI->arg_size() >= 3)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && Name.startswith("avx512.mask.load")) {
+ // "avx512.mask.loadu." or "avx512.mask.load."
+ bool Aligned = Name[16] != 'u'; // "avx512.mask.loadu".
+ Rep =
+ UpgradeMaskedLoad(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), Aligned);
+ } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
+ auto *ResultTy = cast<FixedVectorType>(CI->getType());
+ Type *PtrTy = ResultTy->getElementType();
+
+ // Cast the pointer to element type.
+ Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
+ llvm::PointerType::getUnqual(PtrTy));
+
+ Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
+ ResultTy->getNumElements());
+
+ Function *ELd = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_expandload,
+ ResultTy);
+ Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
+ } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
+ auto *ResultTy = cast<VectorType>(CI->getArgOperand(1)->getType());
+ Type *PtrTy = ResultTy->getElementType();
+
+ // Cast the pointer to element type.
+ Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
+ llvm::PointerType::getUnqual(PtrTy));
+
+ Value *MaskVec =
+ getX86MaskVec(Builder, CI->getArgOperand(2),
+ cast<FixedVectorType>(ResultTy)->getNumElements());
+
+ Function *CSt = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::masked_compressstore,
+ ResultTy);
+ Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
+ } else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
+ Name.startswith("avx512.mask.expand."))) {
+ auto *ResultTy = cast<FixedVectorType>(CI->getType());
+
+ Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
+ ResultTy->getNumElements());
+
+ bool IsCompress = Name[12] == 'c';
+ Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
+ : Intrinsic::x86_avx512_mask_expand;
+ Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
+ Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
+ MaskVec });
+ } else if (IsX86 && Name.startswith("xop.vpcom")) {
+ bool IsSigned;
+ if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") ||
+ Name.endswith("uq"))
+ IsSigned = false;
+ else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") ||
+ Name.endswith("q"))
+ IsSigned = true;
+ else
+ llvm_unreachable("Unknown suffix");
+
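+    // Newer vpcom variants pass the condition code as a third operand; the
+    // older named forms (vpcomlt, vpcomeq, ...) encode it in the name.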
+ unsigned Imm;
+ if (CI->arg_size() == 3) {
+ Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ } else {
+ Name = Name.substr(9); // strip off "xop.vpcom"
+ if (Name.startswith("lt"))
+ Imm = 0;
+ else if (Name.startswith("le"))
+ Imm = 1;
+ else if (Name.startswith("gt"))
+ Imm = 2;
+ else if (Name.startswith("ge"))
+ Imm = 3;
+ else if (Name.startswith("eq"))
+ Imm = 4;
+ else if (Name.startswith("ne"))
+ Imm = 5;
+ else if (Name.startswith("false"))
+ Imm = 6;
+ else if (Name.startswith("true"))
+ Imm = 7;
+ else
+ llvm_unreachable("Unknown condition");
+ }
+
+ Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
+ } else if (IsX86 && Name.startswith("xop.vpcmov")) {
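+    // xop.vpcmov is a bitwise select: (Op0 & Sel) | (Op1 & ~Sel), so it
+    // lowers to plain logic ops.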
+ Value *Sel = CI->getArgOperand(2);
+ Value *NotSel = Builder.CreateNot(Sel);
+ Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
+ Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
+ Rep = Builder.CreateOr(Sel0, Sel1);
+ } else if (IsX86 && (Name.startswith("xop.vprot") ||
+ Name.startswith("avx512.prol") ||
+ Name.startswith("avx512.mask.prol"))) {
+ Rep = upgradeX86Rotate(Builder, *CI, false);
+ } else if (IsX86 && (Name.startswith("avx512.pror") ||
+ Name.startswith("avx512.mask.pror"))) {
+ Rep = upgradeX86Rotate(Builder, *CI, true);
+ } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
+ Name.startswith("avx512.mask.vpshld") ||
+ Name.startswith("avx512.maskz.vpshld"))) {
+ bool ZeroMask = Name[11] == 'z';
+ Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
+ } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
+ Name.startswith("avx512.mask.vpshrd") ||
+ Name.startswith("avx512.maskz.vpshrd"))) {
+ bool ZeroMask = Name[11] == 'z';
+ Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
+ } else if (IsX86 && Name == "sse42.crc32.64.8") {
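+    // Only the low 32 bits of the i64 accumulator participate in an 8-bit
+    // CRC32 step, so reuse the 32-bit intrinsic and zero-extend the result.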
+ Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_sse42_crc32_32_8);
+ Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
+ Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
+ Rep = Builder.CreateZExt(Rep, CI->getType(), "");
+ } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
+ Name.startswith("avx512.vbroadcast.s"))) {
+ // Replace broadcasts with a series of insertelements.
+ auto *VecTy = cast<FixedVectorType>(CI->getType());
+ Type *EltTy = VecTy->getElementType();
+ unsigned EltNum = VecTy->getNumElements();
+ Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
+ EltTy->getPointerTo());
+ Value *Load = Builder.CreateLoad(EltTy, Cast);
+ Type *I32Ty = Type::getInt32Ty(C);
+ Rep = PoisonValue::get(VecTy);
+ for (unsigned I = 0; I < EltNum; ++I)
+ Rep = Builder.CreateInsertElement(Rep, Load,
+ ConstantInt::get(I32Ty, I));
+ } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
+ Name.startswith("sse41.pmovzx") ||
+ Name.startswith("avx2.pmovsx") ||
+ Name.startswith("avx2.pmovzx") ||
+ Name.startswith("avx512.mask.pmovsx") ||
+ Name.startswith("avx512.mask.pmovzx"))) {
+ auto *DstTy = cast<FixedVectorType>(CI->getType());
+ unsigned NumDstElts = DstTy->getNumElements();
+
+ // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
+ SmallVector<int, 8> ShuffleMask(NumDstElts);
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ ShuffleMask[i] = i;
+
+ Value *SV =
+ Builder.CreateShuffleVector(CI->getArgOperand(0), ShuffleMask);
+
+ bool DoSext = (StringRef::npos != Name.find("pmovsx"));
+ Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
+ : Builder.CreateZExt(SV, DstTy);
+ // If there are 3 arguments, it's a masked intrinsic so we need a select.
+ if (CI->arg_size() == 3)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (Name == "avx512.mask.pmov.qd.256" ||
+ Name == "avx512.mask.pmov.qd.512" ||
+ Name == "avx512.mask.pmov.wb.256" ||
+ Name == "avx512.mask.pmov.wb.512") {
+ Type *Ty = CI->getArgOperand(1)->getType();
+ Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
+ Name == "avx2.vbroadcasti128")) {
+ // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
+ Type *EltTy = cast<VectorType>(CI->getType())->getElementType();
+ unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
+ auto *VT = FixedVectorType::get(EltTy, NumSrcElts);
+ Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
+ PointerType::getUnqual(VT));
+ Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
+ if (NumSrcElts == 2)
+ Rep = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 0, 1});
+ else
+ Rep = Builder.CreateShuffleVector(
+ Load, ArrayRef<int>{0, 1, 2, 3, 0, 1, 2, 3});
+ } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
+ Name.startswith("avx512.mask.shuf.f"))) {
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ Type *VT = CI->getType();
+ unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
+ unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
+ unsigned ControlBitsMask = NumLanes - 1;
+ unsigned NumControlBits = NumLanes / 2;
+ SmallVector<int, 8> ShuffleMask(0);
+
+ for (unsigned l = 0; l != NumLanes; ++l) {
+ unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
+ // We actually need the other source.
+ if (l >= NumLanes / 2)
+ LaneMask += NumLanes;
+ for (unsigned i = 0; i != NumElementsInLane; ++i)
+ ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
+ }
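+    // E.g. shuf.i32x4 on 512-bit vectors has NumLanes = 4 with two control
+    // bits per lane; Imm = 0xE4 keeps lanes in order, reading lanes 0-1 from
+    // the first source and lanes 2-3 from the second.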
+ Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
+ CI->getArgOperand(1), ShuffleMask);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
+ CI->getArgOperand(3));
+  } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
+                       Name.startswith("avx512.mask.broadcasti"))) {
+ unsigned NumSrcElts =
+ cast<FixedVectorType>(CI->getArgOperand(0)->getType())
+ ->getNumElements();
+ unsigned NumDstElts =
+ cast<FixedVectorType>(CI->getType())->getNumElements();
+
+ SmallVector<int, 8> ShuffleMask(NumDstElts);
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ ShuffleMask[i] = i % NumSrcElts;
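+    // E.g. broadcasting a <4 x float> into a <16 x float> result repeats the
+    // mask <0, 1, 2, 3> four times.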
+
+ Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
+ CI->getArgOperand(0),
+ ShuffleMask);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
+ Name.startswith("avx2.vbroadcast") ||
+ Name.startswith("avx512.pbroadcast") ||
+ Name.startswith("avx512.mask.broadcast.s"))) {
+ // Replace vp?broadcasts with a vector shuffle.
+ Value *Op = CI->getArgOperand(0);
+ ElementCount EC = cast<VectorType>(CI->getType())->getElementCount();
+ Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC);
+ SmallVector<int, 8> M;
+ ShuffleVectorInst::getShuffleMask(Constant::getNullValue(MaskTy), M);
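+    // getShuffleMask on a zeroinitializer mask yields an all-zero mask, i.e.
+    // a splat of element 0.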
+ Rep = Builder.CreateShuffleVector(Op, M);
+
+ if (CI->arg_size() == 3)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("sse2.padds.") ||
+ Name.startswith("avx2.padds.") ||
+ Name.startswith("avx512.padds.") ||
+ Name.startswith("avx512.mask.padds."))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::sadd_sat);
+ } else if (IsX86 && (Name.startswith("sse2.psubs.") ||
+ Name.startswith("avx2.psubs.") ||
+ Name.startswith("avx512.psubs.") ||
+ Name.startswith("avx512.mask.psubs."))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::ssub_sat);
+ } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
+ Name.startswith("avx2.paddus.") ||
+ Name.startswith("avx512.mask.paddus."))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::uadd_sat);
+ } else if (IsX86 && (Name.startswith("sse2.psubus.") ||
+ Name.startswith("avx2.psubus.") ||
+ Name.startswith("avx512.mask.psubus."))) {
+ Rep = UpgradeX86BinaryIntrinsics(Builder, *CI, Intrinsic::usub_sat);
+ } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
+ Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
+ CI->getArgOperand(1),
+ CI->getArgOperand(2),
+ CI->getArgOperand(3),
+ CI->getArgOperand(4),
+ false);
+ } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
+ Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
+ CI->getArgOperand(1),
+ CI->getArgOperand(2),
+ CI->getArgOperand(3),
+ CI->getArgOperand(4),
+ true);
+ } else if (IsX86 && (Name == "sse2.psll.dq" ||
+ Name == "avx2.psll.dq")) {
+ // 128/256-bit shift left specified in bits.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
+ Shift / 8); // Shift is in bits.
+ } else if (IsX86 && (Name == "sse2.psrl.dq" ||
+ Name == "avx2.psrl.dq")) {
+ // 128/256-bit shift right specified in bits.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
+ Shift / 8); // Shift is in bits.
+ } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
+ Name == "avx2.psll.dq.bs" ||
+ Name == "avx512.psll.dq.512")) {
+ // 128/256/512-bit shift left specified in bytes.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
+ } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
+ Name == "avx2.psrl.dq.bs" ||
+ Name == "avx512.psrl.dq.512")) {
+ // 128/256/512-bit shift right specified in bytes.
+ unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
+ } else if (IsX86 && (Name == "sse41.pblendw" ||
+ Name.startswith("sse41.blendp") ||
+ Name.startswith("avx.blend.p") ||
+ Name == "avx2.pblendw" ||
+ Name.startswith("avx2.pblendd."))) {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ auto *VecTy = cast<FixedVectorType>(CI->getType());
+ unsigned NumElts = VecTy->getNumElements();
+
+ SmallVector<int, 16> Idxs(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i)
+ Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
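+    // E.g. for <8 x i16> with Imm = 0x0F this builds the mask
+    // <8, 9, 10, 11, 4, 5, 6, 7>: set immediate bits select from Op1.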
+
+ Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
+ } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
+ Name == "avx2.vinserti128" ||
+ Name.startswith("avx512.mask.insert"))) {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ unsigned DstNumElts =
+ cast<FixedVectorType>(CI->getType())->getNumElements();
+ unsigned SrcNumElts =
+ cast<FixedVectorType>(Op1->getType())->getNumElements();
+ unsigned Scale = DstNumElts / SrcNumElts;
+
+ // Mask off the high bits of the immediate value; hardware ignores those.
+ Imm = Imm % Scale;
+
+ // Extend the second operand into a vector the size of the destination.
+ SmallVector<int, 8> Idxs(DstNumElts);
+ for (unsigned i = 0; i != SrcNumElts; ++i)
+ Idxs[i] = i;
+ for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
+ Idxs[i] = SrcNumElts;
+ Rep = Builder.CreateShuffleVector(Op1, Idxs);
+
+ // Insert the second operand into the first operand.
+
+ // Note that there is no guarantee that instruction lowering will actually
+ // produce a vinsertf128 instruction for the created shuffles. In
+ // particular, the 0 immediate case involves no lane changes, so it can
+ // be handled as a blend.
+
+ // Example of shuffle mask for 32-bit elements:
+ // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+ // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
+
+    // First fill with identity mask.
+ for (unsigned i = 0; i != DstNumElts; ++i)
+ Idxs[i] = i;
+ // Then replace the elements where we need to insert.
+ for (unsigned i = 0; i != SrcNumElts; ++i)
+ Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
+ Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
+
+ // If the intrinsic has a mask operand, handle that.
+ if (CI->arg_size() == 5)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
+ CI->getArgOperand(3));
+ } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
+ Name == "avx2.vextracti128" ||
+ Name.startswith("avx512.mask.vextract"))) {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ unsigned DstNumElts =
+ cast<FixedVectorType>(CI->getType())->getNumElements();
+ unsigned SrcNumElts =
+ cast<FixedVectorType>(Op0->getType())->getNumElements();
+ unsigned Scale = SrcNumElts / DstNumElts;
+
+ // Mask off the high bits of the immediate value; hardware ignores those.
+ Imm = Imm % Scale;
+
+ // Get indexes for the subvector of the input vector.
+ SmallVector<int, 8> Idxs(DstNumElts);
+ for (unsigned i = 0; i != DstNumElts; ++i) {
+ Idxs[i] = i + (Imm * DstNumElts);
+ }
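+    // E.g. Imm = 1 on a <8 x float> source extracts the upper 128-bit half
+    // with the mask <4, 5, 6, 7>.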
+ Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
+
+ // If the intrinsic has a mask operand, handle that.
+ if (CI->arg_size() == 4)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (!IsX86 && Name == "stackprotectorcheck") {
+ Rep = nullptr;
+ } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
+ Name.startswith("avx512.mask.perm.di."))) {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ auto *VecTy = cast<FixedVectorType>(CI->getType());
+ unsigned NumElts = VecTy->getNumElements();
+
+ SmallVector<int, 8> Idxs(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i)
+ Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
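+    // Each 2-bit immediate field selects within a 4-element group, e.g.
+    // Imm = 0x1B reverses each group: <3, 2, 1, 0, 7, 6, 5, 4> for 8 elements.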
+
+ Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
+
+ if (CI->arg_size() == 4)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
+ Name == "avx2.vperm2i128")) {
+ // The immediate permute control byte looks like this:
+ // [1:0] - select 128 bits from sources for low half of destination
+ // [2] - ignore
+ // [3] - zero low half of destination
+ // [5:4] - select 128 bits from sources for high half of destination
+ // [6] - ignore
+ // [7] - zero high half of destination
+
+ uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+
+ unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+ unsigned HalfSize = NumElts / 2;
+ SmallVector<int, 8> ShuffleMask(NumElts);
+
+ // Determine which operand(s) are actually in use for this instruction.
+ Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
+ Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);
+
+ // If needed, replace operands based on zero mask.
+ V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
+ V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;
+
+ // Permute low half of result.
+ unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
+ for (unsigned i = 0; i < HalfSize; ++i)
+ ShuffleMask[i] = StartIndex + i;
+
+ // Permute high half of result.
+ StartIndex = (Imm & 0x10) ? HalfSize : 0;
+ for (unsigned i = 0; i < HalfSize; ++i)
+ ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;
+
+ Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
+
+ } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
+ Name == "sse2.pshuf.d" ||
+ Name.startswith("avx512.mask.vpermil.p") ||
+ Name.startswith("avx512.mask.pshuf.d."))) {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ auto *VecTy = cast<FixedVectorType>(CI->getType());
+ unsigned NumElts = VecTy->getNumElements();
+ // Calculate the size of each index in the immediate.
+ unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
+ unsigned IdxMask = ((1 << IdxSize) - 1);
+
+ SmallVector<int, 8> Idxs(NumElts);
+    // Look up the bits for this element, wrapping around the immediate every
+    // 8 bits. Elements are grouped into sets of 2 or 4 elements, so we need
+    // to offset by the first index of each group.
+ for (unsigned i = 0; i != NumElts; ++i)
+ Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
+
+ Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
+
+ if (CI->arg_size() == 4)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name == "sse2.pshufl.w" ||
+ Name.startswith("avx512.mask.pshufl.w."))) {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+
+ SmallVector<int, 16> Idxs(NumElts);
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ for (unsigned i = 0; i != 4; ++i)
+ Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
+ for (unsigned i = 4; i != 8; ++i)
+ Idxs[i + l] = i + l;
+ }
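+    // E.g. Imm = 0x1B reverses the low four words of each 128-bit lane while
+    // words 4-7 pass through unchanged.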
+
+ Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
+
+ if (CI->arg_size() == 4)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name == "sse2.pshufh.w" ||
+ Name.startswith("avx512.mask.pshufh.w."))) {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+
+ SmallVector<int, 16> Idxs(NumElts);
+ for (unsigned l = 0; l != NumElts; l += 8) {
+ for (unsigned i = 0; i != 4; ++i)
+ Idxs[i + l] = i + l;
+ for (unsigned i = 0; i != 4; ++i)
+ Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
+ }
+
+ Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
+
+ if (CI->arg_size() == 4)
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+ unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+
+ unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
+ unsigned HalfLaneElts = NumLaneElts / 2;
+
+ SmallVector<int, 16> Idxs(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // Base index is the starting element of the lane.
+ Idxs[i] = i - (i % NumLaneElts);
+ // If we are half way through the lane switch to the other source.
+ if ((i % NumLaneElts) >= HalfLaneElts)
+ Idxs[i] += NumElts;
+      // Now select the specific element by adding HalfLaneElts bits from
+      // the immediate, wrapping around the immediate every 8 bits.
+ Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
+ }
+
+ Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
+
+ Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
+ CI->getArgOperand(3));
+ } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
+ Name.startswith("avx512.mask.movshdup") ||
+ Name.startswith("avx512.mask.movsldup"))) {
+ Value *Op0 = CI->getArgOperand(0);
+ unsigned NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+ unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
+
+ unsigned Offset = 0;
+ if (Name.startswith("avx512.mask.movshdup."))
+ Offset = 1;
+
+ SmallVector<int, 16> Idxs(NumElts);
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts)
+ for (unsigned i = 0; i != NumLaneElts; i += 2) {
+ Idxs[i + l + 0] = i + l + Offset;
+ Idxs[i + l + 1] = i + l + Offset;
+ }
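+    // E.g. on <4 x float>, movsldup (Offset = 0) duplicates the even
+    // elements, giving <0, 0, 2, 2>; movshdup gives <1, 1, 3, 3>.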
+
+ Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
+
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
+ Name.startswith("avx512.mask.unpckl."))) {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+ int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+ int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
+
+ SmallVector<int, 64> Idxs(NumElts);
+ for (int l = 0; l != NumElts; l += NumLaneElts)
+ for (int i = 0; i != NumLaneElts; ++i)
+ Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
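+    // E.g. for <4 x i32> lanes this interleaves the low halves of the two
+    // sources: <0, 4, 1, 5>.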
+
+ Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
+
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
+ Name.startswith("avx512.mask.unpckh."))) {
+ Value *Op0 = CI->getArgOperand(0);
+ Value *Op1 = CI->getArgOperand(1);
+ int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+ int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
+
+ SmallVector<int, 64> Idxs(NumElts);
+ for (int l = 0; l != NumElts; l += NumLaneElts)
+ for (int i = 0; i != NumLaneElts; ++i)
+ Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
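+    // E.g. for <4 x i32> lanes this interleaves the high halves of the two
+    // sources: <2, 6, 3, 7>.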
+
+ Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
+
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
+ Name.startswith("avx512.mask.pand."))) {
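+    // These logic ops also exist for FP vectors; bitcast to the integer
+    // type, do the bitwise op, and bitcast back.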
+ VectorType *FTy = cast<VectorType>(CI->getType());
+ VectorType *ITy = VectorType::getInteger(FTy);
+ Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
+ Builder.CreateBitCast(CI->getArgOperand(1), ITy));
+ Rep = Builder.CreateBitCast(Rep, FTy);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
+ Name.startswith("avx512.mask.pandn."))) {
+ VectorType *FTy = cast<VectorType>(CI->getType());
+ VectorType *ITy = VectorType::getInteger(FTy);
+ Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
+ Rep = Builder.CreateAnd(Rep,
+ Builder.CreateBitCast(CI->getArgOperand(1), ITy));
+ Rep = Builder.CreateBitCast(Rep, FTy);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
+ Name.startswith("avx512.mask.por."))) {
+ VectorType *FTy = cast<VectorType>(CI->getType());
+ VectorType *ITy = VectorType::getInteger(FTy);
+ Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
+ Builder.CreateBitCast(CI->getArgOperand(1), ITy));
+ Rep = Builder.CreateBitCast(Rep, FTy);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
+ Name.startswith("avx512.mask.pxor."))) {
+ VectorType *FTy = cast<VectorType>(CI->getType());
+ VectorType *ITy = VectorType::getInteger(FTy);
+ Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
+ Builder.CreateBitCast(CI->getArgOperand(1), ITy));
+ Rep = Builder.CreateBitCast(Rep, FTy);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
+ Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
+ Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
+ Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
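+    // Only the 512-bit forms carry a rounding-mode operand (operand 4);
+    // the narrower forms lower to a plain IR fadd.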
+ if (Name.endswith(".512")) {
+ Intrinsic::ID IID;
+ if (Name[17] == 's')
+ IID = Intrinsic::x86_avx512_add_ps_512;
+ else
+ IID = Intrinsic::x86_avx512_add_pd_512;
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(4) });
+ } else {
+ Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
+ }
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
+ if (Name.endswith(".512")) {
+ Intrinsic::ID IID;
+ if (Name[17] == 's')
+ IID = Intrinsic::x86_avx512_div_ps_512;
+ else
+ IID = Intrinsic::x86_avx512_div_pd_512;
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(4) });
+ } else {
+ Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
+ }
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
+ if (Name.endswith(".512")) {
+ Intrinsic::ID IID;
+ if (Name[17] == 's')
+ IID = Intrinsic::x86_avx512_mul_ps_512;
+ else
+ IID = Intrinsic::x86_avx512_mul_pd_512;
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(4) });
+ } else {
+ Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
+ }
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
+ if (Name.endswith(".512")) {
+ Intrinsic::ID IID;
+ if (Name[17] == 's')
+ IID = Intrinsic::x86_avx512_sub_ps_512;
+ else
+ IID = Intrinsic::x86_avx512_sub_pd_512;
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(4) });
+ } else {
+ Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
+ }
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
+ Name.startswith("avx512.mask.min.p")) &&
+ Name.drop_front(18) == ".512") {
+ bool IsDouble = Name[17] == 'd';
+ bool IsMin = Name[13] == 'i';
+ static const Intrinsic::ID MinMaxTbl[2][2] = {
+ { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
+ { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
+ };
+ Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(4) });
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+ CI->getArgOperand(2));
+ } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::ctlz,
+ CI->getType()),
+ { CI->getArgOperand(0), Builder.getInt1(false) });
+ Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
+ CI->getArgOperand(1));
+ } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
+ bool IsImmediate = Name[16] == 'i' ||
+ (Name.size() > 18 && Name[18] == 'i');
+ bool IsVariable = Name[16] == 'v';
+ char Size = Name[16] == '.' ? Name[17] :
+ Name[17] == '.' ? Name[18] :
+ Name[18] == '.' ? Name[19] :
+ Name[20];
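+    // Size picks out the element-type letter, whose position depends on
+    // which psll/pslli/psllv spelling this is.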
+
+ Intrinsic::ID IID;
+ if (IsVariable && Name[17] != '.') {
+ if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
+ IID = Intrinsic::x86_avx2_psllv_q;
+ else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
+ IID = Intrinsic::x86_avx2_psllv_q_256;
+ else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
+ IID = Intrinsic::x86_avx2_psllv_d;
+ else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
+ IID = Intrinsic::x86_avx2_psllv_d_256;
+ else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
+ IID = Intrinsic::x86_avx512_psllv_w_128;
+ else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
+ IID = Intrinsic::x86_avx512_psllv_w_256;
+ else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
+ IID = Intrinsic::x86_avx512_psllv_w_512;
+ else
+ llvm_unreachable("Unexpected size");
+ } else if (Name.endswith(".128")) {
+ if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
+ : Intrinsic::x86_sse2_psll_d;
+ else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
+ : Intrinsic::x86_sse2_psll_q;
+ else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
+ : Intrinsic::x86_sse2_psll_w;
+ else
+ llvm_unreachable("Unexpected size");
+ } else if (Name.endswith(".256")) {
+ if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
+ : Intrinsic::x86_avx2_psll_d;
+ else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
+ : Intrinsic::x86_avx2_psll_q;
+ else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
+ : Intrinsic::x86_avx2_psll_w;
+ else
+ llvm_unreachable("Unexpected size");
+ } else {
+ if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
+ IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
+ IsVariable ? Intrinsic::x86_avx512_psllv_d_512 :
+ Intrinsic::x86_avx512_psll_d_512;
+ else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
+ IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
+ IsVariable ? Intrinsic::x86_avx512_psllv_q_512 :
+ Intrinsic::x86_avx512_psll_q_512;
+ else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
+ IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
+ : Intrinsic::x86_avx512_psll_w_512;
+ else
+ llvm_unreachable("Unexpected size");
+ }
+
+ Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
+ } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
+ bool IsImmediate = Name[16] == 'i' ||
+ (Name.size() > 18 && Name[18] == 'i');
+ bool IsVariable = Name[16] == 'v';
+ char Size = Name[16] == '.' ? Name[17] :
+ Name[17] == '.' ? Name[18] :
+ Name[18] == '.' ? Name[19] :
+ Name[20];
+
+ Intrinsic::ID IID;
+ if (IsVariable && Name[17] != '.') {
+ if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
+ IID = Intrinsic::x86_avx2_psrlv_q;
+ else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
+ IID = Intrinsic::x86_avx2_psrlv_q_256;
+ else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
+ IID = Intrinsic::x86_avx2_psrlv_d;
+ else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
+ IID = Intrinsic::x86_avx2_psrlv_d_256;
+ else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
+ IID = Intrinsic::x86_avx512_psrlv_w_128;
+ else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
+ IID = Intrinsic::x86_avx512_psrlv_w_256;
+ else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
+ IID = Intrinsic::x86_avx512_psrlv_w_512;
+ else
+ llvm_unreachable("Unexpected size");
+ } else if (Name.endswith(".128")) {
+ if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
+ : Intrinsic::x86_sse2_psrl_d;
+ else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
+ : Intrinsic::x86_sse2_psrl_q;
+ else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
+ : Intrinsic::x86_sse2_psrl_w;
+ else
+ llvm_unreachable("Unexpected size");
+ } else if (Name.endswith(".256")) {
+ if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
+ : Intrinsic::x86_avx2_psrl_d;
+ else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
+ : Intrinsic::x86_avx2_psrl_q;
+ else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
+ : Intrinsic::x86_avx2_psrl_w;
+ else
+ llvm_unreachable("Unexpected size");
+ } else {
+ if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
+ IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 :
+ Intrinsic::x86_avx512_psrl_d_512;
+ else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
+ IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 :
+ Intrinsic::x86_avx512_psrl_q_512;
+      else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
+ : Intrinsic::x86_avx512_psrl_w_512;
+ else
+ llvm_unreachable("Unexpected size");
+ }
+
+ Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
+ } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
+ bool IsImmediate = Name[16] == 'i' ||
+ (Name.size() > 18 && Name[18] == 'i');
+ bool IsVariable = Name[16] == 'v';
+ char Size = Name[16] == '.' ? Name[17] :
+ Name[17] == '.' ? Name[18] :
+ Name[18] == '.' ? Name[19] :
+ Name[20];
+
+ Intrinsic::ID IID;
+ if (IsVariable && Name[17] != '.') {
+ if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
+ IID = Intrinsic::x86_avx2_psrav_d;
+ else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
+ IID = Intrinsic::x86_avx2_psrav_d_256;
+ else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
+ IID = Intrinsic::x86_avx512_psrav_w_128;
+ else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
+ IID = Intrinsic::x86_avx512_psrav_w_256;
+ else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
+ IID = Intrinsic::x86_avx512_psrav_w_512;
+ else
+ llvm_unreachable("Unexpected size");
+ } else if (Name.endswith(".128")) {
+ if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
+ : Intrinsic::x86_sse2_psra_d;
+ else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
+ IsVariable ? Intrinsic::x86_avx512_psrav_q_128 :
+ Intrinsic::x86_avx512_psra_q_128;
+ else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
+ IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
+ : Intrinsic::x86_sse2_psra_w;
+ else
+ llvm_unreachable("Unexpected size");
+ } else if (Name.endswith(".256")) {
+ if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
+ : Intrinsic::x86_avx2_psra_d;
+ else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
+ IsVariable ? Intrinsic::x86_avx512_psrav_q_256 :
+ Intrinsic::x86_avx512_psra_q_256;
+ else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
+ IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
+ : Intrinsic::x86_avx2_psra_w;
+ else
+ llvm_unreachable("Unexpected size");
+ } else {
+ if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
+ IsVariable ? Intrinsic::x86_avx512_psrav_d_512 :
+ Intrinsic::x86_avx512_psra_d_512;
+ else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
+ IsVariable ? Intrinsic::x86_avx512_psrav_q_512 :
+ Intrinsic::x86_avx512_psra_q_512;
+ else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
+ IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
+ : Intrinsic::x86_avx512_psra_w_512;
+ else
+ llvm_unreachable("Unexpected size");
+ }
+
+ Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
+ } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
+ Rep = upgradeMaskedMove(Builder, *CI);
+ } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
+ Rep = UpgradeMaskToInt(Builder, *CI);
+ } else if (IsX86 && Name.endswith(".movntdqa")) {
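+    // Non-temporal loads are expressed in IR as ordinary loads tagged with
+    // !nontemporal metadata holding the constant 1.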
+ Module *M = F->getParent();
+ MDNode *Node = MDNode::get(
+ C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
+
+ Value *Ptr = CI->getArgOperand(0);
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC = Builder.CreateBitCast(
+ Ptr, PointerType::getUnqual(CI->getType()), "cast");
+ LoadInst *LI = Builder.CreateAlignedLoad(
+ CI->getType(), BC,
+ Align(CI->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
+ LI->setMetadata(M->getMDKindID("nontemporal"), Node);
+ Rep = LI;
+ } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
+ Name.startswith("fma.vfmsub.") ||
+ Name.startswith("fma.vfnmadd.") ||
+ Name.startswith("fma.vfnmsub."))) {
+ bool NegMul = Name[6] == 'n';
+ bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
+ bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';
+
+ Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2) };
+
+ if (IsScalar) {
+ Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
+ Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
+ }
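+    // vfnmadd/vfnmsub negate the product (negating one multiplicand
+    // suffices); vfmsub/vfnmsub also negate the accumulator.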
+
+ if (NegMul && !IsScalar)
+ Ops[0] = Builder.CreateFNeg(Ops[0]);
+ if (NegMul && IsScalar)
+ Ops[1] = Builder.CreateFNeg(Ops[1]);
+ if (NegAcc)
+ Ops[2] = Builder.CreateFNeg(Ops[2]);
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
+ Intrinsic::fma,
+ Ops[0]->getType()),
+ Ops);
+
+ if (IsScalar)
+ Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
+ (uint64_t)0);
+ } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) {
+ Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2) };
+
+ Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
+ Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
+ Intrinsic::fma,
+ Ops[0]->getType()),
+ Ops);
+
+ Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
+ Rep, (uint64_t)0);
+ } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") ||
+ Name.startswith("avx512.maskz.vfmadd.s") ||
+ Name.startswith("avx512.mask3.vfmadd.s") ||
+ Name.startswith("avx512.mask3.vfmsub.s") ||
+ Name.startswith("avx512.mask3.vfnmsub.s"))) {
+ bool IsMask3 = Name[11] == '3';
+ bool IsMaskZ = Name[11] == 'z';
+    // Drop the "avx512.mask." prefix to simplify the remaining name checks.
+ Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
+ bool NegMul = Name[2] == 'n';
+ bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
+
+ Value *A = CI->getArgOperand(0);
+ Value *B = CI->getArgOperand(1);
+ Value *C = CI->getArgOperand(2);
+
+ if (NegMul && (IsMask3 || IsMaskZ))
+ A = Builder.CreateFNeg(A);
+ if (NegMul && !(IsMask3 || IsMaskZ))
+ B = Builder.CreateFNeg(B);
+ if (NegAcc)
+ C = Builder.CreateFNeg(C);
+
+ A = Builder.CreateExtractElement(A, (uint64_t)0);
+ B = Builder.CreateExtractElement(B, (uint64_t)0);
+ C = Builder.CreateExtractElement(C, (uint64_t)0);
+
+ if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
+ cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
+ Value *Ops[] = { A, B, C, CI->getArgOperand(4) };
+
+ Intrinsic::ID IID;
+ if (Name.back() == 'd')
+ IID = Intrinsic::x86_avx512_vfmadd_f64;
+ else
+ IID = Intrinsic::x86_avx512_vfmadd_f32;
+ Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
+ Rep = Builder.CreateCall(FMA, Ops);
+ } else {
+ Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
+ Intrinsic::fma,
+ A->getType());
+ Rep = Builder.CreateCall(FMA, { A, B, C });
+ }
+
+ Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
+ IsMask3 ? C : A;
+
+ // For Mask3 with NegAcc, we need to create a new extractelement that
+ // avoids the negation above.
+ if (NegAcc && IsMask3)
+ PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
+ (uint64_t)0);
+
+ Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3),
+ Rep, PassThru);
+ Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
+ Rep, (uint64_t)0);
+ } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") ||
+ Name.startswith("avx512.mask.vfnmadd.p") ||
+ Name.startswith("avx512.mask.vfnmsub.p") ||
+ Name.startswith("avx512.mask3.vfmadd.p") ||
+ Name.startswith("avx512.mask3.vfmsub.p") ||
+ Name.startswith("avx512.mask3.vfnmsub.p") ||
+ Name.startswith("avx512.maskz.vfmadd.p"))) {
+ bool IsMask3 = Name[11] == '3';
+ bool IsMaskZ = Name[11] == 'z';
+    // Drop the "avx512.mask." prefix to simplify the remaining name checks.
+ Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
+ bool NegMul = Name[2] == 'n';
+ bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';
+
+ Value *A = CI->getArgOperand(0);
+ Value *B = CI->getArgOperand(1);
+ Value *C = CI->getArgOperand(2);
+
+ if (NegMul && (IsMask3 || IsMaskZ))
+ A = Builder.CreateFNeg(A);
+ if (NegMul && !(IsMask3 || IsMaskZ))
+ B = Builder.CreateFNeg(B);
+ if (NegAcc)
+ C = Builder.CreateFNeg(C);
+
+ if (CI->arg_size() == 5 &&
+ (!isa<ConstantInt>(CI->getArgOperand(4)) ||
+ cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
+ Intrinsic::ID IID;
+ // Check the character before ".512" in string.
+ if (Name[Name.size()-5] == 's')
+ IID = Intrinsic::x86_avx512_vfmadd_ps_512;
+ else
+ IID = Intrinsic::x86_avx512_vfmadd_pd_512;
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { A, B, C, CI->getArgOperand(4) });
+ } else {
+ Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
+ Intrinsic::fma,
+ A->getType());
+ Rep = Builder.CreateCall(FMA, { A, B, C });
+ }
+
+ Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
+ IsMask3 ? CI->getArgOperand(2) :
+ CI->getArgOperand(0);
+
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+ } else if (IsX86 && Name.startswith("fma.vfmsubadd.p")) {
+ unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
+ unsigned EltWidth = CI->getType()->getScalarSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_fma_vfmaddsub_ps;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_fma_vfmaddsub_pd;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2) };
+ Ops[2] = Builder.CreateFNeg(Ops[2]);
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ Ops);
+ } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") ||
+ Name.startswith("avx512.mask3.vfmaddsub.p") ||
+ Name.startswith("avx512.maskz.vfmaddsub.p") ||
+ Name.startswith("avx512.mask3.vfmsubadd.p"))) {
+ bool IsMask3 = Name[11] == '3';
+ bool IsMaskZ = Name[11] == 'z';
+    // Drop the "avx512.mask." prefix to simplify the remaining name checks.
+ Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
+ bool IsSubAdd = Name[3] == 's';
+ if (CI->arg_size() == 5) {
+ Intrinsic::ID IID;
+ // Check the character before ".512" in string.
+ if (Name[Name.size()-5] == 's')
+ IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
+ else
+ IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;
+
+ Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(4) };
+ if (IsSubAdd)
+ Ops[2] = Builder.CreateFNeg(Ops[2]);
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ Ops);
+ } else {
+ int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
+
+ Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2) };
+
+ Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
+ Ops[0]->getType());
+ Value *Odd = Builder.CreateCall(FMA, Ops);
+ Ops[2] = Builder.CreateFNeg(Ops[2]);
+ Value *Even = Builder.CreateCall(FMA, Ops);
+
+ if (IsSubAdd)
+ std::swap(Even, Odd);
+
+ SmallVector<int, 32> Idxs(NumElts);
+ for (int i = 0; i != NumElts; ++i)
+ Idxs[i] = i + (i % 2) * NumElts;
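+      // Interleave the two results: even destination lanes come from Even,
+      // odd lanes from Odd, e.g. mask <0, 5, 2, 7> for four elements.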
+
+ Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
+ }
+
+ Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
+ IsMask3 ? CI->getArgOperand(2) :
+ CI->getArgOperand(0);
+
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+ } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
+ Name.startswith("avx512.maskz.pternlog."))) {
+ bool ZeroMask = Name[11] == 'z';
+ unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
+ unsigned EltWidth = CI->getType()->getScalarSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pternlog_d_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pternlog_d_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pternlog_d_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pternlog_q_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pternlog_q_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pternlog_q_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(3) };
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
+ Args);
+ Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
+ : CI->getArgOperand(0);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
+ } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
+ Name.startswith("avx512.maskz.vpmadd52"))) {
+ bool ZeroMask = Name[11] == 'z';
+ bool High = Name[20] == 'h' || Name[21] == 'h';
+ unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && !High)
+ IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
+ else if (VecWidth == 256 && !High)
+ IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
+ else if (VecWidth == 512 && !High)
+ IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
+ else if (VecWidth == 128 && High)
+ IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
+ else if (VecWidth == 256 && High)
+ IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
+ else if (VecWidth == 512 && High)
+ IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
+ CI->getArgOperand(2) };
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
+ Args);
+ Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
+ : CI->getArgOperand(0);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+ } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
+ Name.startswith("avx512.mask.vpermt2var.") ||
+ Name.startswith("avx512.maskz.vpermt2var."))) {
+ bool ZeroMask = Name[11] == 'z';
+ bool IndexForm = Name[17] == 'i';
+ Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
+ } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
+ Name.startswith("avx512.maskz.vpdpbusd.") ||
+ Name.startswith("avx512.mask.vpdpbusds.") ||
+ Name.startswith("avx512.maskz.vpdpbusds."))) {
+ bool ZeroMask = Name[11] == 'z';
+ bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
+ unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && !IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpbusd_128;
+ else if (VecWidth == 256 && !IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpbusd_256;
+ else if (VecWidth == 512 && !IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpbusd_512;
+ else if (VecWidth == 128 && IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpbusds_128;
+ else if (VecWidth == 256 && IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpbusds_256;
+ else if (VecWidth == 512 && IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpbusds_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2) };
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
+ Args);
+ Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
+ : CI->getArgOperand(0);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+ } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
+ Name.startswith("avx512.maskz.vpdpwssd.") ||
+ Name.startswith("avx512.mask.vpdpwssds.") ||
+ Name.startswith("avx512.maskz.vpdpwssds."))) {
+ bool ZeroMask = Name[11] == 'z';
+ bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
+ unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && !IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpwssd_128;
+ else if (VecWidth == 256 && !IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpwssd_256;
+ else if (VecWidth == 512 && !IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpwssd_512;
+ else if (VecWidth == 128 && IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpwssds_128;
+ else if (VecWidth == 256 && IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpwssds_256;
+ else if (VecWidth == 512 && IsSaturating)
+ IID = Intrinsic::x86_avx512_vpdpwssds_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2) };
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
+ Args);
+ Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
+ : CI->getArgOperand(0);
+ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+ } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
+ Name == "addcarry.u32" || Name == "addcarry.u64" ||
+ Name == "subborrow.u32" || Name == "subborrow.u64")) {
+ Intrinsic::ID IID;
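+    // Dispatch on the leading 'a'/'s' and the final digit of the
+    // ".u32"/".u64" suffix.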
+ if (Name[0] == 'a' && Name.back() == '2')
+ IID = Intrinsic::x86_addcarry_32;
+ else if (Name[0] == 'a' && Name.back() == '4')
+ IID = Intrinsic::x86_addcarry_64;
+ else if (Name[0] == 's' && Name.back() == '2')
+ IID = Intrinsic::x86_subborrow_32;
+ else if (Name[0] == 's' && Name.back() == '4')
+ IID = Intrinsic::x86_subborrow_64;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ // Make a call with 3 operands.
+ Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2)};
+ Value *NewCall = Builder.CreateCall(
+ Intrinsic::getDeclaration(CI->getModule(), IID),
+ Args);
+
+ // Extract the second result and store it.
+ Value *Data = Builder.CreateExtractValue(NewCall, 1);
+ // Cast the pointer to the right type.
+ Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
+ llvm::PointerType::getUnqual(Data->getType()));
+ Builder.CreateAlignedStore(Data, Ptr, Align(1));
+ // Replace the original call result with the first result of the new call.
+ Value *CF = Builder.CreateExtractValue(NewCall, 0);
+
+ CI->replaceAllUsesWith(CF);
+ Rep = nullptr;
+ } else if (IsX86 && Name.startswith("avx512.mask.") &&
+ upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
+ // Rep will be updated by the call in the condition.
+ } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
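+    // Lower nvvm abs directly to IR: abs(x) = x >= 0 ? x : -x.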
+ Value *Arg = CI->getArgOperand(0);
+ Value *Neg = Builder.CreateNeg(Arg, "neg");
+ Value *Cmp = Builder.CreateICmpSGE(
+ Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
+ } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") ||
+ Name.startswith("atomic.load.add.f64.p"))) {
+ Value *Ptr = CI->getArgOperand(0);
+ Value *Val = CI->getArgOperand(1);
+ Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, MaybeAlign(),
+ AtomicOrdering::SequentiallyConsistent);
+ } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
+ Name == "max.ui" || Name == "max.ull")) {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+ Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
+ ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
+ : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
+ } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
+ Name == "min.ui" || Name == "min.ull")) {
+ Value *Arg0 = CI->getArgOperand(0);
+ Value *Arg1 = CI->getArgOperand(1);
+ Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
+ ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
+ : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
+ Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
+ } else if (IsNVVM && Name == "clz.ll") {
+    // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
+ Value *Arg = CI->getArgOperand(0);
+ Value *Ctlz = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
+ {Arg->getType()}),
+ {Arg, Builder.getFalse()}, "ctlz");
+ Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
+ } else if (IsNVVM && Name == "popc.ll") {
+    // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an i64.
+ Value *Arg = CI->getArgOperand(0);
+ Value *Popc = Builder.CreateCall(
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
+ {Arg->getType()}),
+ Arg, "ctpop");
+ Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
+ } else if (IsNVVM && Name == "h2f") {
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::convert_from_fp16,
+ {Builder.getFloatTy()}),
+ CI->getArgOperand(0), "h2f");
+ } else if (IsARM) {
+ Rep = UpgradeARMIntrinsicCall(Name, CI, F, Builder);
+ } else {
+ llvm_unreachable("Unknown function for CallBase upgrade.");
+ }
+
+ if (Rep)
+ CI->replaceAllUsesWith(Rep);
+ CI->eraseFromParent();
+ return;
+ }
+
+ const auto &DefaultCase = [&]() -> void {
+ if (CI->getFunctionType() == NewFn->getFunctionType()) {
+ // Handle generic mangling change.
+ assert(
+ (CI->getCalledFunction()->getName() != NewFn->getName()) &&
+ "Unknown function for CallBase upgrade and isn't just a name change");
+ CI->setCalledFunction(NewFn);
+ return;
+ }
+
+ // This must be an upgrade from a named to a literal struct.
+ auto *OldST = cast<StructType>(CI->getType());
+ assert(OldST != NewFn->getReturnType() && "Return type must have changed");
+ assert(OldST->getNumElements() ==
+ cast<StructType>(NewFn->getReturnType())->getNumElements() &&
+ "Must have same number of elements");
+
+ SmallVector<Value *> Args(CI->args());
+ Value *NewCI = Builder.CreateCall(NewFn, Args);
+ Value *Res = PoisonValue::get(OldST);
+ for (unsigned Idx = 0; Idx < OldST->getNumElements(); ++Idx) {
+ Value *Elem = Builder.CreateExtractValue(NewCI, Idx);
+ Res = Builder.CreateInsertValue(Res, Elem, Idx);
+ }
+ CI->replaceAllUsesWith(Res);
+ CI->eraseFromParent();
+ return;
+ };
+ CallInst *NewCall = nullptr;
+ switch (NewFn->getIntrinsicID()) {
+ default: {
+ DefaultCase();
+ return;
+ }
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane: {
+ SmallVector<Value *, 4> Args(CI->args());
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
+ }
+
+ case Intrinsic::arm_neon_bfdot:
+ case Intrinsic::arm_neon_bfmmla:
+ case Intrinsic::arm_neon_bfmlalb:
+ case Intrinsic::arm_neon_bfmlalt:
+ case Intrinsic::aarch64_neon_bfdot:
+ case Intrinsic::aarch64_neon_bfmmla:
+ case Intrinsic::aarch64_neon_bfmlalb:
+ case Intrinsic::aarch64_neon_bfmlalt: {
+ SmallVector<Value *, 3> Args;
+ assert(CI->arg_size() == 3 &&
+ "Mismatch between function args and call args");
+ size_t OperandWidth =
+ CI->getArgOperand(1)->getType()->getPrimitiveSizeInBits();
+ assert((OperandWidth == 64 || OperandWidth == 128) &&
+ "Unexpected operand width");
+ Type *NewTy = FixedVectorType::get(Type::getBFloatTy(C), OperandWidth / 16);
+ auto Iter = CI->args().begin();
+ Args.push_back(*Iter++);
+ Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
+ Args.push_back(Builder.CreateBitCast(*Iter++, NewTy));
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
+ }
+
+ case Intrinsic::bitreverse:
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
+ break;
+
+ case Intrinsic::ctlz:
+ case Intrinsic::cttz:
+ assert(CI->arg_size() == 1 &&
+ "Mismatch between function args and call args");
+ NewCall =
+ Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
+ break;
+
+ case Intrinsic::objectsize: {
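+    // Older objectsize overloads had fewer arguments; missing
+    // null-is-unknown-size and dynamic flags default to false.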
+ Value *NullIsUnknownSize =
+ CI->arg_size() == 2 ? Builder.getFalse() : CI->getArgOperand(2);
+ Value *Dynamic =
+ CI->arg_size() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
+ NewCall = Builder.CreateCall(
+ NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic});
+ break;
+ }
+
+ case Intrinsic::ctpop:
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
+ break;
+
+ case Intrinsic::convert_from_fp16:
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
+ break;
+
+ case Intrinsic::dbg_value:
+ // Upgrade from the old version that had an extra offset argument.
+ assert(CI->arg_size() == 4);
+ // Drop nonzero offsets instead of attempting to upgrade them.
+ if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
+ if (Offset->isZeroValue()) {
+ NewCall = Builder.CreateCall(
+ NewFn,
+ {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
+ break;
+ }
+ CI->eraseFromParent();
+ return;
+
+ case Intrinsic::ptr_annotation:
+ // Upgrade from versions that lacked the annotation attribute argument.
+ if (CI->arg_size() != 4) {
+ DefaultCase();
+ return;
+ }
+
+ // Create a new call with an added null annotation attribute argument.
+ NewCall = Builder.CreateCall(
+ NewFn,
+ {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
+ CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ return;
+
+ case Intrinsic::var_annotation:
+ // Upgrade from versions that lacked the annotation attribute argument.
+ assert(CI->arg_size() == 4 &&
+ "Before LLVM 12.0 this intrinsic took four arguments");
+ // Create a new call with an added null annotation attribute argument.
+ NewCall = Builder.CreateCall(
+ NewFn,
+ {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
+ CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
+ CI->eraseFromParent();
+ return;
+
+ case Intrinsic::x86_xop_vfrcz_ss:
+ case Intrinsic::x86_xop_vfrcz_sd:
+ NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
+ break;
+
+ case Intrinsic::x86_xop_vpermil2pd:
+ case Intrinsic::x86_xop_vpermil2ps:
+ case Intrinsic::x86_xop_vpermil2pd_256:
+ case Intrinsic::x86_xop_vpermil2ps_256: {
+ SmallVector<Value *, 4> Args(CI->args());
+ VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
+ VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
+ Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
+ }
+
+ case Intrinsic::x86_sse41_ptestc:
+ case Intrinsic::x86_sse41_ptestz:
+ case Intrinsic::x86_sse41_ptestnzc: {
+    // The arguments for these intrinsics used to be v4f32 and have changed to
+    // v2i64. This is purely a no-op, since those are bitwise intrinsics, so
+    // the only thing required is a bitcast of both arguments.
+ // First, check the arguments have the old type.
+ Value *Arg0 = CI->getArgOperand(0);
+ if (Arg0->getType() != FixedVectorType::get(Type::getFloatTy(C), 4))
+ return;
+
+ // Old intrinsic, add bitcasts
+ Value *Arg1 = CI->getArgOperand(1);
+
+ auto *NewVecTy = FixedVectorType::get(Type::getInt64Ty(C), 2);
+
+ Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
+ Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
+
+ NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
+ break;
+ }
+
+ case Intrinsic::x86_rdtscp: {
+    // This used to take one argument. If the call has no arguments, it has
+    // already been upgraded.
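+    // A sketch of the change: the old form
+    //   call i64 @llvm.x86.rdtscp(i8* %ptr)
+    // becomes
+    //   %pair = call { i64, i32 } @llvm.x86.rdtscp()
+    // with the i32 processor ID stored through %ptr below.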
+ if (CI->getNumOperands() == 0)
+ return;
+
+ NewCall = Builder.CreateCall(NewFn);
+ // Extract the second result and store it.
+ Value *Data = Builder.CreateExtractValue(NewCall, 1);
+ // Cast the pointer to the right type.
+ Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
+ llvm::PointerType::getUnqual(Data->getType()));
+ Builder.CreateAlignedStore(Data, Ptr, Align(1));
+ // Replace the original call result with the first result of the new call.
+ Value *TSC = Builder.CreateExtractValue(NewCall, 0);
+
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(TSC);
+ CI->eraseFromParent();
+ return;
+ }
+
+ case Intrinsic::x86_sse41_insertps:
+ case Intrinsic::x86_sse41_dppd:
+ case Intrinsic::x86_sse41_dpps:
+ case Intrinsic::x86_sse41_mpsadbw:
+ case Intrinsic::x86_avx_dp_ps_256:
+ case Intrinsic::x86_avx2_mpsadbw: {
+ // Need to truncate the last argument from i32 to i8 -- this argument models
+ // an inherently 8-bit immediate operand to these x86 instructions.
+ SmallVector<Value *, 4> Args(CI->args());
+
+ // Replace the last argument with a trunc.
+ Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
+ }
+
+ case Intrinsic::x86_avx512_mask_cmp_pd_128:
+ case Intrinsic::x86_avx512_mask_cmp_pd_256:
+ case Intrinsic::x86_avx512_mask_cmp_pd_512:
+ case Intrinsic::x86_avx512_mask_cmp_ps_128:
+ case Intrinsic::x86_avx512_mask_cmp_ps_256:
+ case Intrinsic::x86_avx512_mask_cmp_ps_512: {
+ SmallVector<Value *, 4> Args(CI->args());
+ unsigned NumElts =
+ cast<FixedVectorType>(Args[0]->getType())->getNumElements();
+ Args[3] = getX86MaskVec(Builder, Args[3], NumElts);
+
+ NewCall = Builder.CreateCall(NewFn, Args);
+ Value *Res = ApplyX86MaskOn1BitsVec(Builder, NewCall, nullptr);
+
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(Res);
+ CI->eraseFromParent();
+ return;
+ }
+
+ case Intrinsic::thread_pointer: {
+ NewCall = Builder.CreateCall(NewFn, {});
+ break;
+ }
+
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end: {
+ SmallVector<Value *, 4> Args(CI->args());
+ NewCall = Builder.CreateCall(NewFn, Args);
+ break;
+ }
+ case Intrinsic::masked_load:
+ case Intrinsic::masked_store:
+ case Intrinsic::masked_gather:
+ case Intrinsic::masked_scatter: {
+ SmallVector<Value *, 4> Args(CI->args());
+ NewCall = Builder.CreateCall(NewFn, Args);
+ NewCall->copyMetadata(*CI);
+ break;
+ }
+
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset: {
+ // We have to make sure that the call signature is what we're expecting.
+ // We only want to change the old signatures by removing the alignment arg:
+    //    @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
+    //      -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
+ // @llvm.memset...(i8*, i8, i[32|64], i32, i1)
+ // -> @llvm.memset...(i8*, i8, i[32|64], i1)
+ // Note: i8*'s in the above can be any pointer type
+ if (CI->arg_size() != 5) {
+ DefaultCase();
+ return;
+ }
+ // Remove alignment argument (3), and add alignment attributes to the
+ // dest/src pointers.
+ Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(4)};
+ NewCall = Builder.CreateCall(NewFn, Args);
+ AttributeList OldAttrs = CI->getAttributes();
+ AttributeList NewAttrs = AttributeList::get(
+ C, OldAttrs.getFnAttrs(), OldAttrs.getRetAttrs(),
+ {OldAttrs.getParamAttrs(0), OldAttrs.getParamAttrs(1),
+ OldAttrs.getParamAttrs(2), OldAttrs.getParamAttrs(4)});
+ NewCall->setAttributes(NewAttrs);
+ auto *MemCI = cast<MemIntrinsic>(NewCall);
+ // All mem intrinsics support dest alignment.
+ const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
+ MemCI->setDestAlignment(Align->getMaybeAlignValue());
+ // Memcpy/Memmove also support source alignment.
+ if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
+ MTI->setSourceAlignment(Align->getMaybeAlignValue());
+ break;
+ }
+ }
+ assert(NewCall && "Should have either set this variable or returned through "
+ "the default case");
+ NewCall->takeName(CI);
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+}
+
+void llvm::UpgradeCallsToIntrinsic(Function *F) {
+ assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
+
+ // Check if this function should be upgraded and get the replacement function
+ // if there is one.
+ Function *NewFn;
+ if (UpgradeIntrinsicFunction(F, NewFn)) {
+ // Replace all users of the old function with the new function or new
+ // instructions. This is not a range loop because the call is deleted.
+ for (User *U : make_early_inc_range(F->users()))
+ if (CallBase *CB = dyn_cast<CallBase>(U))
+ UpgradeIntrinsicCall(CB, NewFn);
+
+ // Remove old function, no longer used, from the module.
+ F->eraseFromParent();
+ }
+}
+
+MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
+ // Check if the tag uses struct-path aware TBAA format.
+ if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
+ return &MD;
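+  // Upgrade the old scalar TBAA format to the struct-path aware format.
+  // A sketch (!MD is the old node):
+  //   !{!"int", !root}        ->  !{!MD, !MD, i64 0}
+  //   !{!"int", !root, i64 1} ->  !{!S, !S, i64 0, i64 1}, !S = !{!"int", !root}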
+
+ auto &Context = MD.getContext();
+ if (MD.getNumOperands() == 3) {
+ Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
+ MDNode *ScalarType = MDNode::get(Context, Elts);
+ // Create a MDNode <ScalarType, ScalarType, offset 0, const>
+ Metadata *Elts2[] = {ScalarType, ScalarType,
+ ConstantAsMetadata::get(
+ Constant::getNullValue(Type::getInt64Ty(Context))),
+ MD.getOperand(2)};
+ return MDNode::get(Context, Elts2);
+ }
+ // Create a MDNode <MD, MD, offset 0>
+ Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
+ Type::getInt64Ty(Context)))};
+ return MDNode::get(Context, Elts);
+}
+
+Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
+ Instruction *&Temp) {
+ if (Opc != Instruction::BitCast)
+ return nullptr;
+
+ Temp = nullptr;
+ Type *SrcTy = V->getType();
+ if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
+ SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
+ LLVMContext &Context = V->getContext();
+
+    // We have no information about the target data layout, so we assume that
+    // the maximum pointer size is 64 bits.
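+    // A sketch: a bitcast between pointers in different address spaces, e.g.
+    //   %r = bitcast i8 addrspace(1)* %v to i8 addrspace(2)*
+    // is rewritten as
+    //   %t = ptrtoint i8 addrspace(1)* %v to i64
+    //   %r = inttoptr i64 %t to i8 addrspace(2)*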
+ Type *MidTy = Type::getInt64Ty(Context);
+ Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
+
+ return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
+ }
+
+ return nullptr;
+}
+
+Constant *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
+ if (Opc != Instruction::BitCast)
+ return nullptr;
+
+ Type *SrcTy = C->getType();
+ if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
+ SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
+ LLVMContext &Context = C->getContext();
+
+    // We have no information about the target data layout, so we assume that
+    // the maximum pointer size is 64 bits.
+ Type *MidTy = Type::getInt64Ty(Context);
+
+ return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
+ DestTy);
+ }
+
+ return nullptr;
+}
+
+/// Check the debug info version number; if it is outdated, drop the debug
+/// info. Return true if the module is modified.
+bool llvm::UpgradeDebugInfo(Module &M) {
+ unsigned Version = getDebugMetadataVersionFromModule(M);
+ if (Version == DEBUG_METADATA_VERSION) {
+ bool BrokenDebugInfo = false;
+ if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
+ report_fatal_error("Broken module found, compilation aborted!");
+ if (!BrokenDebugInfo)
+ // Everything is ok.
+ return false;
+ else {
+ // Diagnose malformed debug info.
+ DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
+ M.getContext().diagnose(Diag);
+ }
+ }
+ bool Modified = StripDebugInfo(M);
+ if (Modified && Version != DEBUG_METADATA_VERSION) {
+ // Diagnose a version mismatch.
+ DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
+ M.getContext().diagnose(DiagVersion);
+ }
+ return Modified;
+}
+
+/// This checks for the ObjC retain/release marker which should be upgraded.
+/// It returns true if the module is modified.
+static bool UpgradeRetainReleaseMarker(Module &M) {
+ bool Changed = false;
+ const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
+ NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
+ if (ModRetainReleaseMarker) {
+ MDNode *Op = ModRetainReleaseMarker->getOperand(0);
+ if (Op) {
+ MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
+ if (ID) {
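+        // The old marker string used '#' to separate the instruction text
+        // from its comment, e.g. "mov fp, fp#marker" (an illustrative value;
+        // the real marker is target-specific); rejoin the halves with ';'.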
+ SmallVector<StringRef, 4> ValueComp;
+ ID->getString().split(ValueComp, "#");
+ if (ValueComp.size() == 2) {
+ std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
+ ID = MDString::get(M.getContext(), NewValue);
+ }
+ M.addModuleFlag(Module::Error, MarkerKey, ID);
+ M.eraseNamedMetadata(ModRetainReleaseMarker);
+ Changed = true;
+ }
+ }
+ }
+ return Changed;
+}
+
+void llvm::UpgradeARCRuntime(Module &M) {
+ // This lambda converts normal function calls to ARC runtime functions to
+ // intrinsic calls.
+ auto UpgradeToIntrinsic = [&](const char *OldFunc,
+ llvm::Intrinsic::ID IntrinsicFunc) {
+ Function *Fn = M.getFunction(OldFunc);
+
+ if (!Fn)
+ return;
+
+ Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
+
+ for (User *U : make_early_inc_range(Fn->users())) {
+ CallInst *CI = dyn_cast<CallInst>(U);
+ if (!CI || CI->getCalledFunction() != Fn)
+ continue;
+
+ IRBuilder<> Builder(CI->getParent(), CI->getIterator());
+ FunctionType *NewFuncTy = NewFn->getFunctionType();
+ SmallVector<Value *, 2> Args;
+
+ // Don't upgrade the intrinsic if it's not valid to bitcast the return
+ // value to the return type of the old function.
+ if (NewFuncTy->getReturnType() != CI->getType() &&
+ !CastInst::castIsValid(Instruction::BitCast, CI,
+ NewFuncTy->getReturnType()))
+ continue;
+
+ bool InvalidCast = false;
+
+ for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
+ Value *Arg = CI->getArgOperand(I);
+
+ // Bitcast argument to the parameter type of the new function if it's
+ // not a variadic argument.
+ if (I < NewFuncTy->getNumParams()) {
+ // Don't upgrade the intrinsic if it's not valid to bitcast the argument
+ // to the parameter type of the new function.
+ if (!CastInst::castIsValid(Instruction::BitCast, Arg,
+ NewFuncTy->getParamType(I))) {
+ InvalidCast = true;
+ break;
+ }
+ Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
+ }
+ Args.push_back(Arg);
+ }
+
+ if (InvalidCast)
+ continue;
+
+ // Create a call instruction that calls the new function.
+ CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
+ NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
+ NewCall->takeName(CI);
+
+ // Bitcast the return value back to the type of the old call.
+ Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
+
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewRetVal);
+ CI->eraseFromParent();
+ }
+
+ if (Fn->use_empty())
+ Fn->eraseFromParent();
+ };
+
+ // Unconditionally convert a call to "clang.arc.use" to a call to
+ // "llvm.objc.clang.arc.use".
+ UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
+
+  // Upgrade the retain/release marker. If the marker does not need upgrading,
+  // the module is either already new enough to contain the new intrinsics or
+  // is not ARC at all, so there is no need to upgrade the runtime calls.
+ if (!UpgradeRetainReleaseMarker(M))
+ return;
+
+ std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
+ {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
+ {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
+ {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
+ {"objc_autoreleaseReturnValue",
+ llvm::Intrinsic::objc_autoreleaseReturnValue},
+ {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
+ {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
+ {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
+ {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
+ {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
+ {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
+ {"objc_release", llvm::Intrinsic::objc_release},
+ {"objc_retain", llvm::Intrinsic::objc_retain},
+ {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
+ {"objc_retainAutoreleaseReturnValue",
+ llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
+ {"objc_retainAutoreleasedReturnValue",
+ llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
+ {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
+ {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
+ {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
+ {"objc_unsafeClaimAutoreleasedReturnValue",
+ llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
+ {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
+ {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
+ {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
+ {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
+ {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
+ {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
+ {"objc_arc_annotation_topdown_bbstart",
+ llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
+ {"objc_arc_annotation_topdown_bbend",
+ llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
+ {"objc_arc_annotation_bottomup_bbstart",
+ llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
+ {"objc_arc_annotation_bottomup_bbend",
+ llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
+
+ for (auto &I : RuntimeFuncs)
+ UpgradeToIntrinsic(I.first, I.second);
+}
+
+bool llvm::UpgradeModuleFlags(Module &M) {
+ NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
+ if (!ModFlags)
+ return false;
+
+ bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
+ bool HasSwiftVersionFlag = false;
+ uint8_t SwiftMajorVersion, SwiftMinorVersion;
+ uint32_t SwiftABIVersion;
+ auto Int8Ty = Type::getInt8Ty(M.getContext());
+ auto Int32Ty = Type::getInt32Ty(M.getContext());
+
+ for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
+ MDNode *Op = ModFlags->getOperand(I);
+ if (Op->getNumOperands() != 3)
+ continue;
+ MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
+ if (!ID)
+ continue;
+ if (ID->getString() == "Objective-C Image Info Version")
+ HasObjCFlag = true;
+ if (ID->getString() == "Objective-C Class Properties")
+ HasClassProperties = true;
+    // Upgrade PIC/PIE module flags. The module flag behavior for these two
+    // fields was Error and is now Max.
+ if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
+ if (auto *Behavior =
+ mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
+ if (Behavior->getLimitedValue() == Module::Error) {
+ Type *Int32Ty = Type::getInt32Ty(M.getContext());
+ Metadata *Ops[3] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
+ MDString::get(M.getContext(), ID->getString()),
+ Op->getOperand(2)};
+ ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
+ Changed = true;
+ }
+ }
+ }
+
+    // Upgrade branch protection and return address signing module flags. The
+    // module flag behavior for these fields was Error and is now Min.
+ if (ID->getString() == "branch-target-enforcement" ||
+ ID->getString().startswith("sign-return-address")) {
+ if (auto *Behavior =
+ mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
+ if (Behavior->getLimitedValue() == Module::Error) {
+ Type *Int32Ty = Type::getInt32Ty(M.getContext());
+ Metadata *Ops[3] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Min)),
+ Op->getOperand(1), Op->getOperand(2)};
+ ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
+ Changed = true;
+ }
+ }
+ }
+
+    // Upgrade the Objective-C Image Info Section. Remove the whitespace in
+    // the section name so that llvm-lto will not complain about mismatched
+    // module flags that are functionally the same.
+ if (ID->getString() == "Objective-C Image Info Section") {
+ if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
+ SmallVector<StringRef, 4> ValueComp;
+ Value->getString().split(ValueComp, " ");
+ if (ValueComp.size() != 1) {
+ std::string NewValue;
+ for (auto &S : ValueComp)
+ NewValue += S.str();
+ Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
+ MDString::get(M.getContext(), NewValue)};
+ ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
+ Changed = true;
+ }
+ }
+ }
+
+    // The IR upgrader turns an i32-typed "Objective-C Garbage Collection"
+    // flag into an i8 value. If the higher bits are set, it adds new module
+    // flags for the Swift info.
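+    // Packed layout implied by the masks below:
+    //   bits 0-7:   GC value kept in the flag
+    //   bits 8-15:  Swift ABI version
+    //   bits 16-23: Swift minor version
+    //   bits 24-31: Swift major version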
+ if (ID->getString() == "Objective-C Garbage Collection") {
+ auto Md = dyn_cast<ConstantAsMetadata>(Op->getOperand(2));
+ if (Md) {
+ assert(Md->getValue() && "Expected non-empty metadata");
+ auto Type = Md->getValue()->getType();
+ if (Type == Int8Ty)
+ continue;
+ unsigned Val = Md->getValue()->getUniqueInteger().getZExtValue();
+ if ((Val & 0xff) != Val) {
+ HasSwiftVersionFlag = true;
+ SwiftABIVersion = (Val & 0xff00) >> 8;
+ SwiftMajorVersion = (Val & 0xff000000) >> 24;
+ SwiftMinorVersion = (Val & 0xff0000) >> 16;
+ }
+ Metadata *Ops[3] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty,Module::Error)),
+ Op->getOperand(1),
+ ConstantAsMetadata::get(ConstantInt::get(Int8Ty,Val & 0xff))};
+ ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
+ Changed = true;
+ }
+ }
+ }
+
+ // "Objective-C Class Properties" is recently added for Objective-C. We
+ // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
+ // flag of value 0, so we can correclty downgrade this flag when trying to
+ // link an ObjC bitcode without this module flag with an ObjC bitcode with
+ // this module flag.
+ if (HasObjCFlag && !HasClassProperties) {
+ M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
+ (uint32_t)0);
+ Changed = true;
+ }
+
+ if (HasSwiftVersionFlag) {
+ M.addModuleFlag(Module::Error, "Swift ABI Version",
+ SwiftABIVersion);
+ M.addModuleFlag(Module::Error, "Swift Major Version",
+ ConstantInt::get(Int8Ty, SwiftMajorVersion));
+ M.addModuleFlag(Module::Error, "Swift Minor Version",
+ ConstantInt::get(Int8Ty, SwiftMinorVersion));
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+void llvm::UpgradeSectionAttributes(Module &M) {
+ auto TrimSpaces = [](StringRef Section) -> std::string {
+ SmallVector<StringRef, 5> Components;
+ Section.split(Components, ',');
+
+ SmallString<32> Buffer;
+ raw_svector_ostream OS(Buffer);
+
+ for (auto Component : Components)
+ OS << ',' << Component.trim();
+
+ return std::string(OS.str().substr(1));
+ };
+
+ for (auto &GV : M.globals()) {
+ if (!GV.hasSection())
+ continue;
+
+ StringRef Section = GV.getSection();
+
+ if (!Section.startswith("__DATA, __objc_catlist"))
+ continue;
+
+ // __DATA, __objc_catlist, regular, no_dead_strip
+ // __DATA,__objc_catlist,regular,no_dead_strip
+ GV.setSection(TrimSpaces(Section));
+ }
+}
+
+namespace {
+// Prior to LLVM 10.0, the strictfp attribute could be used on individual
+// callsites within a function that did not also have the strictfp attribute.
+// Since 10.0, if strict FP semantics are needed within a function, the
+// function must have the strictfp attribute and all calls within the function
+// must also have the strictfp attribute. This latter restriction is
+// necessary to prevent unwanted libcall simplification when a function is
+// being cloned (such as for inlining).
+//
+// The "dangling" strictfp attribute usage was only used to prevent constant
+// folding and other libcall simplification. The nobuiltin attribute on the
+// callsite has the same effect.
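+//
+// For example (a sketch), inside a caller that lacks the strictfp attribute:
+//   call double @sin(double %x) strictfp
+// is rewritten by the visitor below to
+//   call double @sin(double %x) nobuiltin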
+struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {
+ StrictFPUpgradeVisitor() = default;
+
+ void visitCallBase(CallBase &Call) {
+ if (!Call.isStrictFP())
+ return;
+ if (isa<ConstrainedFPIntrinsic>(&Call))
+ return;
+ // If we get here, the caller doesn't have the strictfp attribute
+ // but this callsite does. Replace the strictfp attribute with nobuiltin.
+ Call.removeFnAttr(Attribute::StrictFP);
+ Call.addFnAttr(Attribute::NoBuiltin);
+ }
+};
+} // namespace
+
+void llvm::UpgradeFunctionAttributes(Function &F) {
+ // If a function definition doesn't have the strictfp attribute,
+ // convert any callsite strictfp attributes to nobuiltin.
+ if (!F.isDeclaration() && !F.hasFnAttribute(Attribute::StrictFP)) {
+ StrictFPUpgradeVisitor SFPV;
+ SFPV.visit(F);
+ }
+
+  // Remove all incompatible attributes from the function.
+ F.removeRetAttrs(AttributeFuncs::typeIncompatible(F.getReturnType()));
+ for (auto &Arg : F.args())
+ Arg.removeAttrs(AttributeFuncs::typeIncompatible(Arg.getType()));
+}
+
+static bool isOldLoopArgument(Metadata *MD) {
+ auto *T = dyn_cast_or_null<MDTuple>(MD);
+ if (!T)
+ return false;
+ if (T->getNumOperands() < 1)
+ return false;
+ auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
+ if (!S)
+ return false;
+ return S->getString().startswith("llvm.vectorizer.");
+}
+
+static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
+ StringRef OldPrefix = "llvm.vectorizer.";
+ assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
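+  // E.g. "llvm.vectorizer.width" becomes "llvm.loop.vectorize.width", while
+  // "llvm.vectorizer.unroll" maps to "llvm.loop.interleave.count" below.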
+
+ if (OldTag == "llvm.vectorizer.unroll")
+ return MDString::get(C, "llvm.loop.interleave.count");
+
+ return MDString::get(
+ C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
+ .str());
+}
+
+static Metadata *upgradeLoopArgument(Metadata *MD) {
+ auto *T = dyn_cast_or_null<MDTuple>(MD);
+ if (!T)
+ return MD;
+ if (T->getNumOperands() < 1)
+ return MD;
+ auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
+ if (!OldTag)
+ return MD;
+ if (!OldTag->getString().startswith("llvm.vectorizer."))
+ return MD;
+
+ // This has an old tag. Upgrade it.
+ SmallVector<Metadata *, 8> Ops;
+ Ops.reserve(T->getNumOperands());
+ Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
+ for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
+ Ops.push_back(T->getOperand(I));
+
+ return MDTuple::get(T->getContext(), Ops);
+}
+
+MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
+ auto *T = dyn_cast<MDTuple>(&N);
+ if (!T)
+ return &N;
+
+ if (none_of(T->operands(), isOldLoopArgument))
+ return &N;
+
+ SmallVector<Metadata *, 8> Ops;
+ Ops.reserve(T->getNumOperands());
+ for (Metadata *MD : T->operands())
+ Ops.push_back(upgradeLoopArgument(MD));
+
+ return MDTuple::get(T->getContext(), Ops);
+}
+
+std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
+ Triple T(TT);
+  // For AMDGPU we upgrade older DataLayouts to include the default globals
+  // address space of 1.
+ if (T.isAMDGPU() && !DL.contains("-G") && !DL.startswith("G")) {
+ return DL.empty() ? std::string("G1") : (DL + "-G1").str();
+ }
+
+ std::string Res = DL.str();
+ if (!T.isX86())
+ return Res;
+
+ // If the datalayout matches the expected format, add pointer size address
+ // spaces to the datalayout.
+ std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
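+  // A sketch: "e-m:e-i64:64-f80:128-n8:16:32:64-S128" becomes
+  // "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128".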
+ if (!DL.contains(AddrSpaces)) {
+ SmallVector<StringRef, 4> Groups;
+ Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
+ if (R.match(DL, &Groups))
+ Res = (Groups[1] + AddrSpaces + Groups[3]).str();
+ }
+
+ // For 32-bit MSVC targets, raise the alignment of f80 values to 16 bytes.
+ // Raising the alignment is safe because Clang did not produce f80 values in
+ // the MSVC environment before this upgrade was added.
+ if (T.isWindowsMSVCEnvironment() && !T.isArch64Bit()) {
+ StringRef Ref = Res;
+ auto I = Ref.find("-f80:32-");
+ if (I != StringRef::npos)
+ Res = (Ref.take_front(I) + "-f80:128-" + Ref.drop_front(I + 8)).str();
+ }
+
+ return Res;
+}
+
+void llvm::UpgradeAttributes(AttrBuilder &B) {
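+  // Translate the legacy frame pointer attributes, e.g. (a sketch):
+  //   "no-frame-pointer-elim"="true"  ->  "frame-pointer"="all"
+  //   "no-frame-pointer-elim-non-leaf" ->  "frame-pointer"="non-leaf"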
+ StringRef FramePointer;
+ Attribute A = B.getAttribute("no-frame-pointer-elim");
+ if (A.isValid()) {
+ // The value can be "true" or "false".
+ FramePointer = A.getValueAsString() == "true" ? "all" : "none";
+ B.removeAttribute("no-frame-pointer-elim");
+ }
+ if (B.contains("no-frame-pointer-elim-non-leaf")) {
+ // The value is ignored. "no-frame-pointer-elim"="true" takes priority.
+ if (FramePointer != "all")
+ FramePointer = "non-leaf";
+ B.removeAttribute("no-frame-pointer-elim-non-leaf");
+ }
+ if (!FramePointer.empty())
+ B.addAttribute("frame-pointer", FramePointer);
+
+ A = B.getAttribute("null-pointer-is-valid");
+ if (A.isValid()) {
+ // The value can be "true" or "false".
+ bool NullPointerIsValid = A.getValueAsString() == "true";
+ B.removeAttribute("null-pointer-is-valid");
+ if (NullPointerIsValid)
+ B.addAttribute(Attribute::NullPointerIsValid);
+ }
+}
+
+void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
+
+ // clang.arc.attachedcall bundles are now required to have an operand.
+ // If they don't, it's okay to drop them entirely: when there is an operand,
+ // the "attachedcall" is meaningful and required, but without an operand,
+ // it's just a marker NOP. Dropping it merely prevents an optimization.
+ erase_if(Bundles, [&](OperandBundleDef &OBD) {
+ return OBD.getTag() == "clang.arc.attachedcall" &&
+ OBD.inputs().empty();
+ });
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/BasicBlock.cpp b/contrib/llvm-project/llvm/lib/IR/BasicBlock.cpp
new file mode 100644
index 000000000000..f064ff503eba
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/BasicBlock.cpp
@@ -0,0 +1,522 @@
+//===-- BasicBlock.cpp - Implement BasicBlock related methods -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BasicBlock class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/BasicBlock.h"
+#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Type.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ir"
+STATISTIC(NumInstrRenumberings, "Number of renumberings across all blocks");
+
+ValueSymbolTable *BasicBlock::getValueSymbolTable() {
+ if (Function *F = getParent())
+ return F->getValueSymbolTable();
+ return nullptr;
+}
+
+LLVMContext &BasicBlock::getContext() const {
+ return getType()->getContext();
+}
+
+template <> void llvm::invalidateParentIListOrdering(BasicBlock *BB) {
+ BB->invalidateOrders();
+}
+
+// Explicit instantiation of SymbolTableListTraits since some of the methods
+// are not in the public header file...
+template class llvm::SymbolTableListTraits<Instruction>;
+
+BasicBlock::BasicBlock(LLVMContext &C, const Twine &Name, Function *NewParent,
+ BasicBlock *InsertBefore)
+ : Value(Type::getLabelTy(C), Value::BasicBlockVal), Parent(nullptr) {
+
+ if (NewParent)
+ insertInto(NewParent, InsertBefore);
+ else
+ assert(!InsertBefore &&
+ "Cannot insert block before another block with no function!");
+
+ setName(Name);
+}
+
+void BasicBlock::insertInto(Function *NewParent, BasicBlock *InsertBefore) {
+ assert(NewParent && "Expected a parent");
+ assert(!Parent && "Already has a parent");
+
+ if (InsertBefore)
+ NewParent->getBasicBlockList().insert(InsertBefore->getIterator(), this);
+ else
+ NewParent->getBasicBlockList().push_back(this);
+}
+
+BasicBlock::~BasicBlock() {
+ validateInstrOrdering();
+
+ // If the address of the block is taken and it is being deleted (e.g. because
+ // it is dead), this means that there is either a dangling constant expr
+ // hanging off the block, or an undefined use of the block (source code
+ // expecting the address of a label to keep the block alive even though there
+ // is no indirect branch). Handle these cases by zapping the BlockAddress
+ // nodes. There are no other possible uses at this point.
+ if (hasAddressTaken()) {
+ assert(!use_empty() && "There should be at least one blockaddress!");
+ Constant *Replacement =
+ ConstantInt::get(llvm::Type::getInt32Ty(getContext()), 1);
+ while (!use_empty()) {
+ BlockAddress *BA = cast<BlockAddress>(user_back());
+ BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
+ BA->getType()));
+ BA->destroyConstant();
+ }
+ }
+
+ assert(getParent() == nullptr && "BasicBlock still linked into the program!");
+ dropAllReferences();
+ InstList.clear();
+}
+
+void BasicBlock::setParent(Function *parent) {
+ // Set Parent=parent, updating instruction symtab entries as appropriate.
+ InstList.setSymTabObject(&Parent, parent);
+}
+
+iterator_range<filter_iterator<BasicBlock::const_iterator,
+ std::function<bool(const Instruction &)>>>
+BasicBlock::instructionsWithoutDebug(bool SkipPseudoOp) const {
+ std::function<bool(const Instruction &)> Fn = [=](const Instruction &I) {
+ return !isa<DbgInfoIntrinsic>(I) &&
+ !(SkipPseudoOp && isa<PseudoProbeInst>(I));
+ };
+ return make_filter_range(*this, Fn);
+}
+
+iterator_range<
+ filter_iterator<BasicBlock::iterator, std::function<bool(Instruction &)>>>
+BasicBlock::instructionsWithoutDebug(bool SkipPseudoOp) {
+ std::function<bool(Instruction &)> Fn = [=](Instruction &I) {
+ return !isa<DbgInfoIntrinsic>(I) &&
+ !(SkipPseudoOp && isa<PseudoProbeInst>(I));
+ };
+ return make_filter_range(*this, Fn);
+}
+
+filter_iterator<BasicBlock::const_iterator,
+ std::function<bool(const Instruction &)>>::difference_type
+BasicBlock::sizeWithoutDebug() const {
+ return std::distance(instructionsWithoutDebug().begin(),
+ instructionsWithoutDebug().end());
+}
+
+void BasicBlock::removeFromParent() {
+ getParent()->getBasicBlockList().remove(getIterator());
+}
+
+iplist<BasicBlock>::iterator BasicBlock::eraseFromParent() {
+ return getParent()->getBasicBlockList().erase(getIterator());
+}
+
+void BasicBlock::moveBefore(BasicBlock *MovePos) {
+ MovePos->getParent()->getBasicBlockList().splice(
+ MovePos->getIterator(), getParent()->getBasicBlockList(), getIterator());
+}
+
+void BasicBlock::moveAfter(BasicBlock *MovePos) {
+ MovePos->getParent()->getBasicBlockList().splice(
+ ++MovePos->getIterator(), getParent()->getBasicBlockList(),
+ getIterator());
+}
+
+const Module *BasicBlock::getModule() const {
+ return getParent()->getParent();
+}
+
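+// Match a 'musttail' call followed (possibly through a bitcast of its result)
+// by this block's return, i.e. (a sketch):
+//   %v = musttail call i8* @f()
+//   %c = bitcast i8* %v to i32*   ; optional
+//   ret i32* %c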
+const CallInst *BasicBlock::getTerminatingMustTailCall() const {
+ if (InstList.empty())
+ return nullptr;
+ const ReturnInst *RI = dyn_cast<ReturnInst>(&InstList.back());
+ if (!RI || RI == &InstList.front())
+ return nullptr;
+
+ const Instruction *Prev = RI->getPrevNode();
+ if (!Prev)
+ return nullptr;
+
+ if (Value *RV = RI->getReturnValue()) {
+ if (RV != Prev)
+ return nullptr;
+
+ // Look through the optional bitcast.
+ if (auto *BI = dyn_cast<BitCastInst>(Prev)) {
+ RV = BI->getOperand(0);
+ Prev = BI->getPrevNode();
+ if (!Prev || RV != Prev)
+ return nullptr;
+ }
+ }
+
+ if (auto *CI = dyn_cast<CallInst>(Prev)) {
+ if (CI->isMustTailCall())
+ return CI;
+ }
+ return nullptr;
+}
+
+const CallInst *BasicBlock::getTerminatingDeoptimizeCall() const {
+ if (InstList.empty())
+ return nullptr;
+ auto *RI = dyn_cast<ReturnInst>(&InstList.back());
+ if (!RI || RI == &InstList.front())
+ return nullptr;
+
+ if (auto *CI = dyn_cast_or_null<CallInst>(RI->getPrevNode()))
+ if (Function *F = CI->getCalledFunction())
+ if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize)
+ return CI;
+
+ return nullptr;
+}
+
+const CallInst *BasicBlock::getPostdominatingDeoptimizeCall() const {
+ const BasicBlock* BB = this;
+ SmallPtrSet<const BasicBlock *, 8> Visited;
+ Visited.insert(BB);
+ while (auto *Succ = BB->getUniqueSuccessor()) {
+ if (!Visited.insert(Succ).second)
+ return nullptr;
+ BB = Succ;
+ }
+ return BB->getTerminatingDeoptimizeCall();
+}
+
+const Instruction* BasicBlock::getFirstNonPHI() const {
+ for (const Instruction &I : *this)
+ if (!isa<PHINode>(I))
+ return &I;
+ return nullptr;
+}
+
+const Instruction *BasicBlock::getFirstNonPHIOrDbg(bool SkipPseudoOp) const {
+ for (const Instruction &I : *this) {
+ if (isa<PHINode>(I) || isa<DbgInfoIntrinsic>(I))
+ continue;
+
+ if (SkipPseudoOp && isa<PseudoProbeInst>(I))
+ continue;
+
+ return &I;
+ }
+ return nullptr;
+}
+
+const Instruction *
+BasicBlock::getFirstNonPHIOrDbgOrLifetime(bool SkipPseudoOp) const {
+ for (const Instruction &I : *this) {
+ if (isa<PHINode>(I) || isa<DbgInfoIntrinsic>(I))
+ continue;
+
+ if (I.isLifetimeStartOrEnd())
+ continue;
+
+ if (SkipPseudoOp && isa<PseudoProbeInst>(I))
+ continue;
+
+ return &I;
+ }
+ return nullptr;
+}
+
+BasicBlock::const_iterator BasicBlock::getFirstInsertionPt() const {
+ const Instruction *FirstNonPHI = getFirstNonPHI();
+ if (!FirstNonPHI)
+ return end();
+
+ const_iterator InsertPt = FirstNonPHI->getIterator();
+ if (InsertPt->isEHPad()) ++InsertPt;
+ return InsertPt;
+}
+
+void BasicBlock::dropAllReferences() {
+ for (Instruction &I : *this)
+ I.dropAllReferences();
+}
+
+const BasicBlock *BasicBlock::getSinglePredecessor() const {
+ const_pred_iterator PI = pred_begin(this), E = pred_end(this);
+ if (PI == E) return nullptr; // No preds.
+ const BasicBlock *ThePred = *PI;
+ ++PI;
+ return (PI == E) ? ThePred : nullptr /*multiple preds*/;
+}
+
+const BasicBlock *BasicBlock::getUniquePredecessor() const {
+ const_pred_iterator PI = pred_begin(this), E = pred_end(this);
+ if (PI == E) return nullptr; // No preds.
+ const BasicBlock *PredBB = *PI;
+ ++PI;
+ for (;PI != E; ++PI) {
+ if (*PI != PredBB)
+ return nullptr;
+ // The same predecessor appears multiple times in the predecessor list.
+ // This is OK.
+ }
+ return PredBB;
+}
+
+bool BasicBlock::hasNPredecessors(unsigned N) const {
+ return hasNItems(pred_begin(this), pred_end(this), N);
+}
+
+bool BasicBlock::hasNPredecessorsOrMore(unsigned N) const {
+ return hasNItemsOrMore(pred_begin(this), pred_end(this), N);
+}
+
+const BasicBlock *BasicBlock::getSingleSuccessor() const {
+ const_succ_iterator SI = succ_begin(this), E = succ_end(this);
+ if (SI == E) return nullptr; // no successors
+ const BasicBlock *TheSucc = *SI;
+ ++SI;
+ return (SI == E) ? TheSucc : nullptr /* multiple successors */;
+}
+
+const BasicBlock *BasicBlock::getUniqueSuccessor() const {
+ const_succ_iterator SI = succ_begin(this), E = succ_end(this);
+ if (SI == E) return nullptr; // No successors
+ const BasicBlock *SuccBB = *SI;
+ ++SI;
+ for (;SI != E; ++SI) {
+ if (*SI != SuccBB)
+ return nullptr;
+ // The same successor appears multiple times in the successor list.
+ // This is OK.
+ }
+ return SuccBB;
+}
+
+iterator_range<BasicBlock::phi_iterator> BasicBlock::phis() {
+ PHINode *P = empty() ? nullptr : dyn_cast<PHINode>(&*begin());
+ return make_range<phi_iterator>(P, nullptr);
+}
+
+void BasicBlock::removePredecessor(BasicBlock *Pred,
+ bool KeepOneInputPHIs) {
+ // Use hasNUsesOrMore to bound the cost of this assertion for complex CFGs.
+ assert((hasNUsesOrMore(16) || llvm::is_contained(predecessors(this), Pred)) &&
+ "Pred is not a predecessor!");
+
+ // Return early if there are no PHI nodes to update.
+ if (empty() || !isa<PHINode>(begin()))
+ return;
+
+ unsigned NumPreds = cast<PHINode>(front()).getNumIncomingValues();
+ for (PHINode &Phi : make_early_inc_range(phis())) {
+ Phi.removeIncomingValue(Pred, !KeepOneInputPHIs);
+ if (KeepOneInputPHIs)
+ continue;
+
+ // If we have a single predecessor, removeIncomingValue may have erased the
+ // PHI node itself.
+ if (NumPreds == 1)
+ continue;
+
+ // Try to replace the PHI node with a constant value.
+ if (Value *PhiConstant = Phi.hasConstantValue()) {
+ Phi.replaceAllUsesWith(PhiConstant);
+ Phi.eraseFromParent();
+ }
+ }
+}
+
+bool BasicBlock::canSplitPredecessors() const {
+ const Instruction *FirstNonPHI = getFirstNonPHI();
+ if (isa<LandingPadInst>(FirstNonPHI))
+ return true;
+ // This is perhaps a little conservative because constructs like
+ // CleanupBlockInst are pretty easy to split. However, SplitBlockPredecessors
+ // cannot handle such things just yet.
+ if (FirstNonPHI->isEHPad())
+ return false;
+ return true;
+}
+
+bool BasicBlock::isLegalToHoistInto() const {
+ auto *Term = getTerminator();
+ // No terminator means the block is under construction.
+ if (!Term)
+ return true;
+
+ // If the block has no successors, there can be no instructions to hoist.
+ assert(Term->getNumSuccessors() > 0);
+
+ // Instructions should not be hoisted across exception handling boundaries.
+ return !Term->isExceptionalTerminator();
+}
+
+bool BasicBlock::isEntryBlock() const {
+ const Function *F = getParent();
+ assert(F && "Block must have a parent function to use this API");
+ return this == &F->getEntryBlock();
+}
+
+BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName,
+ bool Before) {
+ if (Before)
+ return splitBasicBlockBefore(I, BBName);
+
+ assert(getTerminator() && "Can't use splitBasicBlock on degenerate BB!");
+ assert(I != InstList.end() &&
+ "Trying to get me to create degenerate basic block!");
+
+ BasicBlock *New = BasicBlock::Create(getContext(), BBName, getParent(),
+ this->getNextNode());
+
+ // Save DebugLoc of split point before invalidating iterator.
+ DebugLoc Loc = I->getDebugLoc();
+ // Move all of the specified instructions from the original basic block into
+ // the new basic block.
+ New->getInstList().splice(New->end(), this->getInstList(), I, end());
+
+ // Add a branch instruction to the newly formed basic block.
+ BranchInst *BI = BranchInst::Create(New, this);
+ BI->setDebugLoc(Loc);
+
+ // Now we must loop through all of the successors of the New block (which
+ // _were_ the successors of the 'this' block), and update any PHI nodes in
+ // successors. If there were PHI nodes in the successors, then they need to
+ // know that incoming branches will be from New, not from Old (this).
+ //
+ New->replaceSuccessorsPhiUsesWith(this, New);
+ return New;
+}
+
+BasicBlock *BasicBlock::splitBasicBlockBefore(iterator I, const Twine &BBName) {
+ assert(getTerminator() &&
+ "Can't use splitBasicBlockBefore on degenerate BB!");
+ assert(I != InstList.end() &&
+ "Trying to get me to create degenerate basic block!");
+
+ assert((!isa<PHINode>(*I) || getSinglePredecessor()) &&
+ "cannot split on multi incoming phis");
+
+ BasicBlock *New = BasicBlock::Create(getContext(), BBName, getParent(), this);
+ // Save DebugLoc of split point before invalidating iterator.
+ DebugLoc Loc = I->getDebugLoc();
+ // Move all of the specified instructions from the original basic block into
+ // the new basic block.
+ New->getInstList().splice(New->end(), this->getInstList(), begin(), I);
+
+  // Loop through all of the predecessors of the 'this' block (which will be
+  // the predecessors of the New block), retargeting each terminator that
+  // pointed at 'this' to point at the New block, and update any PHI nodes in
+  // 'this' block. If there were PHI nodes in 'this' block, they are updated
+  // to reflect that the incoming branches will come from the New block rather
+  // than from the predecessors of the 'this' block.
+ for (BasicBlock *Pred : predecessors(this)) {
+ Instruction *TI = Pred->getTerminator();
+ TI->replaceSuccessorWith(this, New);
+ this->replacePhiUsesWith(Pred, New);
+ }
+ // Add a branch instruction from "New" to "this" Block.
+ BranchInst *BI = BranchInst::Create(this, New);
+ BI->setDebugLoc(Loc);
+
+ return New;
+}
+
+void BasicBlock::replacePhiUsesWith(BasicBlock *Old, BasicBlock *New) {
+ // N.B. This might not be a complete BasicBlock, so don't assume
+ // that it ends with a non-phi instruction.
+ for (Instruction &I : *this) {
+ PHINode *PN = dyn_cast<PHINode>(&I);
+ if (!PN)
+ break;
+ PN->replaceIncomingBlockWith(Old, New);
+ }
+}
+
+void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *Old,
+ BasicBlock *New) {
+ Instruction *TI = getTerminator();
+ if (!TI)
+ // Cope with being called on a BasicBlock that doesn't have a terminator
+ // yet. Clang's CodeGenFunction::EmitReturnBlock() likes to do this.
+ return;
+ for (BasicBlock *Succ : successors(TI))
+ Succ->replacePhiUsesWith(Old, New);
+}
+
+void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *New) {
+ this->replaceSuccessorsPhiUsesWith(this, New);
+}
+
+bool BasicBlock::isLandingPad() const {
+ return isa<LandingPadInst>(getFirstNonPHI());
+}
+
+const LandingPadInst *BasicBlock::getLandingPadInst() const {
+ return dyn_cast<LandingPadInst>(getFirstNonPHI());
+}
+
+Optional<uint64_t> BasicBlock::getIrrLoopHeaderWeight() const {
+ const Instruction *TI = getTerminator();
+ if (MDNode *MDIrrLoopHeader =
+ TI->getMetadata(LLVMContext::MD_irr_loop)) {
+ MDString *MDName = cast<MDString>(MDIrrLoopHeader->getOperand(0));
+ if (MDName->getString().equals("loop_header_weight")) {
+ auto *CI = mdconst::extract<ConstantInt>(MDIrrLoopHeader->getOperand(1));
+ return Optional<uint64_t>(CI->getValue().getZExtValue());
+ }
+ }
+ return Optional<uint64_t>();
+}
+
+BasicBlock::iterator llvm::skipDebugIntrinsics(BasicBlock::iterator It) {
+ while (isa<DbgInfoIntrinsic>(It))
+ ++It;
+ return It;
+}
+
+void BasicBlock::renumberInstructions() {
+ unsigned Order = 0;
+ for (Instruction &I : *this)
+ I.Order = Order++;
+
+  // Set the bit to indicate that the instruction order is valid and cached.
+ BasicBlockBits Bits = getBasicBlockBits();
+ Bits.InstrOrderValid = true;
+ setBasicBlockBits(Bits);
+
+ NumInstrRenumberings++;
+}
+
+#ifndef NDEBUG
+/// In asserts builds, this checks the numbering. In non-asserts builds, it
+/// is defined as a no-op inline function in BasicBlock.h.
+void BasicBlock::validateInstrOrdering() const {
+ if (!isInstrOrderValid())
+ return;
+ const Instruction *Prev = nullptr;
+ for (const Instruction &I : *this) {
+ assert((!Prev || Prev->comesBefore(&I)) &&
+ "cached instruction ordering is incorrect");
+ Prev = &I;
+ }
+}
+#endif
diff --git a/contrib/llvm-project/llvm/lib/IR/BuiltinGCs.cpp b/contrib/llvm-project/llvm/lib/IR/BuiltinGCs.cpp
new file mode 100644
index 000000000000..e9ef034c488f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/BuiltinGCs.cpp
@@ -0,0 +1,130 @@
+//===- BuiltinGCs.cpp - Boilerplate for our built-in GC types -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the boilerplate required to define our various built-in
+// GC lowering strategies.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/BuiltinGCs.h"
+#include "llvm/IR/GCStrategy.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/Support/Casting.h"
+
+using namespace llvm;
+
+namespace {
+
+/// An example GC which attempts to be compatible with the Erlang/OTP garbage
+/// collector.
+///
+/// The frametable emitter is in ErlangGCPrinter.cpp.
+class ErlangGC : public GCStrategy {
+public:
+ ErlangGC() {
+ NeededSafePoints = true;
+ UsesMetadata = true;
+ }
+};
+
+/// An example GC which attempts to be compatible with Objective Caml 3.10.0
+///
+/// The frametable emitter is in OcamlGCPrinter.cpp.
+class OcamlGC : public GCStrategy {
+public:
+ OcamlGC() {
+ NeededSafePoints = true;
+ UsesMetadata = true;
+ }
+};
+
+/// A GC strategy for uncooperative targets. This implements lowering for the
+/// llvm.gc* intrinsics for targets that do not natively support them (which
+/// includes the C backend). Note that the code generated is not quite as
+/// efficient as algorithms which generate stack maps to identify roots.
+///
+/// In order to support this particular transformation, all stack roots are
+/// co-allocated in the stack. This allows a fully target-independent stack
+/// map while introducing only minor runtime overhead.
+class ShadowStackGC : public GCStrategy {
+public:
+ ShadowStackGC() = default;
+};
+
+/// A GCStrategy which serves as an example for the usage of a statepoint-based
+/// lowering strategy. This GCStrategy is intended to be suitable as a default
+/// implementation usable with any collector which can consume the standard
+/// stackmap format generated by statepoints, uses the default address space to
+/// distinguish between gc managed and non-gc managed pointers, and has
+/// reasonable relocation semantics.
+class StatepointGC : public GCStrategy {
+public:
+ StatepointGC() {
+ UseStatepoints = true;
+ // These options are all gc.root specific, we specify them so that the
+ // gc.root lowering code doesn't run.
+ NeededSafePoints = false;
+ UsesMetadata = false;
+ }
+
+ Optional<bool> isGCManagedPointer(const Type *Ty) const override {
+ // Method is only valid on pointer typed values.
+ const PointerType *PT = cast<PointerType>(Ty);
+ // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
+ // GC managed heap. We know that a pointer into this heap needs to be
+ // updated and that no other pointer does. Note that addrspace(1) is used
+ // only as an example, it has no special meaning, and is not reserved for
+ // GC usage.
+ return (1 == PT->getAddressSpace());
+ }
+};
+
+/// A GCStrategy for the CoreCLR Runtime. The strategy is similar to
+/// Statepoint-example GC, but differs from it in certain aspects, such as:
+/// 1) Base-pointers need not be explicitly tracked and reported for
+/// interior pointers
+/// 2) Uses a different format for encoding stack-maps
+/// 3) Location of Safe-point polls: polls are only needed before loop-back
+/// edges and before tail-calls (not needed at function-entry)
+///
+/// The above differences in behavior are to be implemented in upcoming
+/// checkins.
+class CoreCLRGC : public GCStrategy {
+public:
+ CoreCLRGC() {
+ UseStatepoints = true;
+ // These options are all gc.root specific, we specify them so that the
+ // gc.root lowering code doesn't run.
+ NeededSafePoints = false;
+ UsesMetadata = false;
+ }
+
+ Optional<bool> isGCManagedPointer(const Type *Ty) const override {
+ // Method is only valid on pointer typed values.
+ const PointerType *PT = cast<PointerType>(Ty);
+ // We pick addrspace(1) as our GC managed heap.
+ return (1 == PT->getAddressSpace());
+ }
+};
+
+} // end anonymous namespace
+
+// Register all the above so that they can be found at runtime. Note that
+// these static initializers are important since the registration list is
+// constructed from their storage.
+static GCRegistry::Add<ErlangGC> A("erlang",
+ "erlang-compatible garbage collector");
+static GCRegistry::Add<OcamlGC> B("ocaml", "ocaml 3.10-compatible GC");
+static GCRegistry::Add<ShadowStackGC>
+ C("shadow-stack", "Very portable GC for uncooperative code generators");
+static GCRegistry::Add<StatepointGC> D("statepoint-example",
+ "an example strategy for statepoint");
+static GCRegistry::Add<CoreCLRGC> E("coreclr", "CoreCLR-compatible GC");
+
+// Provide hook to ensure the containing library is fully loaded.
+void llvm::linkAllBuiltinGCs() {}
diff --git a/contrib/llvm-project/llvm/lib/IR/Comdat.cpp b/contrib/llvm-project/llvm/lib/IR/Comdat.cpp
new file mode 100644
index 000000000000..2cd6db913621
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Comdat.cpp
@@ -0,0 +1,83 @@
+//===- Comdat.cpp - Implement the Comdat class ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Comdat class (including the C bindings).
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/Comdat.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMapEntry.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+
+using namespace llvm;
+
+Comdat::Comdat(Comdat &&C) : Name(C.Name), SK(C.SK) {}
+
+Comdat::Comdat() = default;
+
+StringRef Comdat::getName() const { return Name->first(); }
+
+void Comdat::addUser(GlobalObject *GO) { Users.insert(GO); }
+
+void Comdat::removeUser(GlobalObject *GO) { Users.erase(GO); }
+
+LLVMComdatRef LLVMGetOrInsertComdat(LLVMModuleRef M, const char *Name) {
+ return wrap(unwrap(M)->getOrInsertComdat(Name));
+}
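+
+// A usage sketch of the C API defined in this file (variable names are
+// hypothetical):
+//   LLVMComdatRef C = LLVMGetOrInsertComdat(M, "foo");
+//   LLVMSetComdatSelectionKind(C, LLVMAnyComdatSelectionKind);
+//   LLVMSetComdat(GlobalFn, C);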
+
+LLVMComdatRef LLVMGetComdat(LLVMValueRef V) {
+ GlobalObject *G = unwrap<GlobalObject>(V);
+ return wrap(G->getComdat());
+}
+
+void LLVMSetComdat(LLVMValueRef V, LLVMComdatRef C) {
+ GlobalObject *G = unwrap<GlobalObject>(V);
+ G->setComdat(unwrap(C));
+}
+
+LLVMComdatSelectionKind LLVMGetComdatSelectionKind(LLVMComdatRef C) {
+ switch (unwrap(C)->getSelectionKind()) {
+ case Comdat::Any:
+ return LLVMAnyComdatSelectionKind;
+ case Comdat::ExactMatch:
+ return LLVMExactMatchComdatSelectionKind;
+ case Comdat::Largest:
+ return LLVMLargestComdatSelectionKind;
+ case Comdat::NoDeduplicate:
+ return LLVMNoDeduplicateComdatSelectionKind;
+ case Comdat::SameSize:
+ return LLVMSameSizeComdatSelectionKind;
+ }
+ llvm_unreachable("Invalid Comdat SelectionKind!");
+}
+
+void LLVMSetComdatSelectionKind(LLVMComdatRef C, LLVMComdatSelectionKind kind) {
+ Comdat *Cd = unwrap(C);
+ switch (kind) {
+ case LLVMAnyComdatSelectionKind:
+ Cd->setSelectionKind(Comdat::Any);
+ break;
+ case LLVMExactMatchComdatSelectionKind:
+ Cd->setSelectionKind(Comdat::ExactMatch);
+ break;
+ case LLVMLargestComdatSelectionKind:
+ Cd->setSelectionKind(Comdat::Largest);
+ break;
+ case LLVMNoDeduplicateComdatSelectionKind:
+ Cd->setSelectionKind(Comdat::NoDeduplicate);
+ break;
+ case LLVMSameSizeComdatSelectionKind:
+ Cd->setSelectionKind(Comdat::SameSize);
+ break;
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
new file mode 100644
index 000000000000..98adff107cec
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/ConstantFold.cpp
@@ -0,0 +1,2281 @@
+//===- ConstantFold.cpp - LLVM constant folder ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements folding of constants for LLVM. This implements the
+// (internal) ConstantFold.h interface, which is used by the
+// ConstantExpr::get* methods to automatically fold constants when possible.
+//
+// The current constant folding implementation is implemented in two pieces: the
+// pieces that don't need DataLayout, and the pieces that do. This is to avoid
+// a dependence in IR on Target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/ConstantFold.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+using namespace llvm::PatternMatch;
+
+//===----------------------------------------------------------------------===//
+// ConstantFold*Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+/// Convert the specified vector Constant node to the specified vector type.
+/// At this point, we know that the elements of the input vector constant are
+/// all simple integer or FP values.
+static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
+
+ if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy);
+ if (CV->isNullValue()) return Constant::getNullValue(DstTy);
+
+ // Do not iterate on scalable vector. The num of elements is unknown at
+ // compile-time.
+ if (isa<ScalableVectorType>(DstTy))
+ return nullptr;
+
+ // If this cast changes element count then we can't handle it here:
+ // doing so requires endianness information. This should be handled by
+ // Analysis/ConstantFolding.cpp
+ unsigned NumElts = cast<FixedVectorType>(DstTy)->getNumElements();
+ if (NumElts != cast<FixedVectorType>(CV->getType())->getNumElements())
+ return nullptr;
+
+ Type *DstEltTy = DstTy->getElementType();
+ // Fast path for splatted constants.
+ if (Constant *Splat = CV->getSplatValue()) {
+ return ConstantVector::getSplat(DstTy->getElementCount(),
+ ConstantExpr::getBitCast(Splat, DstEltTy));
+ }
+
+ SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(CV->getContext(), 32);
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C =
+ ConstantExpr::getExtractElement(CV, ConstantInt::get(Ty, i));
+ C = ConstantExpr::getBitCast(C, DstEltTy);
+ Result.push_back(C);
+ }
+
+ return ConstantVector::get(Result);
+}
+
+/// This function determines which opcode to use to fold two constant cast
+/// expressions together, i.e., whether it is valid to fold a cast of a cast.
+/// It uses CastInst::isEliminableCastPair to determine the opcode;
+/// consequently it is just a wrapper around that function.
+static unsigned
+foldConstantCastPair(
+ unsigned opc, ///< opcode of the second cast constant expression
+ ConstantExpr *Op, ///< the first cast constant expression
+ Type *DstTy ///< destination type of the first cast
+) {
+ assert(Op && Op->isCast() && "Can't fold cast of cast without a cast!");
+ assert(DstTy && DstTy->isFirstClassType() && "Invalid cast destination type");
+ assert(CastInst::isCast(opc) && "Invalid cast opcode");
+
+ // The types and opcodes for the two Cast constant expressions
+ Type *SrcTy = Op->getOperand(0)->getType();
+ Type *MidTy = Op->getType();
+ Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
+ Instruction::CastOps secondOp = Instruction::CastOps(opc);
+
+ // Assume that pointers are never more than 64 bits wide, and only use this
+ // for the middle type. Otherwise we could end up folding away illegal
+ // bitcasts between address spaces with different sizes.
+ IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext());
+
+ // Let CastInst::isEliminableCastPair do the heavy lifting.
+ return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy,
+ nullptr, FakeIntPtrTy, nullptr);
+}
+
+static Constant *FoldBitCast(Constant *V, Type *DestTy) {
+ Type *SrcTy = V->getType();
+ if (SrcTy == DestTy)
+ return V; // no-op cast
+
+ // Check to see if we are casting a pointer to an aggregate to a pointer to
+ // the first element. If so, return the appropriate GEP instruction.
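+  // An illustrative example (with typed pointers, @g standing for any sized
+  // global): bitcast [4 x i32]* @g to i32* folds to
+  // getelementptr inbounds ([4 x i32], [4 x i32]* @g, i32 0, i32 0).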
+ if (PointerType *PTy = dyn_cast<PointerType>(V->getType()))
+ if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
+ if (PTy->getAddressSpace() == DPTy->getAddressSpace() &&
+ !PTy->isOpaque() && !DPTy->isOpaque() &&
+ PTy->getNonOpaquePointerElementType()->isSized()) {
+ SmallVector<Value*, 8> IdxList;
+ Value *Zero =
+ Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
+ IdxList.push_back(Zero);
+ Type *ElTy = PTy->getNonOpaquePointerElementType();
+ while (ElTy && ElTy != DPTy->getNonOpaquePointerElementType()) {
+ ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, (uint64_t)0);
+ IdxList.push_back(Zero);
+ }
+
+ if (ElTy == DPTy->getNonOpaquePointerElementType())
+ // This GEP is inbounds because all indices are zero.
+ return ConstantExpr::getInBoundsGetElementPtr(
+ PTy->getNonOpaquePointerElementType(), V, IdxList);
+ }
+
+  // Handle casts from one vector constant to another. We know that the src
+  // and dest types have the same size (otherwise it's an illegal cast).
+ if (VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) {
+ if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
+ assert(DestPTy->getPrimitiveSizeInBits() ==
+ SrcTy->getPrimitiveSizeInBits() &&
+ "Not cast between same sized vectors!");
+ SrcTy = nullptr;
+ // First, check for null. Undef is already handled.
+ if (isa<ConstantAggregateZero>(V))
+ return Constant::getNullValue(DestTy);
+
+    // Handle ConstantVector and ConstantDataVector.
+ return BitCastConstantVector(V, DestPTy);
+ }
+
+ // Canonicalize scalar-to-vector bitcasts into vector-to-vector bitcasts
+ // This allows for other simplifications (although some of them
+ // can only be handled by Analysis/ConstantFolding.cpp).
+ if (isa<ConstantInt>(V) || isa<ConstantFP>(V))
+ return ConstantExpr::getBitCast(ConstantVector::get(V), DestPTy);
+ }
+
+  // Finally, fold the remaining scalar bitcasts. The code below handles only
+  // a few specific cases; anything else is left unfolded.
+ if (isa<ConstantPointerNull>(V)) // ptr->ptr cast.
+ return ConstantPointerNull::get(cast<PointerType>(DestTy));
+
+ // Handle integral constant input.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ if (DestTy->isIntegerTy())
+ // Integral -> Integral. This is a no-op because the bit widths must
+ // be the same. Consequently, we just fold to V.
+ return V;
+
+ // See note below regarding the PPC_FP128 restriction.
+ if (DestTy->isFloatingPointTy() && !DestTy->isPPC_FP128Ty())
+ return ConstantFP::get(DestTy->getContext(),
+ APFloat(DestTy->getFltSemantics(),
+ CI->getValue()));
+
+    // Otherwise, we can't fold this (e.g. the destination is a vector type).
+ return nullptr;
+ }
+
+ // Handle ConstantFP input: FP -> Integral.
+ if (ConstantFP *FP = dyn_cast<ConstantFP>(V)) {
+ // PPC_FP128 is really the sum of two consecutive doubles, where the first
+ // double is always stored first in memory, regardless of the target
+ // endianness. The memory layout of i128, however, depends on the target
+ // endianness, and so we can't fold this without target endianness
+ // information. This should instead be handled by
+ // Analysis/ConstantFolding.cpp
+ if (FP->getType()->isPPC_FP128Ty())
+ return nullptr;
+
+ // Make sure dest type is compatible with the folded integer constant.
+ if (!DestTy->isIntegerTy())
+ return nullptr;
+
+ return ConstantInt::get(FP->getContext(),
+ FP->getValueAPF().bitcastToAPInt());
+ }
+
+ return nullptr;
+}
+
+
+/// C is an integer constant which only has a subset of its bytes used.
+/// The bytes used are indicated by ByteStart (which is the first byte used,
+/// counting from the least significant byte) and ByteSize, which is the number
+/// of bytes used.
+///
+/// This function analyzes the specified constant to see if the specified byte
+/// range can be returned as a simplified constant. If so, the constant is
+/// returned, otherwise null is returned.
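+///
+/// An illustrative example: with C = i32 0xAABBCCDD, ByteStart = 1 and
+/// ByteSize = 2, the bytes used are 0xCC and 0xBB (counting from the least
+/// significant byte), so the simplified result is i16 0xBBCC.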
+static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
+ unsigned ByteSize) {
+ assert(C->getType()->isIntegerTy() &&
+ (cast<IntegerType>(C->getType())->getBitWidth() & 7) == 0 &&
+ "Non-byte sized integer input");
+ unsigned CSize = cast<IntegerType>(C->getType())->getBitWidth()/8;
+ assert(ByteSize && "Must be accessing some piece");
+ assert(ByteStart+ByteSize <= CSize && "Extracting invalid piece from input");
+ assert(ByteSize != CSize && "Should not extract everything");
+
+ // Constant Integers are simple.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+ APInt V = CI->getValue();
+ if (ByteStart)
+ V.lshrInPlace(ByteStart*8);
+ V = V.trunc(ByteSize*8);
+ return ConstantInt::get(CI->getContext(), V);
+ }
+
+  // If the input is a constant expr, we might be able to recursively simplify.
+ // If not, we definitely can't do anything.
+ ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
+ if (!CE) return nullptr;
+
+ switch (CE->getOpcode()) {
+ default: return nullptr;
+ case Instruction::Or: {
+ Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
+ if (!RHS)
+ return nullptr;
+
+ // X | -1 -> -1.
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
+ if (RHSC->isMinusOne())
+ return RHSC;
+
+ Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
+ if (!LHS)
+ return nullptr;
+ return ConstantExpr::getOr(LHS, RHS);
+ }
+ case Instruction::And: {
+ Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
+ if (!RHS)
+ return nullptr;
+
+ // X & 0 -> 0.
+ if (RHS->isNullValue())
+ return RHS;
+
+ Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
+ if (!LHS)
+ return nullptr;
+ return ConstantExpr::getAnd(LHS, RHS);
+ }
+ case Instruction::LShr: {
+ ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
+ if (!Amt)
+ return nullptr;
+ APInt ShAmt = Amt->getValue();
+ // Cannot analyze non-byte shifts.
+ if ((ShAmt & 7) != 0)
+ return nullptr;
+ ShAmt.lshrInPlace(3);
+
+ // If the extract is known to be all zeros, return zero.
+ if (ShAmt.uge(CSize - ByteStart))
+ return Constant::getNullValue(
+ IntegerType::get(CE->getContext(), ByteSize * 8));
+ // If the extract is known to be fully in the input, extract it.
+ if (ShAmt.ule(CSize - (ByteStart + ByteSize)))
+ return ExtractConstantBytes(CE->getOperand(0),
+ ByteStart + ShAmt.getZExtValue(), ByteSize);
+
+ // TODO: Handle the 'partially zero' case.
+ return nullptr;
+ }
+
+ case Instruction::Shl: {
+ ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
+ if (!Amt)
+ return nullptr;
+ APInt ShAmt = Amt->getValue();
+ // Cannot analyze non-byte shifts.
+ if ((ShAmt & 7) != 0)
+ return nullptr;
+ ShAmt.lshrInPlace(3);
+
+ // If the extract is known to be all zeros, return zero.
+ if (ShAmt.uge(ByteStart + ByteSize))
+ return Constant::getNullValue(
+ IntegerType::get(CE->getContext(), ByteSize * 8));
+ // If the extract is known to be fully in the input, extract it.
+ if (ShAmt.ule(ByteStart))
+ return ExtractConstantBytes(CE->getOperand(0),
+ ByteStart - ShAmt.getZExtValue(), ByteSize);
+
+ // TODO: Handle the 'partially zero' case.
+ return nullptr;
+ }
+
+ case Instruction::ZExt: {
+ unsigned SrcBitSize =
+ cast<IntegerType>(CE->getOperand(0)->getType())->getBitWidth();
+
+ // If extracting something that is completely zero, return 0.
+ if (ByteStart*8 >= SrcBitSize)
+ return Constant::getNullValue(IntegerType::get(CE->getContext(),
+ ByteSize*8));
+
+ // If exactly extracting the input, return it.
+ if (ByteStart == 0 && ByteSize*8 == SrcBitSize)
+ return CE->getOperand(0);
+
+    // If extracting something that lies completely within the input, and the
+    // input is a multiple of 8 bits wide, recurse.
+ if ((SrcBitSize&7) == 0 && (ByteStart+ByteSize)*8 <= SrcBitSize)
+ return ExtractConstantBytes(CE->getOperand(0), ByteStart, ByteSize);
+
+    // Otherwise, if extracting a strict subset of an input that is not a
+    // multiple of 8 bits wide, do a shift and trunc to get the bits.
+ if ((ByteStart+ByteSize)*8 < SrcBitSize) {
+ assert((SrcBitSize&7) && "Shouldn't get byte sized case here");
+ Constant *Res = CE->getOperand(0);
+ if (ByteStart)
+ Res = ConstantExpr::getLShr(Res,
+ ConstantInt::get(Res->getType(), ByteStart*8));
+ return ConstantExpr::getTrunc(Res, IntegerType::get(C->getContext(),
+ ByteSize*8));
+ }
+
+ // TODO: Handle the 'partially zero' case.
+ return nullptr;
+ }
+ }
+}
+
+Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
+ Type *DestTy) {
+ if (isa<PoisonValue>(V))
+ return PoisonValue::get(DestTy);
+
+ if (isa<UndefValue>(V)) {
+ // zext(undef) = 0, because the top bits will be zero.
+ // sext(undef) = 0, because the top bits will all be the same.
+ // [us]itofp(undef) = 0, because the result value is bounded.
+ if (opc == Instruction::ZExt || opc == Instruction::SExt ||
+ opc == Instruction::UIToFP || opc == Instruction::SIToFP)
+ return Constant::getNullValue(DestTy);
+ return UndefValue::get(DestTy);
+ }
+
+ if (V->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() &&
+ opc != Instruction::AddrSpaceCast)
+ return Constant::getNullValue(DestTy);
+
+  // If the cast operand is a constant expression, there are a few things we
+  // can do to try to simplify it.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ if (CE->isCast()) {
+ // Try hard to fold cast of cast because they are often eliminable.
+ if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy))
+ return ConstantExpr::getCast(newOpc, CE->getOperand(0), DestTy);
+ } else if (CE->getOpcode() == Instruction::GetElementPtr &&
+ // Do not fold addrspacecast (gep 0, .., 0). It might make the
+ // addrspacecast uncanonicalized.
+ opc != Instruction::AddrSpaceCast &&
+ // Do not fold bitcast (gep) with inrange index, as this loses
+ // information.
+ !cast<GEPOperator>(CE)->getInRangeIndex() &&
+ // Do not fold if the gep type is a vector, as bitcasting
+ // operand 0 of a vector gep will result in a bitcast between
+ // different sizes.
+ !CE->getType()->isVectorTy()) {
+ // If all of the indexes in the GEP are null values, there is no pointer
+ // adjustment going on. We might as well cast the source pointer.
+ bool isAllNull = true;
+ for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
+ if (!CE->getOperand(i)->isNullValue()) {
+ isAllNull = false;
+ break;
+ }
+ if (isAllNull)
+ // This is casting one pointer type to another, always BitCast
+ return ConstantExpr::getPointerCast(CE->getOperand(0), DestTy);
+ }
+ }
+
+  // If the cast operand is a constant vector, perform the cast by
+  // operating on each element. In the case of bitcasts, the element
+  // count may be mismatched; don't attempt to handle that here.
+ if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) &&
+ DestTy->isVectorTy() &&
+ cast<FixedVectorType>(DestTy)->getNumElements() ==
+ cast<FixedVectorType>(V->getType())->getNumElements()) {
+ VectorType *DestVecTy = cast<VectorType>(DestTy);
+ Type *DstEltTy = DestVecTy->getElementType();
+ // Fast path for splatted constants.
+ if (Constant *Splat = V->getSplatValue()) {
+ return ConstantVector::getSplat(
+ cast<VectorType>(DestTy)->getElementCount(),
+ ConstantExpr::getCast(opc, Splat, DstEltTy));
+ }
+ SmallVector<Constant *, 16> res;
+ Type *Ty = IntegerType::get(V->getContext(), 32);
+ for (unsigned i = 0,
+ e = cast<FixedVectorType>(V->getType())->getNumElements();
+ i != e; ++i) {
+ Constant *C =
+ ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
+ res.push_back(ConstantExpr::getCast(opc, C, DstEltTy));
+ }
+ return ConstantVector::get(res);
+ }
+
+ // We actually have to do a cast now. Perform the cast according to the
+ // opcode specified.
+ switch (opc) {
+ default:
+ llvm_unreachable("Failed to cast constant expression");
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
+ bool ignored;
+ APFloat Val = FPC->getValueAPF();
+ Val.convert(DestTy->getFltSemantics(), APFloat::rmNearestTiesToEven,
+ &ignored);
+ return ConstantFP::get(V->getContext(), Val);
+ }
+ return nullptr; // Can't fold.
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
+ const APFloat &V = FPC->getValueAPF();
+ bool ignored;
+ uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ APSInt IntVal(DestBitWidth, opc == Instruction::FPToUI);
+ if (APFloat::opInvalidOp ==
+ V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored)) {
+ // Undefined behavior invoked - the destination type can't represent
+ // the input constant.
+ return PoisonValue::get(DestTy);
+ }
+ return ConstantInt::get(FPC->getContext(), IntVal);
+ }
+ return nullptr; // Can't fold.
+  case Instruction::IntToPtr: // always treated as unsigned
+ if (V->isNullValue()) // Is it an integral null value?
+ return ConstantPointerNull::get(cast<PointerType>(DestTy));
+    return nullptr; // Other integer-to-pointer casts are not folded here
+ case Instruction::PtrToInt: // always treated as unsigned
+ // Is it a null pointer value?
+ if (V->isNullValue())
+ return ConstantInt::get(DestTy, 0);
+    // Other pointer-to-integer casts are not folded here
+ return nullptr;
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ const APInt &api = CI->getValue();
+ APFloat apf(DestTy->getFltSemantics(),
+ APInt::getZero(DestTy->getPrimitiveSizeInBits()));
+ apf.convertFromAPInt(api, opc==Instruction::SIToFP,
+ APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(V->getContext(), apf);
+ }
+ return nullptr;
+ case Instruction::ZExt:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().zext(BitWidth));
+ }
+ return nullptr;
+ case Instruction::SExt:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().sext(BitWidth));
+ }
+ return nullptr;
+ case Instruction::Trunc: {
+ if (V->getType()->isVectorTy())
+ return nullptr;
+
+ uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ return ConstantInt::get(V->getContext(),
+ CI->getValue().trunc(DestBitWidth));
+ }
+
+    // The input must be a constantexpr. See if we can simplify this based on
+    // the bytes we are demanding. Only do this if the source and dest are a
+    // whole number of bytes wide.
+ if ((DestBitWidth & 7) == 0 &&
+ (cast<IntegerType>(V->getType())->getBitWidth() & 7) == 0)
+ if (Constant *Res = ExtractConstantBytes(V, 0, DestBitWidth / 8))
+ return Res;
+
+ return nullptr;
+ }
+ case Instruction::BitCast:
+ return FoldBitCast(V, DestTy);
+ case Instruction::AddrSpaceCast:
+ return nullptr;
+ }
+}
+
+Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
+ Constant *V1, Constant *V2) {
+ // Check for i1 and vector true/false conditions.
+ if (Cond->isNullValue()) return V2;
+ if (Cond->isAllOnesValue()) return V1;
+
+ // If the condition is a vector constant, fold the result elementwise.
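+  // An illustrative example: select <2 x i1> <i1 true, i1 false>,
+  // <2 x i8> <i8 1, i8 2>, <2 x i8> <i8 3, i8 4> takes element 0 from the
+  // first vector and element 1 from the second, folding to <i8 1, i8 4>.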
+ if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
+ auto *V1VTy = CondV->getType();
+ SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(CondV->getContext(), 32);
+ for (unsigned i = 0, e = V1VTy->getNumElements(); i != e; ++i) {
+ Constant *V;
+ Constant *V1Element = ConstantExpr::getExtractElement(V1,
+ ConstantInt::get(Ty, i));
+ Constant *V2Element = ConstantExpr::getExtractElement(V2,
+ ConstantInt::get(Ty, i));
+ auto *Cond = cast<Constant>(CondV->getOperand(i));
+ if (isa<PoisonValue>(Cond)) {
+ V = PoisonValue::get(V1Element->getType());
+ } else if (V1Element == V2Element) {
+ V = V1Element;
+ } else if (isa<UndefValue>(Cond)) {
+ V = isa<UndefValue>(V1Element) ? V1Element : V2Element;
+ } else {
+ if (!isa<ConstantInt>(Cond)) break;
+ V = Cond->isNullValue() ? V2Element : V1Element;
+ }
+ Result.push_back(V);
+ }
+
+ // If we were able to build the vector, return it.
+ if (Result.size() == V1VTy->getNumElements())
+ return ConstantVector::get(Result);
+ }
+
+ if (isa<PoisonValue>(Cond))
+ return PoisonValue::get(V1->getType());
+
+ if (isa<UndefValue>(Cond)) {
+ if (isa<UndefValue>(V1)) return V1;
+ return V2;
+ }
+
+ if (V1 == V2) return V1;
+
+ if (isa<PoisonValue>(V1))
+ return V2;
+ if (isa<PoisonValue>(V2))
+ return V1;
+
+ // If the true or false value is undef, we can fold to the other value as
+ // long as the other value isn't poison.
+ auto NotPoison = [](Constant *C) {
+ if (isa<PoisonValue>(C))
+ return false;
+
+ // TODO: We can analyze ConstExpr by opcode to determine if there is any
+ // possibility of poison.
+ if (isa<ConstantExpr>(C))
+ return false;
+
+ if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(C) ||
+ isa<ConstantPointerNull>(C) || isa<Function>(C))
+ return true;
+
+ if (C->getType()->isVectorTy())
+ return !C->containsPoisonElement() && !C->containsConstantExpression();
+
+ // TODO: Recursively analyze aggregates or other constants.
+ return false;
+ };
+ if (isa<UndefValue>(V1) && NotPoison(V2)) return V2;
+ if (isa<UndefValue>(V2) && NotPoison(V1)) return V1;
+
+ if (ConstantExpr *TrueVal = dyn_cast<ConstantExpr>(V1)) {
+ if (TrueVal->getOpcode() == Instruction::Select)
+ if (TrueVal->getOperand(0) == Cond)
+ return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2);
+ }
+ if (ConstantExpr *FalseVal = dyn_cast<ConstantExpr>(V2)) {
+ if (FalseVal->getOpcode() == Instruction::Select)
+ if (FalseVal->getOperand(0) == Cond)
+ return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
+ }
+
+ return nullptr;
+}
+
+Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
+ Constant *Idx) {
+ auto *ValVTy = cast<VectorType>(Val->getType());
+
+ // extractelt poison, C -> poison
+ // extractelt C, undef -> poison
+ if (isa<PoisonValue>(Val) || isa<UndefValue>(Idx))
+ return PoisonValue::get(ValVTy->getElementType());
+
+ // extractelt undef, C -> undef
+ if (isa<UndefValue>(Val))
+ return UndefValue::get(ValVTy->getElementType());
+
+ auto *CIdx = dyn_cast<ConstantInt>(Idx);
+ if (!CIdx)
+ return nullptr;
+
+ if (auto *ValFVTy = dyn_cast<FixedVectorType>(Val->getType())) {
+ // ee({w,x,y,z}, wrong_value) -> poison
+ if (CIdx->uge(ValFVTy->getNumElements()))
+ return PoisonValue::get(ValFVTy->getElementType());
+ }
+
+ // ee (gep (ptr, idx0, ...), idx) -> gep (ee (ptr, idx), ee (idx0, idx), ...)
+ if (auto *CE = dyn_cast<ConstantExpr>(Val)) {
+ if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
+ SmallVector<Constant *, 8> Ops;
+ Ops.reserve(CE->getNumOperands());
+ for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
+ Constant *Op = CE->getOperand(i);
+ if (Op->getType()->isVectorTy()) {
+ Constant *ScalarOp = ConstantExpr::getExtractElement(Op, Idx);
+ if (!ScalarOp)
+ return nullptr;
+ Ops.push_back(ScalarOp);
+ } else
+ Ops.push_back(Op);
+ }
+ return CE->getWithOperands(Ops, ValVTy->getElementType(), false,
+ GEP->getSourceElementType());
+ } else if (CE->getOpcode() == Instruction::InsertElement) {
+ if (const auto *IEIdx = dyn_cast<ConstantInt>(CE->getOperand(2))) {
+ if (APSInt::isSameValue(APSInt(IEIdx->getValue()),
+ APSInt(CIdx->getValue()))) {
+ return CE->getOperand(1);
+ } else {
+ return ConstantExpr::getExtractElement(CE->getOperand(0), CIdx);
+ }
+ }
+ }
+ }
+
+ if (Constant *C = Val->getAggregateElement(CIdx))
+ return C;
+
+  // If Lane is below the minimum vector width, extracting from a splat
+  // yields the splatted value: extractelt Splat(x), Lane -> x.
+ if (CIdx->getValue().ult(ValVTy->getElementCount().getKnownMinValue())) {
+ if (Constant *SplatVal = Val->getSplatValue())
+ return SplatVal;
+ }
+
+ return nullptr;
+}
+
+Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
+ Constant *Elt,
+ Constant *Idx) {
+ if (isa<UndefValue>(Idx))
+ return PoisonValue::get(Val->getType());
+
+ // Inserting null into all zeros is still all zeros.
+ // TODO: This is true for undef and poison splats too.
+ if (isa<ConstantAggregateZero>(Val) && Elt->isNullValue())
+ return Val;
+
+ ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx);
+ if (!CIdx) return nullptr;
+
+  // Do not iterate on scalable vectors: the number of elements is unknown at
+  // compile time.
+ if (isa<ScalableVectorType>(Val->getType()))
+ return nullptr;
+
+ auto *ValTy = cast<FixedVectorType>(Val->getType());
+
+ unsigned NumElts = ValTy->getNumElements();
+ if (CIdx->uge(NumElts))
+ return PoisonValue::get(Val->getType());
+
+ SmallVector<Constant*, 16> Result;
+ Result.reserve(NumElts);
+ auto *Ty = Type::getInt32Ty(Val->getContext());
+ uint64_t IdxVal = CIdx->getZExtValue();
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (i == IdxVal) {
+ Result.push_back(Elt);
+ continue;
+ }
+
+ Constant *C = ConstantExpr::getExtractElement(Val, ConstantInt::get(Ty, i));
+ Result.push_back(C);
+ }
+
+ return ConstantVector::get(Result);
+}
+
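+/// An illustrative example: shufflevector <2 x i32> <i32 1, i32 2>,
+/// <2 x i32> <i32 3, i32 4>, mask <3, 0> indexes into the concatenation of
+/// its two inputs, so it folds to <2 x i32> <i32 4, i32 1>.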
+Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
+ ArrayRef<int> Mask) {
+ auto *V1VTy = cast<VectorType>(V1->getType());
+ unsigned MaskNumElts = Mask.size();
+ auto MaskEltCount =
+ ElementCount::get(MaskNumElts, isa<ScalableVectorType>(V1VTy));
+ Type *EltTy = V1VTy->getElementType();
+
+ // Undefined shuffle mask -> undefined value.
+ if (all_of(Mask, [](int Elt) { return Elt == UndefMaskElem; })) {
+ return UndefValue::get(VectorType::get(EltTy, MaskEltCount));
+ }
+
+  // If the mask is all zeros, this is a splat; no need to go through all
+  // elements.
+ if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
+ Type *Ty = IntegerType::get(V1->getContext(), 32);
+ Constant *Elt =
+ ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, 0));
+
+ if (Elt->isNullValue()) {
+ auto *VTy = VectorType::get(EltTy, MaskEltCount);
+ return ConstantAggregateZero::get(VTy);
+ } else if (!MaskEltCount.isScalable())
+ return ConstantVector::getSplat(MaskEltCount, Elt);
+ }
+  // Do not iterate on scalable vectors: the number of elements is unknown at
+  // compile time.
+ if (isa<ScalableVectorType>(V1VTy))
+ return nullptr;
+
+ unsigned SrcNumElts = V1VTy->getElementCount().getKnownMinValue();
+
+ // Loop over the shuffle mask, evaluating each element.
+ SmallVector<Constant*, 32> Result;
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ int Elt = Mask[i];
+ if (Elt == -1) {
+ Result.push_back(UndefValue::get(EltTy));
+ continue;
+ }
+ Constant *InElt;
+ if (unsigned(Elt) >= SrcNumElts*2)
+ InElt = UndefValue::get(EltTy);
+ else if (unsigned(Elt) >= SrcNumElts) {
+ Type *Ty = IntegerType::get(V2->getContext(), 32);
+ InElt =
+ ConstantExpr::getExtractElement(V2,
+ ConstantInt::get(Ty, Elt - SrcNumElts));
+ } else {
+ Type *Ty = IntegerType::get(V1->getContext(), 32);
+ InElt = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, Elt));
+ }
+ Result.push_back(InElt);
+ }
+
+ return ConstantVector::get(Result);
+}
+
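+/// An illustrative example: extracting indices {1, 0} from the constant
+/// struct { i32 7, [2 x i8] [i8 1, i8 2] } first selects the array member,
+/// then its element 0, returning i8 1; any unknown element yields nullptr.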
+Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg,
+ ArrayRef<unsigned> Idxs) {
+ // Base case: no indices, so return the entire value.
+ if (Idxs.empty())
+ return Agg;
+
+ if (Constant *C = Agg->getAggregateElement(Idxs[0]))
+ return ConstantFoldExtractValueInstruction(C, Idxs.slice(1));
+
+ return nullptr;
+}
+
+Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
+ Constant *Val,
+ ArrayRef<unsigned> Idxs) {
+ // Base case: no indices, so replace the entire value.
+ if (Idxs.empty())
+ return Val;
+
+ unsigned NumElts;
+ if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
+ NumElts = ST->getNumElements();
+ else
+ NumElts = cast<ArrayType>(Agg->getType())->getNumElements();
+
+ SmallVector<Constant*, 32> Result;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C = Agg->getAggregateElement(i);
+ if (!C) return nullptr;
+
+ if (Idxs[0] == i)
+ C = ConstantFoldInsertValueInstruction(C, Val, Idxs.slice(1));
+
+ Result.push_back(C);
+ }
+
+ if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
+ return ConstantStruct::get(ST, Result);
+ return ConstantArray::get(cast<ArrayType>(Agg->getType()), Result);
+}
+
+Constant *llvm::ConstantFoldUnaryInstruction(unsigned Opcode, Constant *C) {
+ assert(Instruction::isUnaryOp(Opcode) && "Non-unary instruction detected");
+
+ // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
+ // vectors are always evaluated per element.
+ bool IsScalableVector = isa<ScalableVectorType>(C->getType());
+ bool HasScalarUndefOrScalableVectorUndef =
+ (!C->getType()->isVectorTy() || IsScalableVector) && isa<UndefValue>(C);
+
+ if (HasScalarUndefOrScalableVectorUndef) {
+ switch (static_cast<Instruction::UnaryOps>(Opcode)) {
+ case Instruction::FNeg:
+ return C; // -undef -> undef
+ case Instruction::UnaryOpsEnd:
+ llvm_unreachable("Invalid UnaryOp");
+ }
+ }
+
+  // Constant should not be UndefValue, unless it is a vector constant.
+ assert(!HasScalarUndefOrScalableVectorUndef && "Unexpected UndefValue");
+ // We only have FP UnaryOps right now.
+ assert(!isa<ConstantInt>(C) && "Unexpected Integer UnaryOp");
+
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ const APFloat &CV = CFP->getValueAPF();
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::FNeg:
+ return ConstantFP::get(C->getContext(), neg(CV));
+ }
+ } else if (auto *VTy = dyn_cast<FixedVectorType>(C->getType())) {
+ Type *Ty = IntegerType::get(VTy->getContext(), 32);
+ // Fast path for splatted constants.
+ if (Constant *Splat = C->getSplatValue()) {
+ Constant *Elt = ConstantExpr::get(Opcode, Splat);
+ return ConstantVector::getSplat(VTy->getElementCount(), Elt);
+ }
+
+ // Fold each element and create a vector constant from those constants.
+ SmallVector<Constant *, 16> Result;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
+ Constant *ExtractIdx = ConstantInt::get(Ty, i);
+ Constant *Elt = ConstantExpr::getExtractElement(C, ExtractIdx);
+
+ Result.push_back(ConstantExpr::get(Opcode, Elt));
+ }
+
+ return ConstantVector::get(Result);
+ }
+
+ // We don't know how to fold this.
+ return nullptr;
+}
+
+Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
+ Constant *C2) {
+ assert(Instruction::isBinaryOp(Opcode) && "Non-binary instruction detected");
+
+ // Simplify BinOps with their identity values first. They are no-ops and we
+ // can always return the other value, including undef or poison values.
+ // FIXME: remove unnecessary duplicated identity patterns below.
+ // FIXME: Use AllowRHSConstant with getBinOpIdentity to handle additional ops,
+ // like X << 0 = X.
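+  // For example, add C, 0 and mul C, 1 both return C unchanged, even when C
+  // is undef or poison, because an identity operand cannot affect the result.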
+ Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, C1->getType());
+ if (Identity) {
+ if (C1 == Identity)
+ return C2;
+ if (C2 == Identity)
+ return C1;
+ }
+
+ // Binary operations propagate poison.
+ if (isa<PoisonValue>(C1) || isa<PoisonValue>(C2))
+ return PoisonValue::get(C1->getType());
+
+ // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
+ // vectors are always evaluated per element.
+ bool IsScalableVector = isa<ScalableVectorType>(C1->getType());
+ bool HasScalarUndefOrScalableVectorUndef =
+ (!C1->getType()->isVectorTy() || IsScalableVector) &&
+ (isa<UndefValue>(C1) || isa<UndefValue>(C2));
+ if (HasScalarUndefOrScalableVectorUndef) {
+ switch (static_cast<Instruction::BinaryOps>(Opcode)) {
+ case Instruction::Xor:
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
+ // Handle undef ^ undef -> 0 special case. This is a common
+ // idiom (misuse).
+ return Constant::getNullValue(C1->getType());
+ LLVM_FALLTHROUGH;
+ case Instruction::Add:
+ case Instruction::Sub:
+ return UndefValue::get(C1->getType());
+ case Instruction::And:
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef & undef -> undef
+ return C1;
+ return Constant::getNullValue(C1->getType()); // undef & X -> 0
+ case Instruction::Mul: {
+ // undef * undef -> undef
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
+ return C1;
+ const APInt *CV;
+ // X * undef -> undef if X is odd
+ if (match(C1, m_APInt(CV)) || match(C2, m_APInt(CV)))
+ if ((*CV)[0])
+ return UndefValue::get(C1->getType());
+
+ // X * undef -> 0 otherwise
+ return Constant::getNullValue(C1->getType());
+ }
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ // X / undef -> poison
+ // X / 0 -> poison
+ if (match(C2, m_CombineOr(m_Undef(), m_Zero())))
+ return PoisonValue::get(C2->getType());
+ // undef / 1 -> undef
+ if (match(C2, m_One()))
+ return C1;
+ // undef / X -> 0 otherwise
+ return Constant::getNullValue(C1->getType());
+ case Instruction::URem:
+ case Instruction::SRem:
+ // X % undef -> poison
+ // X % 0 -> poison
+ if (match(C2, m_CombineOr(m_Undef(), m_Zero())))
+ return PoisonValue::get(C2->getType());
+ // undef % X -> 0 otherwise
+ return Constant::getNullValue(C1->getType());
+ case Instruction::Or: // X | undef -> -1
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef | undef -> undef
+ return C1;
+ return Constant::getAllOnesValue(C1->getType()); // undef | X -> ~0
+ case Instruction::LShr:
+ // X >>l undef -> poison
+ if (isa<UndefValue>(C2))
+ return PoisonValue::get(C2->getType());
+ // undef >>l 0 -> undef
+ if (match(C2, m_Zero()))
+ return C1;
+ // undef >>l X -> 0
+ return Constant::getNullValue(C1->getType());
+ case Instruction::AShr:
+ // X >>a undef -> poison
+ if (isa<UndefValue>(C2))
+ return PoisonValue::get(C2->getType());
+ // undef >>a 0 -> undef
+ if (match(C2, m_Zero()))
+ return C1;
+ // TODO: undef >>a X -> poison if the shift is exact
+ // undef >>a X -> 0
+ return Constant::getNullValue(C1->getType());
+ case Instruction::Shl:
+      // X << undef -> poison
+ if (isa<UndefValue>(C2))
+ return PoisonValue::get(C2->getType());
+ // undef << 0 -> undef
+ if (match(C2, m_Zero()))
+ return C1;
+ // undef << X -> 0
+ return Constant::getNullValue(C1->getType());
+ case Instruction::FSub:
+ // -0.0 - undef --> undef (consistent with "fneg undef")
+ if (match(C1, m_NegZeroFP()) && isa<UndefValue>(C2))
+ return C2;
+ LLVM_FALLTHROUGH;
+ case Instruction::FAdd:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ // [any flop] undef, undef -> undef
+ if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
+ return C1;
+ // [any flop] C, undef -> NaN
+ // [any flop] undef, C -> NaN
+ // We could potentially specialize NaN/Inf constants vs. 'normal'
+ // constants (possibly differently depending on opcode and operand). This
+ // would allow returning undef sometimes. But it is always safe to fold to
+ // NaN because we can choose the undef operand as NaN, and any FP opcode
+ // with a NaN operand will propagate NaN.
+ return ConstantFP::getNaN(C1->getType());
+ case Instruction::BinaryOpsEnd:
+ llvm_unreachable("Invalid BinaryOp");
+ }
+ }
+
+ // Neither constant should be UndefValue, unless these are vector constants.
+ assert((!HasScalarUndefOrScalableVectorUndef) && "Unexpected UndefValue");
+
+ // Handle simplifications when the RHS is a constant int.
+ if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
+ switch (Opcode) {
+ case Instruction::Add:
+ if (CI2->isZero()) return C1; // X + 0 == X
+ break;
+ case Instruction::Sub:
+ if (CI2->isZero()) return C1; // X - 0 == X
+ break;
+ case Instruction::Mul:
+ if (CI2->isZero()) return C2; // X * 0 == 0
+ if (CI2->isOne())
+ return C1; // X * 1 == X
+ break;
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ if (CI2->isOne())
+ return C1; // X / 1 == X
+ if (CI2->isZero())
+ return PoisonValue::get(CI2->getType()); // X / 0 == poison
+ break;
+ case Instruction::URem:
+ case Instruction::SRem:
+ if (CI2->isOne())
+ return Constant::getNullValue(CI2->getType()); // X % 1 == 0
+ if (CI2->isZero())
+ return PoisonValue::get(CI2->getType()); // X % 0 == poison
+ break;
+ case Instruction::And:
+ if (CI2->isZero()) return C2; // X & 0 == 0
+ if (CI2->isMinusOne())
+ return C1; // X & -1 == X
+
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ // (zext i32 to i64) & 4294967295 -> (zext i32 to i64)
+ if (CE1->getOpcode() == Instruction::ZExt) {
+ unsigned DstWidth = CI2->getType()->getBitWidth();
+ unsigned SrcWidth =
+ CE1->getOperand(0)->getType()->getPrimitiveSizeInBits();
+ APInt PossiblySetBits(APInt::getLowBitsSet(DstWidth, SrcWidth));
+ if ((PossiblySetBits & CI2->getValue()) == PossiblySetBits)
+ return C1;
+ }
+
+ // If and'ing the address of a global with a constant, fold it.
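+          // An illustrative example: if @g is known to be 8-byte aligned,
+          // and (ptrtoint @g to i64), 7 folds to 0, because the low three
+          // bits of @g's address must be zero.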
+ if (CE1->getOpcode() == Instruction::PtrToInt &&
+ isa<GlobalValue>(CE1->getOperand(0))) {
+ GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0));
+
+ MaybeAlign GVAlign;
+
+ if (Module *TheModule = GV->getParent()) {
+ const DataLayout &DL = TheModule->getDataLayout();
+ GVAlign = GV->getPointerAlignment(DL);
+
+ // If the function alignment is not specified then assume that it
+ // is 4.
+ // This is dangerous; on x86, the alignment of the pointer
+ // corresponds to the alignment of the function, but might be less
+ // than 4 if it isn't explicitly specified.
+ // However, a fix for this behaviour was reverted because it
+ // increased code size (see https://reviews.llvm.org/D55115)
+ // FIXME: This code should be deleted once existing targets have
+ // appropriate defaults
+ if (isa<Function>(GV) && !DL.getFunctionPtrAlign())
+ GVAlign = Align(4);
+ } else if (isa<Function>(GV)) {
+ // Without a datalayout we have to assume the worst case: that the
+ // function pointer isn't aligned at all.
+ GVAlign = llvm::None;
+ } else if (isa<GlobalVariable>(GV)) {
+ GVAlign = cast<GlobalVariable>(GV)->getAlign();
+ }
+
+ if (GVAlign && *GVAlign > 1) {
+ unsigned DstWidth = CI2->getType()->getBitWidth();
+ unsigned SrcWidth = std::min(DstWidth, Log2(*GVAlign));
+ APInt BitsNotSet(APInt::getLowBitsSet(DstWidth, SrcWidth));
+
+ // If checking bits we know are clear, return zero.
+ if ((CI2->getValue() & BitsNotSet) == CI2->getValue())
+ return Constant::getNullValue(CI2->getType());
+ }
+ }
+ }
+ break;
+ case Instruction::Or:
+ if (CI2->isZero()) return C1; // X | 0 == X
+ if (CI2->isMinusOne())
+ return C2; // X | -1 == -1
+ break;
+ case Instruction::Xor:
+ if (CI2->isZero()) return C1; // X ^ 0 == X
+
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ switch (CE1->getOpcode()) {
+ default: break;
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ // cmp pred ^ true -> cmp !pred
+ assert(CI2->isOne());
+ CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate();
+ pred = CmpInst::getInversePredicate(pred);
+ return ConstantExpr::getCompare(pred, CE1->getOperand(0),
+ CE1->getOperand(1));
+ }
+ }
+ break;
+ case Instruction::AShr:
+      // ashr (zext C to Ty), C2 -> lshr (zext C to Ty), C2
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1))
+ if (CE1->getOpcode() == Instruction::ZExt) // Top bits known zero.
+ return ConstantExpr::getLShr(C1, C2);
+ break;
+ }
+ } else if (isa<ConstantInt>(C1)) {
+ // If C1 is a ConstantInt and C2 is not, swap the operands.
+ if (Instruction::isCommutative(Opcode))
+ return ConstantExpr::get(Opcode, C2, C1);
+ }
+
+ if (ConstantInt *CI1 = dyn_cast<ConstantInt>(C1)) {
+ if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
+ const APInt &C1V = CI1->getValue();
+ const APInt &C2V = CI2->getValue();
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::Add:
+ return ConstantInt::get(CI1->getContext(), C1V + C2V);
+ case Instruction::Sub:
+ return ConstantInt::get(CI1->getContext(), C1V - C2V);
+ case Instruction::Mul:
+ return ConstantInt::get(CI1->getContext(), C1V * C2V);
+ case Instruction::UDiv:
+ assert(!CI2->isZero() && "Div by zero handled above");
+ return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
+ case Instruction::SDiv:
+ assert(!CI2->isZero() && "Div by zero handled above");
+ if (C2V.isAllOnes() && C1V.isMinSignedValue())
+ return PoisonValue::get(CI1->getType()); // MIN_INT / -1 -> poison
+ return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
+ case Instruction::URem:
+ assert(!CI2->isZero() && "Div by zero handled above");
+ return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
+ case Instruction::SRem:
+ assert(!CI2->isZero() && "Div by zero handled above");
+ if (C2V.isAllOnes() && C1V.isMinSignedValue())
+ return PoisonValue::get(CI1->getType()); // MIN_INT % -1 -> poison
+ return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
+ case Instruction::And:
+ return ConstantInt::get(CI1->getContext(), C1V & C2V);
+ case Instruction::Or:
+ return ConstantInt::get(CI1->getContext(), C1V | C2V);
+ case Instruction::Xor:
+ return ConstantInt::get(CI1->getContext(), C1V ^ C2V);
+ case Instruction::Shl:
+ if (C2V.ult(C1V.getBitWidth()))
+ return ConstantInt::get(CI1->getContext(), C1V.shl(C2V));
+ return PoisonValue::get(C1->getType()); // too big shift is poison
+ case Instruction::LShr:
+ if (C2V.ult(C1V.getBitWidth()))
+ return ConstantInt::get(CI1->getContext(), C1V.lshr(C2V));
+ return PoisonValue::get(C1->getType()); // too big shift is poison
+ case Instruction::AShr:
+ if (C2V.ult(C1V.getBitWidth()))
+ return ConstantInt::get(CI1->getContext(), C1V.ashr(C2V));
+ return PoisonValue::get(C1->getType()); // too big shift is poison
+ }
+ }
+
+ switch (Opcode) {
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Shl:
+ if (CI1->isZero()) return C1;
+ break;
+ default:
+ break;
+ }
+ } else if (ConstantFP *CFP1 = dyn_cast<ConstantFP>(C1)) {
+ if (ConstantFP *CFP2 = dyn_cast<ConstantFP>(C2)) {
+ const APFloat &C1V = CFP1->getValueAPF();
+ const APFloat &C2V = CFP2->getValueAPF();
+ APFloat C3V = C1V; // copy for modification
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::FAdd:
+ (void)C3V.add(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FSub:
+ (void)C3V.subtract(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FMul:
+ (void)C3V.multiply(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FDiv:
+ (void)C3V.divide(C2V, APFloat::rmNearestTiesToEven);
+ return ConstantFP::get(C1->getContext(), C3V);
+ case Instruction::FRem:
+ (void)C3V.mod(C2V);
+ return ConstantFP::get(C1->getContext(), C3V);
+ }
+ }
+ } else if (auto *VTy = dyn_cast<VectorType>(C1->getType())) {
+ // Fast path for splatted constants.
+ if (Constant *C2Splat = C2->getSplatValue()) {
+ if (Instruction::isIntDivRem(Opcode) && C2Splat->isNullValue())
+ return PoisonValue::get(VTy);
+ if (Constant *C1Splat = C1->getSplatValue()) {
+ Constant *Res =
+ ConstantExpr::isDesirableBinOp(Opcode)
+ ? ConstantExpr::get(Opcode, C1Splat, C2Splat)
+ : ConstantFoldBinaryInstruction(Opcode, C1Splat, C2Splat);
+ if (!Res)
+ return nullptr;
+ return ConstantVector::getSplat(VTy->getElementCount(), Res);
+ }
+ }
+
+ if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
+ // Fold each element and create a vector constant from those constants.
+ SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(FVTy->getContext(), 32);
+ for (unsigned i = 0, e = FVTy->getNumElements(); i != e; ++i) {
+ Constant *ExtractIdx = ConstantInt::get(Ty, i);
+ Constant *LHS = ConstantExpr::getExtractElement(C1, ExtractIdx);
+ Constant *RHS = ConstantExpr::getExtractElement(C2, ExtractIdx);
+
+ // If any element of a divisor vector is zero, the whole op is poison.
+ if (Instruction::isIntDivRem(Opcode) && RHS->isNullValue())
+ return PoisonValue::get(VTy);
+
+ Constant *Res = ConstantExpr::isDesirableBinOp(Opcode)
+ ? ConstantExpr::get(Opcode, LHS, RHS)
+ : ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
+ if (!Res)
+ return nullptr;
+ Result.push_back(Res);
+ }
+
+ return ConstantVector::get(Result);
+ }
+ }
+
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ // There are many possible foldings we could do here. We should probably
+ // at least fold add of a pointer with an integer into the appropriate
+ // getelementptr. This will improve alias analysis a bit.
+
+ // Given ((a + b) + c), if (b + c) folds to something interesting, return
+ // (a + (b + c)).
+ if (Instruction::isAssociative(Opcode) && CE1->getOpcode() == Opcode) {
+ Constant *T = ConstantExpr::get(Opcode, CE1->getOperand(1), C2);
+ if (!isa<ConstantExpr>(T) || cast<ConstantExpr>(T)->getOpcode() != Opcode)
+ return ConstantExpr::get(Opcode, CE1->getOperand(0), T);
+ }
+ } else if (isa<ConstantExpr>(C2)) {
+ // If C2 is a constant expr and C1 isn't, flop them around and fold the
+ // other way if possible.
+ if (Instruction::isCommutative(Opcode))
+ return ConstantFoldBinaryInstruction(Opcode, C2, C1);
+ }
+
+ // i1 can be simplified in many cases.
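+  // For example, add i1 C1, C2 wraps modulo 2, which is exactly xor C1, C2
+  // (1 + 1 == 0), and mul i1 C1, C2 is 1 only when both inputs are 1, which
+  // is exactly and C1, C2.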
+ if (C1->getType()->isIntegerTy(1)) {
+ switch (Opcode) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ return ConstantExpr::getXor(C1, C2);
+ case Instruction::Mul:
+ return ConstantExpr::getAnd(C1, C2);
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ // We can assume that C2 == 0. If it were one the result would be
+ // undefined because the shift value is as large as the bitwidth.
+ return C1;
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ // We can assume that C2 == 1. If it were zero the result would be
+ // undefined through division by zero.
+ return C1;
+ case Instruction::URem:
+ case Instruction::SRem:
+ // We can assume that C2 == 1. If it were zero the result would be
+ // undefined through division by zero.
+ return ConstantInt::getFalse(C1->getContext());
+ default:
+ break;
+ }
+ }
+
+ // We don't know how to fold this.
+ return nullptr;
+}
+
+/// This function determines if there is anything we can decide about the two
+/// constants provided. This doesn't need to handle simple things like
+/// ConstantFP comparisons, but should instead handle ConstantExprs.
+/// If we can determine that the two constants have a particular relation to
+/// each other, we should return the corresponding FCmpInst predicate,
+/// otherwise return FCmpInst::BAD_FCMP_PREDICATE. This is used below in
+/// ConstantFoldCompareInstruction.
+///
+/// To simplify this code we canonicalize the relation so that the first
+/// operand is always the most "complex" of the two. We consider ConstantFP
+/// to be the simplest, and ConstantExprs to be the most complex.
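+///
+/// An illustrative example: for the ConstantFPs 1.0 and 2.0 this returns
+/// FCMP_OLT, while any constant compared against itself only yields
+/// FCMP_UEQ, since a constant expression might evaluate to NaN.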
+static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
+ assert(V1->getType() == V2->getType() &&
+ "Cannot compare values of different types!");
+
+ // We do not know if a constant expression will evaluate to a number or NaN.
+ // Therefore, we can only say that the relation is unordered or equal.
+ if (V1 == V2) return FCmpInst::FCMP_UEQ;
+
+ if (!isa<ConstantExpr>(V1)) {
+ if (!isa<ConstantExpr>(V2)) {
+ // Simple case, use the standard constant folder.
+ ConstantInt *R = nullptr;
+ R = dyn_cast<ConstantInt>(
+ ConstantExpr::getFCmp(FCmpInst::FCMP_OEQ, V1, V2));
+ if (R && !R->isZero())
+ return FCmpInst::FCMP_OEQ;
+ R = dyn_cast<ConstantInt>(
+ ConstantExpr::getFCmp(FCmpInst::FCMP_OLT, V1, V2));
+ if (R && !R->isZero())
+ return FCmpInst::FCMP_OLT;
+ R = dyn_cast<ConstantInt>(
+ ConstantExpr::getFCmp(FCmpInst::FCMP_OGT, V1, V2));
+ if (R && !R->isZero())
+ return FCmpInst::FCMP_OGT;
+
+ // Nothing more we can do
+ return FCmpInst::BAD_FCMP_PREDICATE;
+ }
+
+ // If the first operand is simple and second is ConstantExpr, swap operands.
+ FCmpInst::Predicate SwappedRelation = evaluateFCmpRelation(V2, V1);
+ if (SwappedRelation != FCmpInst::BAD_FCMP_PREDICATE)
+ return FCmpInst::getSwappedPredicate(SwappedRelation);
+ } else {
+ // Ok, the LHS is known to be a constantexpr. The RHS can be any of a
+ // constantexpr or a simple constant.
+ ConstantExpr *CE1 = cast<ConstantExpr>(V1);
+ switch (CE1->getOpcode()) {
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ // We might be able to do something with these but we don't right now.
+ break;
+ default:
+ break;
+ }
+ }
+ // There are MANY other foldings that we could perform here. They will
+ // probably be added on demand, as they seem needed.
+ return FCmpInst::BAD_FCMP_PREDICATE;
+}
+
+static ICmpInst::Predicate areGlobalsPotentiallyEqual(const GlobalValue *GV1,
+ const GlobalValue *GV2) {
+ auto isGlobalUnsafeForEquality = [](const GlobalValue *GV) {
+ if (GV->isInterposable() || GV->hasGlobalUnnamedAddr())
+ return true;
+ if (const auto *GVar = dyn_cast<GlobalVariable>(GV)) {
+ Type *Ty = GVar->getValueType();
+ // A global with opaque type might end up being zero sized.
+ if (!Ty->isSized())
+ return true;
+ // A global with an empty type might lie at the address of any other
+ // global.
+ if (Ty->isEmptyTy())
+ return true;
+ }
+ return false;
+ };
+ // Don't try to decide equality of aliases.
+ if (!isa<GlobalAlias>(GV1) && !isa<GlobalAlias>(GV2))
+ if (!isGlobalUnsafeForEquality(GV1) && !isGlobalUnsafeForEquality(GV2))
+ return ICmpInst::ICMP_NE;
+ return ICmpInst::BAD_ICMP_PREDICATE;
+}
+
+/// This function determines if there is anything we can decide about the two
+/// constants provided. This doesn't need to handle simple things like integer
+/// comparisons, but should instead handle ConstantExprs and GlobalValues.
+/// If we can determine that the two constants have a particular relation to
+/// each other, we should return the corresponding ICmp predicate, otherwise
+/// return ICmpInst::BAD_ICMP_PREDICATE.
+///
+/// To simplify this code we canonicalize the relation so that the first
+/// operand is always the most "complex" of the two. We consider simple
+/// constants (like ConstantInt) to be the simplest, followed by
+/// GlobalValues, followed by ConstantExpr's (the most complex).
+///
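+/// An illustrative example: two distinct, non-interposable global variables
+/// with sized, non-empty value types evaluate to ICMP_NE, since they must
+/// reside at distinct addresses.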
+static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
+ bool isSigned) {
+ assert(V1->getType() == V2->getType() &&
+ "Cannot compare different types of values!");
+ if (V1 == V2) return ICmpInst::ICMP_EQ;
+
+ if (!isa<ConstantExpr>(V1) && !isa<GlobalValue>(V1) &&
+ !isa<BlockAddress>(V1)) {
+ if (!isa<GlobalValue>(V2) && !isa<ConstantExpr>(V2) &&
+ !isa<BlockAddress>(V2)) {
+ // We distilled this down to a simple case, use the standard constant
+ // folder.
+ ConstantInt *R = nullptr;
+ ICmpInst::Predicate pred = ICmpInst::ICMP_EQ;
+ R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
+ if (R && !R->isZero())
+ return pred;
+ pred = isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
+ R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
+ if (R && !R->isZero())
+ return pred;
+ pred = isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
+ if (R && !R->isZero())
+ return pred;
+
+ // If we couldn't figure it out, bail.
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+
+ // If the first operand is simple, swap operands.
+ ICmpInst::Predicate SwappedRelation =
+ evaluateICmpRelation(V2, V1, isSigned);
+ if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
+ return ICmpInst::getSwappedPredicate(SwappedRelation);
+
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V1)) {
+ if (isa<ConstantExpr>(V2)) { // Swap as necessary.
+ ICmpInst::Predicate SwappedRelation =
+ evaluateICmpRelation(V2, V1, isSigned);
+ if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
+ return ICmpInst::getSwappedPredicate(SwappedRelation);
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+
+ // Now we know that the RHS is a GlobalValue, BlockAddress or simple
+ // constant (which, since the types must match, means that it's a
+ // ConstantPointerNull).
+ if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
+ return areGlobalsPotentiallyEqual(GV, GV2);
+ } else if (isa<BlockAddress>(V2)) {
+ return ICmpInst::ICMP_NE; // Globals never equal labels.
+ } else {
+ assert(isa<ConstantPointerNull>(V2) && "Canonicalization guarantee!");
+ // GlobalVals can never be null unless they have external weak linkage.
+ // We don't try to evaluate aliases here.
+ // NOTE: We should not be doing this constant folding if null pointer
+ // is considered valid for the function. But currently there is no way to
+ // query it from the Constant type.
+ if (!GV->hasExternalWeakLinkage() && !isa<GlobalAlias>(GV) &&
+ !NullPointerIsDefined(nullptr /* F */,
+ GV->getType()->getAddressSpace()))
+ return ICmpInst::ICMP_UGT;
+ }
+ } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(V1)) {
+ if (isa<ConstantExpr>(V2)) { // Swap as necessary.
+ ICmpInst::Predicate SwappedRelation =
+ evaluateICmpRelation(V2, V1, isSigned);
+ if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
+ return ICmpInst::getSwappedPredicate(SwappedRelation);
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+
+ // Now we know that the RHS is a GlobalValue, BlockAddress or simple
+ // constant (which, since the types must match, means that it is a
+ // ConstantPointerNull).
+ if (const BlockAddress *BA2 = dyn_cast<BlockAddress>(V2)) {
+      // A block address in another function can't equal this one, but block
+      // addresses in the current function might be the same if blocks are
+      // empty.
+ if (BA2->getFunction() != BA->getFunction())
+ return ICmpInst::ICMP_NE;
+ } else {
+      // Block addresses aren't null and don't equal the address of globals.
+ assert((isa<ConstantPointerNull>(V2) || isa<GlobalValue>(V2)) &&
+ "Canonicalization guarantee!");
+ return ICmpInst::ICMP_NE;
+ }
+ } else {
+ // Ok, the LHS is known to be a constantexpr. The RHS can be any of a
+ // constantexpr, a global, block address, or a simple constant.
+ ConstantExpr *CE1 = cast<ConstantExpr>(V1);
+ Constant *CE1Op0 = CE1->getOperand(0);
+
+ switch (CE1->getOpcode()) {
+ case Instruction::Trunc:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ break; // We can't evaluate floating point casts or truncations.
+
+ case Instruction::BitCast:
+ // If this is a global value cast, check to see if the RHS is also a
+ // GlobalValue.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0))
+ if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2))
+ return areGlobalsPotentiallyEqual(GV, GV2);
+ LLVM_FALLTHROUGH;
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ // We can't evaluate floating point casts or truncations.
+ if (CE1Op0->getType()->isFPOrFPVectorTy())
+ break;
+
+      // If the cast is not actually changing bits, and the second operand is a
+      // null pointer, do the comparison with the pre-cast value.
+ if (V2->isNullValue() && CE1->getType()->isIntOrPtrTy()) {
+ if (CE1->getOpcode() == Instruction::ZExt) isSigned = false;
+ if (CE1->getOpcode() == Instruction::SExt) isSigned = true;
+ return evaluateICmpRelation(CE1Op0,
+ Constant::getNullValue(CE1Op0->getType()),
+ isSigned);
+ }
+ break;
+
+ case Instruction::GetElementPtr: {
+ GEPOperator *CE1GEP = cast<GEPOperator>(CE1);
+ // Ok, since this is a getelementptr, we know that the constant has a
+ // pointer type. Check the various cases.
+ if (isa<ConstantPointerNull>(V2)) {
+ // If we are comparing a GEP to a null pointer, check to see if the base
+ // of the GEP equals the null pointer.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
+        // If it's not external weak linkage and the GEP is inbounds, the
+        // global must have a non-zero address, so the result is greater-than.
+ if (!GV->hasExternalWeakLinkage() && CE1GEP->isInBounds())
+ return ICmpInst::ICMP_UGT;
+ }
+ } else if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
+ if (GV != GV2) {
+ if (CE1GEP->hasAllZeroIndices())
+ return areGlobalsPotentiallyEqual(GV, GV2);
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+ }
+ } else if (const auto *CE2GEP = dyn_cast<GEPOperator>(V2)) {
+ // By far the most common case to handle is when the base pointers are
+ // obviously to the same global.
+ const Constant *CE2Op0 = cast<Constant>(CE2GEP->getPointerOperand());
+ if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) {
+ // Don't know relative ordering, but check for inequality.
+ if (CE1Op0 != CE2Op0) {
+ if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices())
+ return areGlobalsPotentiallyEqual(cast<GlobalValue>(CE1Op0),
+ cast<GlobalValue>(CE2Op0));
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ }
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ return ICmpInst::BAD_ICMP_PREDICATE;
+}
+
+Constant *llvm::ConstantFoldCompareInstruction(CmpInst::Predicate Predicate,
+ Constant *C1, Constant *C2) {
+ Type *ResultTy;
+ if (VectorType *VT = dyn_cast<VectorType>(C1->getType()))
+ ResultTy = VectorType::get(Type::getInt1Ty(C1->getContext()),
+ VT->getElementCount());
+ else
+ ResultTy = Type::getInt1Ty(C1->getContext());
+
+ // Fold FCMP_FALSE/FCMP_TRUE unconditionally.
+ if (Predicate == FCmpInst::FCMP_FALSE)
+ return Constant::getNullValue(ResultTy);
+
+ if (Predicate == FCmpInst::FCMP_TRUE)
+ return Constant::getAllOnesValue(ResultTy);
+
+ // Handle some degenerate cases first
+ if (isa<PoisonValue>(C1) || isa<PoisonValue>(C2))
+ return PoisonValue::get(ResultTy);
+
+ if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
+ bool isIntegerPredicate = ICmpInst::isIntPredicate(Predicate);
+ // For EQ and NE, we can always pick a value for the undef to make the
+ // predicate pass or fail, so we can return undef.
+ // Also, if both operands are undef, we can return undef for int comparison.
+ if (ICmpInst::isEquality(Predicate) || (isIntegerPredicate && C1 == C2))
+ return UndefValue::get(ResultTy);
+
+ // Otherwise, for integer compare, pick the same value as the non-undef
+ // operand, and fold it to true or false.
+ if (isIntegerPredicate)
+ return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(Predicate));
+
+    // Choosing NaN for the undef will always make unordered comparisons
+    // succeed and ordered comparisons fail.
+ return ConstantInt::get(ResultTy, CmpInst::isUnordered(Predicate));
+ }
+
+ // icmp eq/ne(null,GV) -> false/true
+ if (C1->isNullValue()) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C2))
+ // Don't try to evaluate aliases. External weak GV can be null.
+ if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
+ !NullPointerIsDefined(nullptr /* F */,
+ GV->getType()->getAddressSpace())) {
+ if (Predicate == ICmpInst::ICMP_EQ)
+ return ConstantInt::getFalse(C1->getContext());
+ else if (Predicate == ICmpInst::ICMP_NE)
+ return ConstantInt::getTrue(C1->getContext());
+ }
+ // icmp eq/ne(GV,null) -> false/true
+ } else if (C2->isNullValue()) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C1)) {
+ // Don't try to evaluate aliases. External weak GV can be null.
+ if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
+ !NullPointerIsDefined(nullptr /* F */,
+ GV->getType()->getAddressSpace())) {
+ if (Predicate == ICmpInst::ICMP_EQ)
+ return ConstantInt::getFalse(C1->getContext());
+ else if (Predicate == ICmpInst::ICMP_NE)
+ return ConstantInt::getTrue(C1->getContext());
+ }
+ }
+
+ // The caller is expected to commute the operands if the constant expression
+ // is C2.
+ // C1 >= 0 --> true
+ if (Predicate == ICmpInst::ICMP_UGE)
+ return Constant::getAllOnesValue(ResultTy);
+ // C1 < 0 --> false
+ if (Predicate == ICmpInst::ICMP_ULT)
+ return Constant::getNullValue(ResultTy);
+ }
+
+  // If this is a comparison between two i1's, simplify it.
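+  // For example, icmp ne i1 C1, C2 computes the same bit as xor C1, C2, and
+  // icmp eq i1 C1, true reduces to C1 (xor with the inverted constant).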
+ if (C1->getType()->isIntegerTy(1)) {
+ switch (Predicate) {
+ case ICmpInst::ICMP_EQ:
+ if (isa<ConstantInt>(C2))
+ return ConstantExpr::getXor(C1, ConstantExpr::getNot(C2));
+ return ConstantExpr::getXor(ConstantExpr::getNot(C1), C2);
+ case ICmpInst::ICMP_NE:
+ return ConstantExpr::getXor(C1, C2);
+ default:
+ break;
+ }
+ }
+
+ if (isa<ConstantInt>(C1) && isa<ConstantInt>(C2)) {
+ const APInt &V1 = cast<ConstantInt>(C1)->getValue();
+ const APInt &V2 = cast<ConstantInt>(C2)->getValue();
+ return ConstantInt::get(ResultTy, ICmpInst::compare(V1, V2, Predicate));
+ } else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) {
+ const APFloat &C1V = cast<ConstantFP>(C1)->getValueAPF();
+ const APFloat &C2V = cast<ConstantFP>(C2)->getValueAPF();
+ return ConstantInt::get(ResultTy, FCmpInst::compare(C1V, C2V, Predicate));
+ } else if (auto *C1VTy = dyn_cast<VectorType>(C1->getType())) {
+
+ // Fast path for splatted constants.
+ if (Constant *C1Splat = C1->getSplatValue())
+ if (Constant *C2Splat = C2->getSplatValue())
+ return ConstantVector::getSplat(
+ C1VTy->getElementCount(),
+ ConstantExpr::getCompare(Predicate, C1Splat, C2Splat));
+
+ // Do not iterate on scalable vector. The number of elements is unknown at
+ // compile-time.
+ if (isa<ScalableVectorType>(C1VTy))
+ return nullptr;
+
+ // If we can constant fold the comparison of each element, constant fold
+ // the whole vector comparison.
+ SmallVector<Constant*, 4> ResElts;
+ Type *Ty = IntegerType::get(C1->getContext(), 32);
+ // Compare the elements, producing an i1 result or constant expr.
+ for (unsigned I = 0, E = C1VTy->getElementCount().getKnownMinValue();
+ I != E; ++I) {
+ Constant *C1E =
+ ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, I));
+ Constant *C2E =
+ ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, I));
+
+ ResElts.push_back(ConstantExpr::getCompare(Predicate, C1E, C2E));
+ }
+
+ return ConstantVector::get(ResElts);
+ }
+
+ if (C1->getType()->isFloatingPointTy() &&
+      // Only call evaluateFCmpRelation if we have a constant expr, to avoid
+      // infinite recursion.
+ (isa<ConstantExpr>(C1) || isa<ConstantExpr>(C2))) {
+ int Result = -1; // -1 = unknown, 0 = known false, 1 = known true.
+ switch (evaluateFCmpRelation(C1, C2)) {
+ default: llvm_unreachable("Unknown relation!");
+ case FCmpInst::FCMP_UNO:
+ case FCmpInst::FCMP_ORD:
+ case FCmpInst::FCMP_UNE:
+ case FCmpInst::FCMP_ULT:
+ case FCmpInst::FCMP_UGT:
+ case FCmpInst::FCMP_ULE:
+ case FCmpInst::FCMP_UGE:
+ case FCmpInst::FCMP_TRUE:
+ case FCmpInst::FCMP_FALSE:
+ case FCmpInst::BAD_FCMP_PREDICATE:
+ break; // Couldn't determine anything about these constants.
+ case FCmpInst::FCMP_OEQ: // We know that C1 == C2
+ Result =
+ (Predicate == FCmpInst::FCMP_UEQ || Predicate == FCmpInst::FCMP_OEQ ||
+ Predicate == FCmpInst::FCMP_ULE || Predicate == FCmpInst::FCMP_OLE ||
+ Predicate == FCmpInst::FCMP_UGE || Predicate == FCmpInst::FCMP_OGE);
+ break;
+ case FCmpInst::FCMP_OLT: // We know that C1 < C2
+ Result =
+ (Predicate == FCmpInst::FCMP_UNE || Predicate == FCmpInst::FCMP_ONE ||
+ Predicate == FCmpInst::FCMP_ULT || Predicate == FCmpInst::FCMP_OLT ||
+ Predicate == FCmpInst::FCMP_ULE || Predicate == FCmpInst::FCMP_OLE);
+ break;
+ case FCmpInst::FCMP_OGT: // We know that C1 > C2
+ Result =
+ (Predicate == FCmpInst::FCMP_UNE || Predicate == FCmpInst::FCMP_ONE ||
+ Predicate == FCmpInst::FCMP_UGT || Predicate == FCmpInst::FCMP_OGT ||
+ Predicate == FCmpInst::FCMP_UGE || Predicate == FCmpInst::FCMP_OGE);
+ break;
+ case FCmpInst::FCMP_OLE: // We know that C1 <= C2
+ // We can only partially decide this relation.
+ if (Predicate == FCmpInst::FCMP_UGT || Predicate == FCmpInst::FCMP_OGT)
+ Result = 0;
+ else if (Predicate == FCmpInst::FCMP_ULT ||
+ Predicate == FCmpInst::FCMP_OLT)
+ Result = 1;
+ break;
+    case FCmpInst::FCMP_OGE: // We know that C1 >= C2
+ // We can only partially decide this relation.
+ if (Predicate == FCmpInst::FCMP_ULT || Predicate == FCmpInst::FCMP_OLT)
+ Result = 0;
+ else if (Predicate == FCmpInst::FCMP_UGT ||
+ Predicate == FCmpInst::FCMP_OGT)
+ Result = 1;
+ break;
+ case FCmpInst::FCMP_ONE: // We know that C1 != C2
+ // We can only partially decide this relation.
+ if (Predicate == FCmpInst::FCMP_OEQ || Predicate == FCmpInst::FCMP_UEQ)
+ Result = 0;
+ else if (Predicate == FCmpInst::FCMP_ONE ||
+ Predicate == FCmpInst::FCMP_UNE)
+ Result = 1;
+ break;
+ case FCmpInst::FCMP_UEQ: // We know that C1 == C2 || isUnordered(C1, C2).
+ // We can only partially decide this relation.
+ if (Predicate == FCmpInst::FCMP_ONE)
+ Result = 0;
+ else if (Predicate == FCmpInst::FCMP_UEQ)
+ Result = 1;
+ break;
+ }
+
+ // If we evaluated the result, return it now.
+ if (Result != -1)
+ return ConstantInt::get(ResultTy, Result);
+
+ } else {
+ // Evaluate the relation between the two constants, per the predicate.
+ int Result = -1; // -1 = unknown, 0 = known false, 1 = known true.
+ switch (evaluateICmpRelation(C1, C2, CmpInst::isSigned(Predicate))) {
+ default: llvm_unreachable("Unknown relational!");
+ case ICmpInst::BAD_ICMP_PREDICATE:
+ break; // Couldn't determine anything about these constants.
+ case ICmpInst::ICMP_EQ: // We know the constants are equal!
+ // If we know the constants are equal, we can decide the result of this
+ // computation precisely.
+ Result = ICmpInst::isTrueWhenEqual(Predicate);
+ break;
+ case ICmpInst::ICMP_ULT:
+ switch (Predicate) {
+ case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_ULE:
+ Result = 1; break;
+ case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_UGE:
+ Result = 0; break;
+ default:
+ break;
+ }
+ break;
+ case ICmpInst::ICMP_SLT:
+ switch (Predicate) {
+ case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SLE:
+ Result = 1; break;
+ case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SGE:
+ Result = 0; break;
+ default:
+ break;
+ }
+ break;
+ case ICmpInst::ICMP_UGT:
+ switch (Predicate) {
+ case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_UGE:
+ Result = 1; break;
+ case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_ULE:
+ Result = 0; break;
+ default:
+ break;
+ }
+ break;
+ case ICmpInst::ICMP_SGT:
+ switch (Predicate) {
+ case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SGE:
+ Result = 1; break;
+ case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SLE:
+ Result = 0; break;
+ default:
+ break;
+ }
+ break;
+ case ICmpInst::ICMP_ULE:
+ if (Predicate == ICmpInst::ICMP_UGT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_ULT || Predicate == ICmpInst::ICMP_ULE)
+ Result = 1;
+ break;
+ case ICmpInst::ICMP_SLE:
+ if (Predicate == ICmpInst::ICMP_SGT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_SLT || Predicate == ICmpInst::ICMP_SLE)
+ Result = 1;
+ break;
+ case ICmpInst::ICMP_UGE:
+ if (Predicate == ICmpInst::ICMP_ULT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_UGT || Predicate == ICmpInst::ICMP_UGE)
+ Result = 1;
+ break;
+ case ICmpInst::ICMP_SGE:
+ if (Predicate == ICmpInst::ICMP_SLT)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_SGT || Predicate == ICmpInst::ICMP_SGE)
+ Result = 1;
+ break;
+ case ICmpInst::ICMP_NE:
+ if (Predicate == ICmpInst::ICMP_EQ)
+ Result = 0;
+ if (Predicate == ICmpInst::ICMP_NE)
+ Result = 1;
+ break;
+ }
+
+ // If we evaluated the result, return it now.
+ if (Result != -1)
+ return ConstantInt::get(ResultTy, Result);
+
+ // If the right hand side is a bitcast, try using its inverse to simplify
+ // it by moving it to the left hand side. We can't do this if it would turn
+    // a vector compare into a scalar compare or vice versa, or if it would turn
+ // the operands into FP values.
+ if (ConstantExpr *CE2 = dyn_cast<ConstantExpr>(C2)) {
+ Constant *CE2Op0 = CE2->getOperand(0);
+ if (CE2->getOpcode() == Instruction::BitCast &&
+ CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy() &&
+ !CE2Op0->getType()->isFPOrFPVectorTy()) {
+ Constant *Inverse = ConstantExpr::getBitCast(C1, CE2Op0->getType());
+ return ConstantExpr::getICmp(Predicate, Inverse, CE2Op0);
+ }
+ }
+
+ // If the left hand side is an extension, try eliminating it.
+ if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
+ if ((CE1->getOpcode() == Instruction::SExt &&
+ ICmpInst::isSigned(Predicate)) ||
+ (CE1->getOpcode() == Instruction::ZExt &&
+ !ICmpInst::isSigned(Predicate))) {
+ Constant *CE1Op0 = CE1->getOperand(0);
+ Constant *CE1Inverse = ConstantExpr::getTrunc(CE1, CE1Op0->getType());
+ if (CE1Inverse == CE1Op0) {
+ // Check whether we can safely truncate the right hand side.
+ Constant *C2Inverse = ConstantExpr::getTrunc(C2, CE1Op0->getType());
+ if (ConstantExpr::getCast(CE1->getOpcode(), C2Inverse,
+ C2->getType()) == C2)
+ return ConstantExpr::getICmp(Predicate, CE1Inverse, C2Inverse);
+ }
+ }
+ }
+
+ if ((!isa<ConstantExpr>(C1) && isa<ConstantExpr>(C2)) ||
+ (C1->isNullValue() && !C2->isNullValue())) {
+ // If C2 is a constant expr and C1 isn't, flip them around and fold the
+ // other way if possible.
+ // Also, if C1 is null and C2 isn't, flip them around.
+ Predicate = ICmpInst::getSwappedPredicate(Predicate);
+ return ConstantExpr::getICmp(Predicate, C2, C1);
+ }
+ }
+ return nullptr;
+}
+
+/// Test whether the given sequence of *normalized* indices is "inbounds".
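+/// For example, under these rules (0, 3, 2) and (1, 0, 0) are in bounds,
+/// while (2, 0, 0) is not: a leading index of 2 steps past one-past-the-end.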
+template<typename IndexTy>
+static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
+ // No indices means nothing that could be out of bounds.
+ if (Idxs.empty()) return true;
+
+ // If the first index is zero, it's in bounds.
+ if (cast<Constant>(Idxs[0])->isNullValue()) return true;
+
+ // If the first index is one and all the rest are zero, it's in bounds,
+ // by the one-past-the-end rule.
+ if (auto *CI = dyn_cast<ConstantInt>(Idxs[0])) {
+ if (!CI->isOne())
+ return false;
+ } else {
+ auto *CV = cast<ConstantDataVector>(Idxs[0]);
+ CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue());
+ if (!CI || !CI->isOne())
+ return false;
+ }
+
+ for (unsigned i = 1, e = Idxs.size(); i != e; ++i)
+ if (!cast<Constant>(Idxs[i])->isNullValue())
+ return false;
+ return true;
+}
+
+/// Test whether a given ConstantInt is in-range for a sequential type (array
+/// or vector) with the given number of elements.
+static bool isIndexInRangeOfArrayType(uint64_t NumElements,
+ const ConstantInt *CI) {
+ // We cannot bounds check the index if it doesn't fit in an int64_t.
+ if (CI->getValue().getMinSignedBits() > 64)
+ return false;
+
+ // A negative index or an index past the end of our sequential type is
+ // considered out-of-range.
+ int64_t IndexVal = CI->getSExtValue();
+ if (IndexVal < 0 || (NumElements > 0 && (uint64_t)IndexVal >= NumElements))
+ return false;
+
+ // Otherwise, it is in-range.
+ return true;
+}
+
+// Combine Indices - If the source pointer to this getelementptr instruction
+// is a getelementptr instruction, combine the indices of the two
+// getelementptr instructions into a single instruction.
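+// For example, when the indexed types allow it, gep(gep(P, 0, 2), 3) folds to
+// gep(P, 0, 5), and with a leading zero outer index, gep(gep(P, 1), 0, 2)
+// folds to gep(P, 1, 2).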
+static Constant *foldGEPOfGEP(GEPOperator *GEP, Type *PointeeTy, bool InBounds,
+ ArrayRef<Value *> Idxs) {
+ if (PointeeTy != GEP->getResultElementType())
+ return nullptr;
+
+ Constant *Idx0 = cast<Constant>(Idxs[0]);
+ if (Idx0->isNullValue()) {
+ // Handle the simple case of a zero index.
+ SmallVector<Value*, 16> NewIndices;
+ NewIndices.reserve(Idxs.size() + GEP->getNumIndices());
+ NewIndices.append(GEP->idx_begin(), GEP->idx_end());
+ NewIndices.append(Idxs.begin() + 1, Idxs.end());
+ return ConstantExpr::getGetElementPtr(
+ GEP->getSourceElementType(), cast<Constant>(GEP->getPointerOperand()),
+ NewIndices, InBounds && GEP->isInBounds(), GEP->getInRangeIndex());
+ }
+
+ gep_type_iterator LastI = gep_type_end(GEP);
+ for (gep_type_iterator I = gep_type_begin(GEP), E = gep_type_end(GEP);
+ I != E; ++I)
+ LastI = I;
+
+  // We can't combine GEPs if the inner GEP's last index is into a struct type.
+ if (!LastI.isSequential())
+ return nullptr;
+ // We could perform the transform with non-constant index, but prefer leaving
+ // it as GEP of GEP rather than GEP of add for now.
+ ConstantInt *CI = dyn_cast<ConstantInt>(Idx0);
+ if (!CI)
+ return nullptr;
+
+ // TODO: This code may be extended to handle vectors as well.
+ auto *LastIdx = cast<Constant>(GEP->getOperand(GEP->getNumOperands()-1));
+ Type *LastIdxTy = LastIdx->getType();
+ if (LastIdxTy->isVectorTy())
+ return nullptr;
+
+ SmallVector<Value*, 16> NewIndices;
+ NewIndices.reserve(Idxs.size() + GEP->getNumIndices());
+ NewIndices.append(GEP->idx_begin(), GEP->idx_end() - 1);
+
+ // Add the last index of the source with the first index of the new GEP.
+ // Make sure to handle the case when they are actually different types.
+ if (LastIdxTy != Idx0->getType()) {
+ unsigned CommonExtendedWidth =
+ std::max(LastIdxTy->getIntegerBitWidth(),
+ Idx0->getType()->getIntegerBitWidth());
+ CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
+
+ Type *CommonTy =
+ Type::getIntNTy(LastIdxTy->getContext(), CommonExtendedWidth);
+ Idx0 = ConstantExpr::getSExtOrBitCast(Idx0, CommonTy);
+ LastIdx = ConstantExpr::getSExtOrBitCast(LastIdx, CommonTy);
+ }
+
+ NewIndices.push_back(ConstantExpr::get(Instruction::Add, Idx0, LastIdx));
+ NewIndices.append(Idxs.begin() + 1, Idxs.end());
+
+ // The combined GEP normally inherits its index inrange attribute from
+ // the inner GEP, but if the inner GEP's last index was adjusted by the
+  // outer GEP, any inrange attribute on that index is invalidated.
+ Optional<unsigned> IRIndex = GEP->getInRangeIndex();
+ if (IRIndex && *IRIndex == GEP->getNumIndices() - 1)
+ IRIndex = None;
+
+ return ConstantExpr::getGetElementPtr(
+ GEP->getSourceElementType(), cast<Constant>(GEP->getPointerOperand()),
+ NewIndices, InBounds && GEP->isInBounds(), IRIndex);
+}
+
+Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
+ bool InBounds,
+ Optional<unsigned> InRangeIndex,
+ ArrayRef<Value *> Idxs) {
+ if (Idxs.empty()) return C;
+
+ Type *GEPTy = GetElementPtrInst::getGEPReturnType(
+ PointeeTy, C, makeArrayRef((Value *const *)Idxs.data(), Idxs.size()));
+
+ if (isa<PoisonValue>(C))
+ return PoisonValue::get(GEPTy);
+
+ if (isa<UndefValue>(C))
+ // If inbounds, we can choose an out-of-bounds pointer as a base pointer.
+ return InBounds ? PoisonValue::get(GEPTy) : UndefValue::get(GEPTy);
+
+ auto IsNoOp = [&]() {
+    // For non-opaque pointers, having multiple indices will change the result
+ // type of the GEP.
+ if (!C->getType()->getScalarType()->isOpaquePointerTy() && Idxs.size() != 1)
+ return false;
+
+ return all_of(Idxs, [](Value *Idx) {
+ Constant *IdxC = cast<Constant>(Idx);
+ return IdxC->isNullValue() || isa<UndefValue>(IdxC);
+ });
+ };
+ if (IsNoOp())
+ return GEPTy->isVectorTy() && !C->getType()->isVectorTy()
+ ? ConstantVector::getSplat(
+ cast<VectorType>(GEPTy)->getElementCount(), C)
+ : C;
+
+ if (C->isNullValue()) {
+ bool isNull = true;
+ for (Value *Idx : Idxs)
+ if (!isa<UndefValue>(Idx) && !cast<Constant>(Idx)->isNullValue()) {
+ isNull = false;
+ break;
+ }
+ if (isNull) {
+ PointerType *PtrTy = cast<PointerType>(C->getType()->getScalarType());
+ Type *Ty = GetElementPtrInst::getIndexedType(PointeeTy, Idxs);
+
+ assert(Ty && "Invalid indices for GEP!");
+ Type *OrigGEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
+ Type *GEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
+ if (VectorType *VT = dyn_cast<VectorType>(C->getType()))
+ GEPTy = VectorType::get(OrigGEPTy, VT->getElementCount());
+
+      // The GEP returns a vector of pointers when one or more of
+ // its arguments is a vector.
+ for (Value *Idx : Idxs) {
+ if (auto *VT = dyn_cast<VectorType>(Idx->getType())) {
+ assert((!isa<VectorType>(GEPTy) || isa<ScalableVectorType>(GEPTy) ==
+ isa<ScalableVectorType>(VT)) &&
+ "Mismatched GEPTy vector types");
+ GEPTy = VectorType::get(OrigGEPTy, VT->getElementCount());
+ break;
+ }
+ }
+
+ return Constant::getNullValue(GEPTy);
+ }
+ }
+
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ if (auto *GEP = dyn_cast<GEPOperator>(CE))
+ if (Constant *C = foldGEPOfGEP(GEP, PointeeTy, InBounds, Idxs))
+ return C;
+
+ // Attempt to fold casts to the same type away. For example, folding:
+ //
+ // i32* getelementptr ([2 x i32]* bitcast ([3 x i32]* %X to [2 x i32]*),
+ // i64 0, i64 0)
+ // into:
+ //
+ // i32* getelementptr ([3 x i32]* %X, i64 0, i64 0)
+ //
+ // Don't fold if the cast is changing address spaces.
+ Constant *Idx0 = cast<Constant>(Idxs[0]);
+ if (CE->isCast() && Idxs.size() > 1 && Idx0->isNullValue()) {
+ PointerType *SrcPtrTy =
+ dyn_cast<PointerType>(CE->getOperand(0)->getType());
+ PointerType *DstPtrTy = dyn_cast<PointerType>(CE->getType());
+ if (SrcPtrTy && DstPtrTy && !SrcPtrTy->isOpaque() &&
+ !DstPtrTy->isOpaque()) {
+ ArrayType *SrcArrayTy =
+ dyn_cast<ArrayType>(SrcPtrTy->getNonOpaquePointerElementType());
+ ArrayType *DstArrayTy =
+ dyn_cast<ArrayType>(DstPtrTy->getNonOpaquePointerElementType());
+ if (SrcArrayTy && DstArrayTy
+ && SrcArrayTy->getElementType() == DstArrayTy->getElementType()
+ && SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
+ return ConstantExpr::getGetElementPtr(SrcArrayTy,
+ (Constant *)CE->getOperand(0),
+ Idxs, InBounds, InRangeIndex);
+ }
+ }
+ }
+
+ // Check to see if any array indices are not within the corresponding
+ // notional array or vector bounds. If so, try to determine if they can be
+ // factored out into preceding dimensions.
+ SmallVector<Constant *, 8> NewIdxs;
+ Type *Ty = PointeeTy;
+ Type *Prev = C->getType();
+ auto GEPIter = gep_type_begin(PointeeTy, Idxs);
+ bool Unknown =
+ !isa<ConstantInt>(Idxs[0]) && !isa<ConstantDataVector>(Idxs[0]);
+ for (unsigned i = 1, e = Idxs.size(); i != e;
+ Prev = Ty, Ty = (++GEPIter).getIndexedType(), ++i) {
+ if (!isa<ConstantInt>(Idxs[i]) && !isa<ConstantDataVector>(Idxs[i])) {
+ // We don't know if it's in range or not.
+ Unknown = true;
+ continue;
+ }
+ if (!isa<ConstantInt>(Idxs[i - 1]) && !isa<ConstantDataVector>(Idxs[i - 1]))
+ // Skip if the type of the previous index is not supported.
+ continue;
+ if (InRangeIndex && i == *InRangeIndex + 1) {
+ // If an index is marked inrange, we cannot apply this canonicalization to
+ // the following index, as that will cause the inrange index to point to
+ // the wrong element.
+ continue;
+ }
+ if (isa<StructType>(Ty)) {
+      // The verifier makes sure that GEPs into a struct are in range.
+ continue;
+ }
+ if (isa<VectorType>(Ty)) {
+      // There can be awkward padding after a non-power-of-two vector.
+ Unknown = true;
+ continue;
+ }
+ auto *STy = cast<ArrayType>(Ty);
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
+ if (isIndexInRangeOfArrayType(STy->getNumElements(), CI))
+ // It's in range, skip to the next index.
+ continue;
+ if (CI->isNegative()) {
+ // It's out of range and negative, don't try to factor it.
+ Unknown = true;
+ continue;
+ }
+ } else {
+ auto *CV = cast<ConstantDataVector>(Idxs[i]);
+ bool InRange = true;
+ for (unsigned I = 0, E = CV->getNumElements(); I != E; ++I) {
+ auto *CI = cast<ConstantInt>(CV->getElementAsConstant(I));
+ InRange &= isIndexInRangeOfArrayType(STy->getNumElements(), CI);
+ if (CI->isNegative()) {
+ Unknown = true;
+ break;
+ }
+ }
+ if (InRange || Unknown)
+        // Either it's in range (skip to the next index) or it's out of range
+        // and negative (don't try to factor it).
+ continue;
+ }
+ if (isa<StructType>(Prev)) {
+ // It's out of range, but the prior dimension is a struct
+ // so we can't do anything about it.
+ Unknown = true;
+ continue;
+ }
+ // It's out of range, but we can factor it into the prior
+ // dimension.
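+    // For example, for a pointee type [10 x i32], indices (1, 15) become
+    // (1 + 15 / 10, 15 % 10) == (2, 5).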
+ NewIdxs.resize(Idxs.size());
+ // Determine the number of elements in our sequential type.
+ uint64_t NumElements = STy->getArrayNumElements();
+
+ // Expand the current index or the previous index to a vector from a scalar
+ // if necessary.
+ Constant *CurrIdx = cast<Constant>(Idxs[i]);
+ auto *PrevIdx =
+ NewIdxs[i - 1] ? NewIdxs[i - 1] : cast<Constant>(Idxs[i - 1]);
+ bool IsCurrIdxVector = CurrIdx->getType()->isVectorTy();
+ bool IsPrevIdxVector = PrevIdx->getType()->isVectorTy();
+ bool UseVector = IsCurrIdxVector || IsPrevIdxVector;
+
+ if (!IsCurrIdxVector && IsPrevIdxVector)
+ CurrIdx = ConstantDataVector::getSplat(
+ cast<FixedVectorType>(PrevIdx->getType())->getNumElements(), CurrIdx);
+
+ if (!IsPrevIdxVector && IsCurrIdxVector)
+ PrevIdx = ConstantDataVector::getSplat(
+ cast<FixedVectorType>(CurrIdx->getType())->getNumElements(), PrevIdx);
+
+ Constant *Factor =
+ ConstantInt::get(CurrIdx->getType()->getScalarType(), NumElements);
+ if (UseVector)
+ Factor = ConstantDataVector::getSplat(
+ IsPrevIdxVector
+ ? cast<FixedVectorType>(PrevIdx->getType())->getNumElements()
+ : cast<FixedVectorType>(CurrIdx->getType())->getNumElements(),
+ Factor);
+
+ NewIdxs[i] =
+ ConstantFoldBinaryInstruction(Instruction::SRem, CurrIdx, Factor);
+
+ Constant *Div =
+ ConstantFoldBinaryInstruction(Instruction::SDiv, CurrIdx, Factor);
+
+ // We're working on either ConstantInt or vectors of ConstantInt,
+ // so these should always fold.
+ assert(NewIdxs[i] != nullptr && Div != nullptr && "Should have folded");
+
+ unsigned CommonExtendedWidth =
+ std::max(PrevIdx->getType()->getScalarSizeInBits(),
+ Div->getType()->getScalarSizeInBits());
+ CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
+
+    // Before adding, extend both operands to at least i64 to avoid
+ // overflow trouble.
+ Type *ExtendedTy = Type::getIntNTy(Div->getContext(), CommonExtendedWidth);
+ if (UseVector)
+ ExtendedTy = FixedVectorType::get(
+ ExtendedTy,
+ IsPrevIdxVector
+ ? cast<FixedVectorType>(PrevIdx->getType())->getNumElements()
+ : cast<FixedVectorType>(CurrIdx->getType())->getNumElements());
+
+ if (!PrevIdx->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
+ PrevIdx = ConstantExpr::getSExt(PrevIdx, ExtendedTy);
+
+ if (!Div->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
+ Div = ConstantExpr::getSExt(Div, ExtendedTy);
+
+ NewIdxs[i - 1] = ConstantExpr::getAdd(PrevIdx, Div);
+ }
+
+ // If we did any factoring, start over with the adjusted indices.
+ if (!NewIdxs.empty()) {
+ for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
+ if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]);
+ return ConstantExpr::getGetElementPtr(PointeeTy, C, NewIdxs, InBounds,
+ InRangeIndex);
+ }
+
+ // If all indices are known integers and normalized, we can do a simple
+ // check for the "inbounds" property.
+ if (!Unknown && !InBounds)
+ if (auto *GV = dyn_cast<GlobalVariable>(C))
+ if (!GV->hasExternalWeakLinkage() && isInBoundsIndices(Idxs))
+ return ConstantExpr::getGetElementPtr(PointeeTy, C, Idxs,
+ /*InBounds=*/true, InRangeIndex);
+
+ return nullptr;
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/ConstantRange.cpp b/contrib/llvm-project/llvm/lib/IR/ConstantRange.cpp
new file mode 100644
index 000000000000..9d239101d8fd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/ConstantRange.cpp
@@ -0,0 +1,1820 @@
+//===- ConstantRange.cpp - ConstantRange implementation -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Represent a range of possible values that an integral value may take on
+// when the program is run. This keeps track of a lower and upper bound for the
+// constant, which MAY wrap around the end of the numeric range. To do this, it
+// keeps track of a [lower, upper) bound, which specifies an interval just like
+// STL iterators. When used with boolean values, the following are important
+// ranges (other integral ranges use min/max values for special range values):
+//
+// [F, F) = {} = Empty set
+// [T, F) = {T}
+// [F, T) = {F}
+// [T, T) = {F, T} = Full set
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+
+using namespace llvm;
+
+ConstantRange::ConstantRange(uint32_t BitWidth, bool Full)
+ : Lower(Full ? APInt::getMaxValue(BitWidth) : APInt::getMinValue(BitWidth)),
+ Upper(Lower) {}
+
+ConstantRange::ConstantRange(APInt V)
+ : Lower(std::move(V)), Upper(Lower + 1) {}
+
+ConstantRange::ConstantRange(APInt L, APInt U)
+ : Lower(std::move(L)), Upper(std::move(U)) {
+ assert(Lower.getBitWidth() == Upper.getBitWidth() &&
+ "ConstantRange with unequal bit widths");
+ assert((Lower != Upper || (Lower.isMaxValue() || Lower.isMinValue())) &&
+ "Lower == Upper, but they aren't min or max value!");
+}
+
+ConstantRange ConstantRange::fromKnownBits(const KnownBits &Known,
+ bool IsSigned) {
+ assert(!Known.hasConflict() && "Expected valid KnownBits");
+
+ if (Known.isUnknown())
+ return getFull(Known.getBitWidth());
+
+ // For unsigned ranges, or signed ranges with known sign bit, create a simple
+ // range between the smallest and largest possible value.
+ if (!IsSigned || Known.isNegative() || Known.isNonNegative())
+ return ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1);
+
+ // If we don't know the sign bit, pick the lower bound as a negative number
+ // and the upper bound as a non-negative one.
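+  // For example, for i8 with only bit 0 known to be one, the min/max values
+  // are 0x01 and 0xFF, and the resulting signed range is [0x81, 0x7F + 1).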
+ APInt Lower = Known.getMinValue(), Upper = Known.getMaxValue();
+ Lower.setSignBit();
+ Upper.clearSignBit();
+ return ConstantRange(Lower, Upper + 1);
+}
+
+KnownBits ConstantRange::toKnownBits() const {
+ // TODO: We could return conflicting known bits here, but consumers are
+ // likely not prepared for that.
+ if (isEmptySet())
+ return KnownBits(getBitWidth());
+
+ // We can only retain the top bits that are the same between min and max.
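+  // For example, for the i8 range [10, 20) the unsigned min and max are 10
+  // and 19; they agree only in the top three bits, so those become known
+  // zero and the low five bits stay unknown.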
+ APInt Min = getUnsignedMin();
+ APInt Max = getUnsignedMax();
+ KnownBits Known = KnownBits::makeConstant(Min);
+ if (Optional<unsigned> DifferentBit =
+ APIntOps::GetMostSignificantDifferentBit(Min, Max)) {
+ Known.Zero.clearLowBits(*DifferentBit + 1);
+ Known.One.clearLowBits(*DifferentBit + 1);
+ }
+ return Known;
+}
+
+ConstantRange ConstantRange::makeAllowedICmpRegion(CmpInst::Predicate Pred,
+ const ConstantRange &CR) {
+ if (CR.isEmptySet())
+ return CR;
+
+ uint32_t W = CR.getBitWidth();
+ switch (Pred) {
+ default:
+ llvm_unreachable("Invalid ICmp predicate to makeAllowedICmpRegion()");
+ case CmpInst::ICMP_EQ:
+ return CR;
+ case CmpInst::ICMP_NE:
+ if (CR.isSingleElement())
+ return ConstantRange(CR.getUpper(), CR.getLower());
+ return getFull(W);
+ case CmpInst::ICMP_ULT: {
+ APInt UMax(CR.getUnsignedMax());
+ if (UMax.isMinValue())
+ return getEmpty(W);
+ return ConstantRange(APInt::getMinValue(W), std::move(UMax));
+ }
+ case CmpInst::ICMP_SLT: {
+ APInt SMax(CR.getSignedMax());
+ if (SMax.isMinSignedValue())
+ return getEmpty(W);
+ return ConstantRange(APInt::getSignedMinValue(W), std::move(SMax));
+ }
+ case CmpInst::ICMP_ULE:
+ return getNonEmpty(APInt::getMinValue(W), CR.getUnsignedMax() + 1);
+ case CmpInst::ICMP_SLE:
+ return getNonEmpty(APInt::getSignedMinValue(W), CR.getSignedMax() + 1);
+ case CmpInst::ICMP_UGT: {
+ APInt UMin(CR.getUnsignedMin());
+ if (UMin.isMaxValue())
+ return getEmpty(W);
+ return ConstantRange(std::move(UMin) + 1, APInt::getZero(W));
+ }
+ case CmpInst::ICMP_SGT: {
+ APInt SMin(CR.getSignedMin());
+ if (SMin.isMaxSignedValue())
+ return getEmpty(W);
+ return ConstantRange(std::move(SMin) + 1, APInt::getSignedMinValue(W));
+ }
+ case CmpInst::ICMP_UGE:
+ return getNonEmpty(CR.getUnsignedMin(), APInt::getZero(W));
+ case CmpInst::ICMP_SGE:
+ return getNonEmpty(CR.getSignedMin(), APInt::getSignedMinValue(W));
+ }
+}
+
+ConstantRange ConstantRange::makeSatisfyingICmpRegion(CmpInst::Predicate Pred,
+ const ConstantRange &CR) {
+  // Follows from De Morgan's laws:
+ //
+ // ~(~A union ~B) == A intersect B.
+ //
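+  // For example, for Pred = ult and CR = [2, 5), the allowed region of the
+  // inverse predicate uge is [2, 0); its inverse [0, 2) holds exactly the
+  // values that are ult every element of [2, 5).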
+ return makeAllowedICmpRegion(CmpInst::getInversePredicate(Pred), CR)
+ .inverse();
+}
+
+ConstantRange ConstantRange::makeExactICmpRegion(CmpInst::Predicate Pred,
+ const APInt &C) {
+  // Computes the exact range that is equal to both the constant ranges
+  // returned by makeAllowedICmpRegion and makeSatisfyingICmpRegion. The two
+  // agree whenever RHS is a singleton such as an APInt, so the assert is
+  // valid. However, for a non-singleton RHS such as ult [2,5),
+  // makeAllowedICmpRegion returns [0,4) but makeSatisfyingICmpRegion
+  // returns [0,2).
+ //
+ assert(makeAllowedICmpRegion(Pred, C) == makeSatisfyingICmpRegion(Pred, C));
+ return makeAllowedICmpRegion(Pred, C);
+}
+
+bool ConstantRange::areInsensitiveToSignednessOfICmpPredicate(
+ const ConstantRange &CR1, const ConstantRange &CR2) {
+ if (CR1.isEmptySet() || CR2.isEmptySet())
+ return true;
+
+ return (CR1.isAllNonNegative() && CR2.isAllNonNegative()) ||
+ (CR1.isAllNegative() && CR2.isAllNegative());
+}
+
+bool ConstantRange::areInsensitiveToSignednessOfInvertedICmpPredicate(
+ const ConstantRange &CR1, const ConstantRange &CR2) {
+ if (CR1.isEmptySet() || CR2.isEmptySet())
+ return true;
+
+ return (CR1.isAllNonNegative() && CR2.isAllNegative()) ||
+ (CR1.isAllNegative() && CR2.isAllNonNegative());
+}
+
+CmpInst::Predicate ConstantRange::getEquivalentPredWithFlippedSignedness(
+ CmpInst::Predicate Pred, const ConstantRange &CR1,
+ const ConstantRange &CR2) {
+ assert(CmpInst::isIntPredicate(Pred) && CmpInst::isRelational(Pred) &&
+ "Only for relational integer predicates!");
+
+ CmpInst::Predicate FlippedSignednessPred =
+ CmpInst::getFlippedSignednessPredicate(Pred);
+
+ if (areInsensitiveToSignednessOfICmpPredicate(CR1, CR2))
+ return FlippedSignednessPred;
+
+ if (areInsensitiveToSignednessOfInvertedICmpPredicate(CR1, CR2))
+ return CmpInst::getInversePredicate(FlippedSignednessPred);
+
+ return CmpInst::Predicate::BAD_ICMP_PREDICATE;
+}
+
+void ConstantRange::getEquivalentICmp(CmpInst::Predicate &Pred,
+ APInt &RHS, APInt &Offset) const {
+ Offset = APInt(getBitWidth(), 0);
+ if (isFullSet() || isEmptySet()) {
+ Pred = isEmptySet() ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
+ RHS = APInt(getBitWidth(), 0);
+ } else if (auto *OnlyElt = getSingleElement()) {
+ Pred = CmpInst::ICMP_EQ;
+ RHS = *OnlyElt;
+ } else if (auto *OnlyMissingElt = getSingleMissingElement()) {
+ Pred = CmpInst::ICMP_NE;
+ RHS = *OnlyMissingElt;
+ } else if (getLower().isMinSignedValue() || getLower().isMinValue()) {
+ Pred =
+ getLower().isMinSignedValue() ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
+ RHS = getUpper();
+ } else if (getUpper().isMinSignedValue() || getUpper().isMinValue()) {
+ Pred =
+ getUpper().isMinSignedValue() ? CmpInst::ICMP_SGE : CmpInst::ICMP_UGE;
+ RHS = getLower();
+ } else {
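+    // General case: X is in [Lower, Upper) iff X + (-Lower) ult Upper - Lower.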
+ Pred = CmpInst::ICMP_ULT;
+ RHS = getUpper() - getLower();
+ Offset = -getLower();
+ }
+
+ assert(ConstantRange::makeExactICmpRegion(Pred, RHS) == add(Offset) &&
+ "Bad result!");
+}
+
+bool ConstantRange::getEquivalentICmp(CmpInst::Predicate &Pred,
+ APInt &RHS) const {
+ APInt Offset;
+ getEquivalentICmp(Pred, RHS, Offset);
+ return Offset.isZero();
+}
+
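+// For example, [0, 5).icmp(ICMP_ULT, [5, 10)) is true: every value in [0, 5)
+// is unsigned-less-than every value in [5, 10).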
+bool ConstantRange::icmp(CmpInst::Predicate Pred,
+ const ConstantRange &Other) const {
+ return makeSatisfyingICmpRegion(Pred, Other).contains(*this);
+}
+
+/// Exact mul nuw region for single element RHS.
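+/// For example, for i8 and V == 3 this is [0, 86): 85 * 3 == 255 still fits
+/// in 8 bits, while 86 * 3 would wrap.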
+static ConstantRange makeExactMulNUWRegion(const APInt &V) {
+ unsigned BitWidth = V.getBitWidth();
+ if (V == 0)
+ return ConstantRange::getFull(V.getBitWidth());
+
+ return ConstantRange::getNonEmpty(
+ APIntOps::RoundingUDiv(APInt::getMinValue(BitWidth), V,
+ APInt::Rounding::UP),
+ APIntOps::RoundingUDiv(APInt::getMaxValue(BitWidth), V,
+ APInt::Rounding::DOWN) + 1);
+}
+
+/// Exact mul nsw region for single element RHS.
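+/// For example, for i8 and V == 3 this is [-42, 43): -42 * 3 == -126 and
+/// 42 * 3 == 126 both fit in i8, while -43 * 3 and 43 * 3 overflow.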
+static ConstantRange makeExactMulNSWRegion(const APInt &V) {
+  // Handle the special cases 0, -1 and 1. See the comment before the final
+  // return for why -1 and 1 are specialized.
+ unsigned BitWidth = V.getBitWidth();
+ if (V == 0 || V.isOne())
+ return ConstantRange::getFull(BitWidth);
+
+ APInt MinValue = APInt::getSignedMinValue(BitWidth);
+ APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
+ // e.g. Returning [-127, 127], represented as [-127, -128).
+ if (V.isAllOnes())
+ return ConstantRange(-MaxValue, MinValue);
+
+ APInt Lower, Upper;
+ if (V.isNegative()) {
+ Lower = APIntOps::RoundingSDiv(MaxValue, V, APInt::Rounding::UP);
+ Upper = APIntOps::RoundingSDiv(MinValue, V, APInt::Rounding::DOWN);
+ } else {
+ Lower = APIntOps::RoundingSDiv(MinValue, V, APInt::Rounding::UP);
+ Upper = APIntOps::RoundingSDiv(MaxValue, V, APInt::Rounding::DOWN);
+ }
+  // The ConstantRange ctor takes a half-open interval [Lower, Upper + 1).
+ // Upper + 1 is guaranteed not to overflow, because |divisor| > 1. 0, -1,
+ // and 1 are already handled as special cases.
+ return ConstantRange(Lower, Upper + 1);
+}
+
+ConstantRange
+ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
+ const ConstantRange &Other,
+ unsigned NoWrapKind) {
+ using OBO = OverflowingBinaryOperator;
+
+ assert(Instruction::isBinaryOp(BinOp) && "Binary operators only!");
+
+ assert((NoWrapKind == OBO::NoSignedWrap ||
+ NoWrapKind == OBO::NoUnsignedWrap) &&
+ "NoWrapKind invalid!");
+
+ bool Unsigned = NoWrapKind == OBO::NoUnsignedWrap;
+ unsigned BitWidth = Other.getBitWidth();
+
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unsupported binary op");
+
+ case Instruction::Add: {
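+    // Unsigned: X + Other.umax must not wrap, so X may range over
+    // [0, 2^BitWidth - Other.umax). For example, for i8 and Other = [0, 101)
+    // the no-wrap region is [0, 156).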
+ if (Unsigned)
+ return getNonEmpty(APInt::getZero(BitWidth), -Other.getUnsignedMax());
+
+ APInt SignedMinVal = APInt::getSignedMinValue(BitWidth);
+ APInt SMin = Other.getSignedMin(), SMax = Other.getSignedMax();
+ return getNonEmpty(
+ SMin.isNegative() ? SignedMinVal - SMin : SignedMinVal,
+ SMax.isStrictlyPositive() ? SignedMinVal - SMax : SignedMinVal);
+ }
+
+ case Instruction::Sub: {
+ if (Unsigned)
+ return getNonEmpty(Other.getUnsignedMax(), APInt::getMinValue(BitWidth));
+
+ APInt SignedMinVal = APInt::getSignedMinValue(BitWidth);
+ APInt SMin = Other.getSignedMin(), SMax = Other.getSignedMax();
+ return getNonEmpty(
+ SMax.isStrictlyPositive() ? SignedMinVal + SMax : SignedMinVal,
+ SMin.isNegative() ? SignedMinVal + SMin : SignedMinVal);
+ }
+
+ case Instruction::Mul:
+ if (Unsigned)
+ return makeExactMulNUWRegion(Other.getUnsignedMax());
+
+ return makeExactMulNSWRegion(Other.getSignedMin())
+ .intersectWith(makeExactMulNSWRegion(Other.getSignedMax()));
+
+ case Instruction::Shl: {
+    // For a given range of shift amounts, if we ignore all illegal shift amounts
+ // (that always produce poison), what shift amount range is left?
+ ConstantRange ShAmt = Other.intersectWith(
+ ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, (BitWidth - 1) + 1)));
+ if (ShAmt.isEmptySet()) {
+ // If the entire range of shift amounts is already poison-producing,
+      // then we can freely add more poison-producing flags on top of that.
+ return getFull(BitWidth);
+ }
+    // There are some legal shift amounts, so we can compute a conservatively
+    // correct range of no-wrap inputs. Note that by now we have clamped
+    // ShAmtUMax to be at most bitwidth-1, which gives the most conservative
+    // range.
+ APInt ShAmtUMax = ShAmt.getUnsignedMax();
+ if (Unsigned)
+ return getNonEmpty(APInt::getZero(BitWidth),
+ APInt::getMaxValue(BitWidth).lshr(ShAmtUMax) + 1);
+ return getNonEmpty(APInt::getSignedMinValue(BitWidth).ashr(ShAmtUMax),
+ APInt::getSignedMaxValue(BitWidth).ashr(ShAmtUMax) + 1);
+ }
+ }
+}
+
+ConstantRange ConstantRange::makeExactNoWrapRegion(Instruction::BinaryOps BinOp,
+ const APInt &Other,
+ unsigned NoWrapKind) {
+ // makeGuaranteedNoWrapRegion() is exact for single-element ranges, as
+ // "for all" and "for any" coincide in this case.
+ return makeGuaranteedNoWrapRegion(BinOp, ConstantRange(Other), NoWrapKind);
+}
+
+bool ConstantRange::isFullSet() const {
+ return Lower == Upper && Lower.isMaxValue();
+}
+
+bool ConstantRange::isEmptySet() const {
+ return Lower == Upper && Lower.isMinValue();
+}
+
+bool ConstantRange::isWrappedSet() const {
+ return Lower.ugt(Upper) && !Upper.isZero();
+}
+
+bool ConstantRange::isUpperWrapped() const {
+ return Lower.ugt(Upper);
+}
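+
+// For example, [4, 2) is both wrapped and upper-wrapped, while [4, 0), i.e.
+// [4, UINT_MAX], is only upper-wrapped: its stored bounds wrap around, but
+// the set itself does not wrap past zero.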
+
+bool ConstantRange::isSignWrappedSet() const {
+ return Lower.sgt(Upper) && !Upper.isMinSignedValue();
+}
+
+bool ConstantRange::isUpperSignWrapped() const {
+ return Lower.sgt(Upper);
+}
+
+bool
+ConstantRange::isSizeStrictlySmallerThan(const ConstantRange &Other) const {
+ assert(getBitWidth() == Other.getBitWidth());
+ if (isFullSet())
+ return false;
+ if (Other.isFullSet())
+ return true;
+ return (Upper - Lower).ult(Other.Upper - Other.Lower);
+}
+
+bool
+ConstantRange::isSizeLargerThan(uint64_t MaxSize) const {
+  // If this is a full set, we need special handling to avoid needing an extra bit
+ // to represent the size.
+ if (isFullSet())
+ return MaxSize == 0 || APInt::getMaxValue(getBitWidth()).ugt(MaxSize - 1);
+
+ return (Upper - Lower).ugt(MaxSize);
+}
+
+bool ConstantRange::isAllNegative() const {
+ // Empty set is all negative, full set is not.
+ if (isEmptySet())
+ return true;
+ if (isFullSet())
+ return false;
+
+ return !isUpperSignWrapped() && !Upper.isStrictlyPositive();
+}
+
+bool ConstantRange::isAllNonNegative() const {
+ // Empty and full set are automatically treated correctly.
+ return !isSignWrappedSet() && Lower.isNonNegative();
+}
+
+APInt ConstantRange::getUnsignedMax() const {
+ if (isFullSet() || isUpperWrapped())
+ return APInt::getMaxValue(getBitWidth());
+ return getUpper() - 1;
+}
+
+APInt ConstantRange::getUnsignedMin() const {
+ if (isFullSet() || isWrappedSet())
+ return APInt::getMinValue(getBitWidth());
+ return getLower();
+}
+
+APInt ConstantRange::getSignedMax() const {
+ if (isFullSet() || isUpperSignWrapped())
+ return APInt::getSignedMaxValue(getBitWidth());
+ return getUpper() - 1;
+}
+
+APInt ConstantRange::getSignedMin() const {
+ if (isFullSet() || isSignWrappedSet())
+ return APInt::getSignedMinValue(getBitWidth());
+ return getLower();
+}
+
+bool ConstantRange::contains(const APInt &V) const {
+ if (Lower == Upper)
+ return isFullSet();
+
+ if (!isUpperWrapped())
+ return Lower.ule(V) && V.ult(Upper);
+ return Lower.ule(V) || V.ult(Upper);
+}
+
+bool ConstantRange::contains(const ConstantRange &Other) const {
+ if (isFullSet() || Other.isEmptySet()) return true;
+ if (isEmptySet() || Other.isFullSet()) return false;
+
+ if (!isUpperWrapped()) {
+ if (Other.isUpperWrapped())
+ return false;
+
+ return Lower.ule(Other.getLower()) && Other.getUpper().ule(Upper);
+ }
+
+ if (!Other.isUpperWrapped())
+ return Other.getUpper().ule(Upper) ||
+ Lower.ule(Other.getLower());
+
+ return Other.getUpper().ule(Upper) && Lower.ule(Other.getLower());
+}
+
+unsigned ConstantRange::getActiveBits() const {
+ if (isEmptySet())
+ return 0;
+
+ return getUnsignedMax().getActiveBits();
+}
+
+unsigned ConstantRange::getMinSignedBits() const {
+ if (isEmptySet())
+ return 0;
+
+ return std::max(getSignedMin().getMinSignedBits(),
+ getSignedMax().getMinSignedBits());
+}
+
+ConstantRange ConstantRange::subtract(const APInt &Val) const {
+ assert(Val.getBitWidth() == getBitWidth() && "Wrong bit width");
+ // If the set is empty or full, don't modify the endpoints.
+ if (Lower == Upper)
+ return *this;
+ return ConstantRange(Lower - Val, Upper - Val);
+}
+
+ConstantRange ConstantRange::difference(const ConstantRange &CR) const {
+ return intersectWith(CR.inverse());
+}
+
+static ConstantRange getPreferredRange(
+ const ConstantRange &CR1, const ConstantRange &CR2,
+ ConstantRange::PreferredRangeType Type) {
+ if (Type == ConstantRange::Unsigned) {
+ if (!CR1.isWrappedSet() && CR2.isWrappedSet())
+ return CR1;
+ if (CR1.isWrappedSet() && !CR2.isWrappedSet())
+ return CR2;
+ } else if (Type == ConstantRange::Signed) {
+ if (!CR1.isSignWrappedSet() && CR2.isSignWrappedSet())
+ return CR1;
+ if (CR1.isSignWrappedSet() && !CR2.isSignWrappedSet())
+ return CR2;
+ }
+
+ if (CR1.isSizeStrictlySmallerThan(CR2))
+ return CR1;
+ return CR2;
+}
+
+ConstantRange ConstantRange::intersectWith(const ConstantRange &CR,
+ PreferredRangeType Type) const {
+ assert(getBitWidth() == CR.getBitWidth() &&
+ "ConstantRange types don't agree!");
+
+ // Handle common cases.
+ if ( isEmptySet() || CR.isFullSet()) return *this;
+ if (CR.isEmptySet() || isFullSet()) return CR;
+
+ if (!isUpperWrapped() && CR.isUpperWrapped())
+ return CR.intersectWith(*this, Type);
+
+ if (!isUpperWrapped() && !CR.isUpperWrapped()) {
+ if (Lower.ult(CR.Lower)) {
+ // L---U : this
+ // L---U : CR
+ if (Upper.ule(CR.Lower))
+ return getEmpty();
+
+ // L---U : this
+ // L---U : CR
+ if (Upper.ult(CR.Upper))
+ return ConstantRange(CR.Lower, Upper);
+
+ // L-------U : this
+ // L---U : CR
+ return CR;
+ }
+ // L---U : this
+ // L-------U : CR
+ if (Upper.ult(CR.Upper))
+ return *this;
+
+ // L-----U : this
+ // L-----U : CR
+ if (Lower.ult(CR.Upper))
+ return ConstantRange(Lower, CR.Upper);
+
+ // L---U : this
+ // L---U : CR
+ return getEmpty();
+ }
+
+ if (isUpperWrapped() && !CR.isUpperWrapped()) {
+ if (CR.Lower.ult(Upper)) {
+ // ------U L--- : this
+ // L--U : CR
+ if (CR.Upper.ult(Upper))
+ return CR;
+
+ // ------U L--- : this
+ // L------U : CR
+ if (CR.Upper.ule(Lower))
+ return ConstantRange(CR.Lower, Upper);
+
+ // ------U L--- : this
+ // L----------U : CR
+ return getPreferredRange(*this, CR, Type);
+ }
+ if (CR.Lower.ult(Lower)) {
+ // --U L---- : this
+ // L--U : CR
+ if (CR.Upper.ule(Lower))
+ return getEmpty();
+
+ // --U L---- : this
+ // L------U : CR
+ return ConstantRange(Lower, CR.Upper);
+ }
+
+ // --U L------ : this
+ // L--U : CR
+ return CR;
+ }
+
+ if (CR.Upper.ult(Upper)) {
+ // ------U L-- : this
+ // --U L------ : CR
+ if (CR.Lower.ult(Upper))
+ return getPreferredRange(*this, CR, Type);
+
+ // ----U L-- : this
+ // --U L---- : CR
+ if (CR.Lower.ult(Lower))
+ return ConstantRange(Lower, CR.Upper);
+
+ // ----U L---- : this
+ // --U L-- : CR
+ return CR;
+ }
+ if (CR.Upper.ule(Lower)) {
+ // --U L-- : this
+ // ----U L---- : CR
+ if (CR.Lower.ult(Lower))
+ return *this;
+
+ // --U L---- : this
+ // ----U L-- : CR
+ return ConstantRange(CR.Lower, Upper);
+ }
+
+ // --U L------ : this
+ // ------U L-- : CR
+ return getPreferredRange(*this, CR, Type);
+}
+
+ConstantRange ConstantRange::unionWith(const ConstantRange &CR,
+ PreferredRangeType Type) const {
+ assert(getBitWidth() == CR.getBitWidth() &&
+ "ConstantRange types don't agree!");
+
+ if ( isFullSet() || CR.isEmptySet()) return *this;
+ if (CR.isFullSet() || isEmptySet()) return CR;
+
+ if (!isUpperWrapped() && CR.isUpperWrapped())
+ return CR.unionWith(*this, Type);
+
+ if (!isUpperWrapped() && !CR.isUpperWrapped()) {
+ // L---U and L---U : this
+ // L---U L---U : CR
+ // result in one of
+ // L---------U
+ // -----U L-----
+ if (CR.Upper.ult(Lower) || Upper.ult(CR.Lower))
+ return getPreferredRange(
+ ConstantRange(Lower, CR.Upper), ConstantRange(CR.Lower, Upper), Type);
+
+ APInt L = CR.Lower.ult(Lower) ? CR.Lower : Lower;
+ APInt U = (CR.Upper - 1).ugt(Upper - 1) ? CR.Upper : Upper;
+
+ if (L.isZero() && U.isZero())
+ return getFull();
+
+ return ConstantRange(std::move(L), std::move(U));
+ }
+
+ if (!CR.isUpperWrapped()) {
+ // ------U L----- and ------U L----- : this
+ // L--U L--U : CR
+ if (CR.Upper.ule(Upper) || CR.Lower.uge(Lower))
+ return *this;
+
+ // ------U L----- : this
+ // L---------U : CR
+ if (CR.Lower.ule(Upper) && Lower.ule(CR.Upper))
+ return getFull();
+
+ // ----U L---- : this
+ // L---U : CR
+ // results in one of
+ // ----------U L----
+ // ----U L----------
+ if (Upper.ult(CR.Lower) && CR.Upper.ult(Lower))
+ return getPreferredRange(
+ ConstantRange(Lower, CR.Upper), ConstantRange(CR.Lower, Upper), Type);
+
+ // ----U L----- : this
+ // L----U : CR
+ if (Upper.ult(CR.Lower) && Lower.ule(CR.Upper))
+ return ConstantRange(CR.Lower, Upper);
+
+ // ------U L---- : this
+ // L-----U : CR
+ assert(CR.Lower.ule(Upper) && CR.Upper.ult(Lower) &&
+ "ConstantRange::unionWith missed a case with one range wrapped");
+ return ConstantRange(Lower, CR.Upper);
+ }
+
+ // ------U L---- and ------U L---- : this
+ // -U L----------- and ------------U L : CR
+ if (CR.Lower.ule(Upper) || Lower.ule(CR.Upper))
+ return getFull();
+
+ APInt L = CR.Lower.ult(Lower) ? CR.Lower : Lower;
+ APInt U = CR.Upper.ugt(Upper) ? CR.Upper : Upper;
+
+ return ConstantRange(std::move(L), std::move(U));
+}
+
+Optional<ConstantRange>
+ConstantRange::exactIntersectWith(const ConstantRange &CR) const {
+ // TODO: This can be implemented more efficiently.
+ ConstantRange Result = intersectWith(CR);
+ if (Result == inverse().unionWith(CR.inverse()).inverse())
+ return Result;
+ return None;
+}
+
+Optional<ConstantRange>
+ConstantRange::exactUnionWith(const ConstantRange &CR) const {
+ // TODO: This can be implemented more efficiently.
+ ConstantRange Result = unionWith(CR);
+ if (Result == inverse().intersectWith(CR.inverse()).inverse())
+ return Result;
+ return None;
+}
+
+ConstantRange ConstantRange::castOp(Instruction::CastOps CastOp,
+ uint32_t ResultBitWidth) const {
+ switch (CastOp) {
+ default:
+ llvm_unreachable("unsupported cast type");
+ case Instruction::Trunc:
+ return truncate(ResultBitWidth);
+ case Instruction::SExt:
+ return signExtend(ResultBitWidth);
+ case Instruction::ZExt:
+ return zeroExtend(ResultBitWidth);
+ case Instruction::BitCast:
+ return *this;
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ if (getBitWidth() == ResultBitWidth)
+ return *this;
+ else
+ return getFull(ResultBitWidth);
+ case Instruction::UIToFP: {
+ // TODO: use input range if available
+ auto BW = getBitWidth();
+ APInt Min = APInt::getMinValue(BW);
+ APInt Max = APInt::getMaxValue(BW);
+ if (ResultBitWidth > BW) {
+ Min = Min.zext(ResultBitWidth);
+ Max = Max.zext(ResultBitWidth);
+ }
+ return ConstantRange(std::move(Min), std::move(Max));
+ }
+ case Instruction::SIToFP: {
+ // TODO: use input range if available
+ auto BW = getBitWidth();
+ APInt SMin = APInt::getSignedMinValue(BW);
+ APInt SMax = APInt::getSignedMaxValue(BW);
+ if (ResultBitWidth > BW) {
+ SMin = SMin.sext(ResultBitWidth);
+ SMax = SMax.sext(ResultBitWidth);
+ }
+ return ConstantRange(std::move(SMin), std::move(SMax));
+ }
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::IntToPtr:
+ case Instruction::PtrToInt:
+ case Instruction::AddrSpaceCast:
+ // Conservatively return getFull set.
+ return getFull(ResultBitWidth);
+  }
+}
+
+ConstantRange ConstantRange::zeroExtend(uint32_t DstTySize) const {
+ if (isEmptySet()) return getEmpty(DstTySize);
+
+ unsigned SrcTySize = getBitWidth();
+ assert(SrcTySize < DstTySize && "Not a value extension");
+ if (isFullSet() || isUpperWrapped()) {
+ // Change into [0, 1 << src bit width)
+ APInt LowerExt(DstTySize, 0);
+ if (!Upper) // special case: [X, 0) -- not really wrapping around
+ LowerExt = Lower.zext(DstTySize);
+ return ConstantRange(std::move(LowerExt),
+ APInt::getOneBitSet(DstTySize, SrcTySize));
+ }
+
+ return ConstantRange(Lower.zext(DstTySize), Upper.zext(DstTySize));
+}
+
+ConstantRange ConstantRange::signExtend(uint32_t DstTySize) const {
+ if (isEmptySet()) return getEmpty(DstTySize);
+
+ unsigned SrcTySize = getBitWidth();
+ assert(SrcTySize < DstTySize && "Not a value extension");
+
+ // special case: [X, INT_MIN) -- not really wrapping around
+ if (Upper.isMinSignedValue())
+ return ConstantRange(Lower.sext(DstTySize), Upper.zext(DstTySize));
+
+ if (isFullSet() || isSignWrappedSet()) {
+ return ConstantRange(APInt::getHighBitsSet(DstTySize,DstTySize-SrcTySize+1),
+ APInt::getLowBitsSet(DstTySize, SrcTySize-1) + 1);
+ }
+
+ return ConstantRange(Lower.sext(DstTySize), Upper.sext(DstTySize));
+}
+
+ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
+ assert(getBitWidth() > DstTySize && "Not a value truncation");
+ if (isEmptySet())
+ return getEmpty(DstTySize);
+ if (isFullSet())
+ return getFull(DstTySize);
+
+ APInt LowerDiv(Lower), UpperDiv(Upper);
+ ConstantRange Union(DstTySize, /*isFullSet=*/false);
+
+ // Analyze wrapped sets in their two parts: [0, Upper) \/ [Lower, MaxValue]
+ // We use the non-wrapped set code to analyze the [Lower, MaxValue) part, and
+ // then we do the union with [MaxValue, Upper)
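+  // For example, truncating the wrapped i16 range [0xFFF0, 0x0010) to i8
+  // yields [0xF0, 0x10): [0xF0, 0xFF) from the non-wrapped analysis unioned
+  // with [0xFF, 0x10) for the remaining values.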
+ if (isUpperWrapped()) {
+ // If Upper is greater than or equal to MaxValue(DstTy), it covers the whole
+ // truncated range.
+ if (Upper.getActiveBits() > DstTySize ||
+ Upper.countTrailingOnes() == DstTySize)
+ return getFull(DstTySize);
+
+ Union = ConstantRange(APInt::getMaxValue(DstTySize),Upper.trunc(DstTySize));
+ UpperDiv.setAllBits();
+
+ // Union covers the MaxValue case, so return if the remaining range is just
+ // MaxValue(DstTy).
+ if (LowerDiv == UpperDiv)
+ return Union;
+ }
+
+ // Chop off the most significant bits that are past the destination bitwidth.
+ if (LowerDiv.getActiveBits() > DstTySize) {
+    // Mask to just the significant bits and subtract from LowerDiv/UpperDiv.
+ APInt Adjust = LowerDiv & APInt::getBitsSetFrom(getBitWidth(), DstTySize);
+ LowerDiv -= Adjust;
+ UpperDiv -= Adjust;
+ }
+
+ unsigned UpperDivWidth = UpperDiv.getActiveBits();
+ if (UpperDivWidth <= DstTySize)
+ return ConstantRange(LowerDiv.trunc(DstTySize),
+ UpperDiv.trunc(DstTySize)).unionWith(Union);
+
+ // The truncated value wraps around. Check if we can do better than fullset.
+ if (UpperDivWidth == DstTySize + 1) {
+ // Clear the MSB so that UpperDiv wraps around.
+ UpperDiv.clearBit(DstTySize);
+ if (UpperDiv.ult(LowerDiv))
+ return ConstantRange(LowerDiv.trunc(DstTySize),
+ UpperDiv.trunc(DstTySize)).unionWith(Union);
+ }
+
+ return getFull(DstTySize);
+}
+
+ConstantRange ConstantRange::zextOrTrunc(uint32_t DstTySize) const {
+ unsigned SrcTySize = getBitWidth();
+ if (SrcTySize > DstTySize)
+ return truncate(DstTySize);
+ if (SrcTySize < DstTySize)
+ return zeroExtend(DstTySize);
+ return *this;
+}
+
+ConstantRange ConstantRange::sextOrTrunc(uint32_t DstTySize) const {
+ unsigned SrcTySize = getBitWidth();
+ if (SrcTySize > DstTySize)
+ return truncate(DstTySize);
+ if (SrcTySize < DstTySize)
+ return signExtend(DstTySize);
+ return *this;
+}
+
+ConstantRange ConstantRange::binaryOp(Instruction::BinaryOps BinOp,
+ const ConstantRange &Other) const {
+ assert(Instruction::isBinaryOp(BinOp) && "Binary operators only!");
+
+ switch (BinOp) {
+ case Instruction::Add:
+ return add(Other);
+ case Instruction::Sub:
+ return sub(Other);
+ case Instruction::Mul:
+ return multiply(Other);
+ case Instruction::UDiv:
+ return udiv(Other);
+ case Instruction::SDiv:
+ return sdiv(Other);
+ case Instruction::URem:
+ return urem(Other);
+ case Instruction::SRem:
+ return srem(Other);
+ case Instruction::Shl:
+ return shl(Other);
+ case Instruction::LShr:
+ return lshr(Other);
+ case Instruction::AShr:
+ return ashr(Other);
+ case Instruction::And:
+ return binaryAnd(Other);
+ case Instruction::Or:
+ return binaryOr(Other);
+ case Instruction::Xor:
+ return binaryXor(Other);
+ // Note: floating point operations applied to abstract ranges are just
+ // ideal integer operations with a lossy representation
+ case Instruction::FAdd:
+ return add(Other);
+ case Instruction::FSub:
+ return sub(Other);
+ case Instruction::FMul:
+ return multiply(Other);
+ default:
+ // Conservatively return getFull set.
+ return getFull();
+ }
+}
+
+ConstantRange ConstantRange::overflowingBinaryOp(Instruction::BinaryOps BinOp,
+ const ConstantRange &Other,
+ unsigned NoWrapKind) const {
+ assert(Instruction::isBinaryOp(BinOp) && "Binary operators only!");
+
+ switch (BinOp) {
+ case Instruction::Add:
+ return addWithNoWrap(Other, NoWrapKind);
+ case Instruction::Sub:
+ return subWithNoWrap(Other, NoWrapKind);
+ default:
+ // Don't know about this Overflowing Binary Operation.
+ // Conservatively fallback to plain binop handling.
+ return binaryOp(BinOp, Other);
+ }
+}
+
+bool ConstantRange::isIntrinsicSupported(Intrinsic::ID IntrinsicID) {
+ switch (IntrinsicID) {
+ case Intrinsic::uadd_sat:
+ case Intrinsic::usub_sat:
+ case Intrinsic::sadd_sat:
+ case Intrinsic::ssub_sat:
+ case Intrinsic::umin:
+ case Intrinsic::umax:
+ case Intrinsic::smin:
+ case Intrinsic::smax:
+ case Intrinsic::abs:
+ return true;
+ default:
+ return false;
+ }
+}
+
+ConstantRange ConstantRange::intrinsic(Intrinsic::ID IntrinsicID,
+ ArrayRef<ConstantRange> Ops) {
+ switch (IntrinsicID) {
+ case Intrinsic::uadd_sat:
+ return Ops[0].uadd_sat(Ops[1]);
+ case Intrinsic::usub_sat:
+ return Ops[0].usub_sat(Ops[1]);
+ case Intrinsic::sadd_sat:
+ return Ops[0].sadd_sat(Ops[1]);
+ case Intrinsic::ssub_sat:
+ return Ops[0].ssub_sat(Ops[1]);
+ case Intrinsic::umin:
+ return Ops[0].umin(Ops[1]);
+ case Intrinsic::umax:
+ return Ops[0].umax(Ops[1]);
+ case Intrinsic::smin:
+ return Ops[0].smin(Ops[1]);
+ case Intrinsic::smax:
+ return Ops[0].smax(Ops[1]);
+ case Intrinsic::abs: {
+ const APInt *IntMinIsPoison = Ops[1].getSingleElement();
+ assert(IntMinIsPoison && "Must be known (immarg)");
+ assert(IntMinIsPoison->getBitWidth() == 1 && "Must be boolean");
+ return Ops[0].abs(IntMinIsPoison->getBoolValue());
+ }
+ default:
+ assert(!isIntrinsicSupported(IntrinsicID) && "Shouldn't be supported");
+ llvm_unreachable("Unsupported intrinsic");
+ }
+}
+
+ConstantRange
+ConstantRange::add(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ if (isFullSet() || Other.isFullSet())
+ return getFull();
+
+ APInt NewLower = getLower() + Other.getLower();
+ APInt NewUpper = getUpper() + Other.getUpper() - 1;
+ if (NewLower == NewUpper)
+ return getFull();
+
+ ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
+ // We've wrapped, therefore, full set.
+ return getFull();
+ return X;
+}
+
+ConstantRange ConstantRange::addWithNoWrap(const ConstantRange &Other,
+ unsigned NoWrapKind,
+ PreferredRangeType RangeType) const {
+ // Calculate the range for "X + Y" which is guaranteed not to wrap(overflow).
+ // (X is from this, and Y is from Other)
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ if (isFullSet() && Other.isFullSet())
+ return getFull();
+
+ using OBO = OverflowingBinaryOperator;
+ ConstantRange Result = add(Other);
+
+ // If an overflow happens for every value pair in these two constant ranges,
+ // we must return Empty set. In this case, we get that for free, because we
+ // get lucky that intersection of add() with uadd_sat()/sadd_sat() results
+ // in an empty set.
+
+ if (NoWrapKind & OBO::NoSignedWrap)
+ Result = Result.intersectWith(sadd_sat(Other), RangeType);
+
+ if (NoWrapKind & OBO::NoUnsignedWrap)
+ Result = Result.intersectWith(uadd_sat(Other), RangeType);
+
+ return Result;
+}
+
+ConstantRange
+ConstantRange::sub(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ if (isFullSet() || Other.isFullSet())
+ return getFull();
+
+ APInt NewLower = getLower() - Other.getUpper() + 1;
+ APInt NewUpper = getUpper() - Other.getLower();
+ if (NewLower == NewUpper)
+ return getFull();
+
+ ConstantRange X = ConstantRange(std::move(NewLower), std::move(NewUpper));
+ if (X.isSizeStrictlySmallerThan(*this) ||
+ X.isSizeStrictlySmallerThan(Other))
+ // We've wrapped, therefore, full set.
+ return getFull();
+ return X;
+}
+
+ConstantRange ConstantRange::subWithNoWrap(const ConstantRange &Other,
+ unsigned NoWrapKind,
+ PreferredRangeType RangeType) const {
+ // Calculate the range for "X - Y" which is guaranteed not to wrap(overflow).
+ // (X is from this, and Y is from Other)
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ if (isFullSet() && Other.isFullSet())
+ return getFull();
+
+ using OBO = OverflowingBinaryOperator;
+ ConstantRange Result = sub(Other);
+
+ // If an overflow happens for every value pair in these two constant ranges,
+ // we must return Empty set. In signed case, we get that for free, because we
+ // get lucky that intersection of sub() with ssub_sat() results in an
+ // empty set. But for unsigned we must perform the overflow check manually.
+
+ if (NoWrapKind & OBO::NoSignedWrap)
+ Result = Result.intersectWith(ssub_sat(Other), RangeType);
+
+ if (NoWrapKind & OBO::NoUnsignedWrap) {
+ if (getUnsignedMax().ult(Other.getUnsignedMin()))
+ return getEmpty(); // Always overflows.
+ Result = Result.intersectWith(usub_sat(Other), RangeType);
+ }
+
+ return Result;
+}
+
+ConstantRange
+ConstantRange::multiply(const ConstantRange &Other) const {
+ // TODO: If either operand is a single element and the multiply is known to
+ // be non-wrapping, round the result min and max value to the appropriate
+ // multiple of that element. If wrapping is possible, at least adjust the
+ // range according to the greatest power-of-two factor of the single element.
+
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ // Multiplication is signedness-independent. However different ranges can be
+ // obtained depending on how the input ranges are treated. These different
+ // ranges are all conservatively correct, but one might be better than the
+ // other. We calculate two ranges; one treating the inputs as unsigned
+ // and the other signed, then return the smallest of these ranges.
+
+ // Unsigned range first.
+ APInt this_min = getUnsignedMin().zext(getBitWidth() * 2);
+ APInt this_max = getUnsignedMax().zext(getBitWidth() * 2);
+ APInt Other_min = Other.getUnsignedMin().zext(getBitWidth() * 2);
+ APInt Other_max = Other.getUnsignedMax().zext(getBitWidth() * 2);
+
+ ConstantRange Result_zext = ConstantRange(this_min * Other_min,
+ this_max * Other_max + 1);
+ ConstantRange UR = Result_zext.truncate(getBitWidth());
+
+  // If the unsigned range doesn't wrap and isn't negative, then it's a range
+  // from one positive number to another, which is as good as we can generate.
+  // In this case, skip the extra work of generating signed ranges that aren't
+ // going to be better than this range.
+ if (!UR.isUpperWrapped() &&
+ (UR.getUpper().isNonNegative() || UR.getUpper().isMinSignedValue()))
+ return UR;
+
+ // Now the signed range. Because we could be dealing with negative numbers
+ // here, the lower bound is the smallest of the cartesian product of the
+ // lower and upper ranges; for example:
+ // [-1,4) * [-2,3) = min(-1*-2, -1*2, 3*-2, 3*2) = -6.
+ // Similarly for the upper bound, swapping min for max.
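+ // For example, for i8, [-2, 2) * [-2, 2): treated as unsigned the inputs
+ // wrap and the computed range degenerates to the full set, while the signed
+ // computation yields [-2, 5), so the signed range is returned.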
+
+ this_min = getSignedMin().sext(getBitWidth() * 2);
+ this_max = getSignedMax().sext(getBitWidth() * 2);
+ Other_min = Other.getSignedMin().sext(getBitWidth() * 2);
+ Other_max = Other.getSignedMax().sext(getBitWidth() * 2);
+
+ auto L = {this_min * Other_min, this_min * Other_max,
+ this_max * Other_min, this_max * Other_max};
+ auto Compare = [](const APInt &A, const APInt &B) { return A.slt(B); };
+ ConstantRange Result_sext(std::min(L, Compare), std::max(L, Compare) + 1);
+ ConstantRange SR = Result_sext.truncate(getBitWidth());
+
+ return UR.isSizeStrictlySmallerThan(SR) ? UR : SR;
+}
+
+ConstantRange ConstantRange::smul_fast(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt Min = getSignedMin();
+ APInt Max = getSignedMax();
+ APInt OtherMin = Other.getSignedMin();
+ APInt OtherMax = Other.getSignedMax();
+
+ bool O1, O2, O3, O4;
+ auto Muls = {Min.smul_ov(OtherMin, O1), Min.smul_ov(OtherMax, O2),
+ Max.smul_ov(OtherMin, O3), Max.smul_ov(OtherMax, O4)};
+ if (O1 || O2 || O3 || O4)
+ return getFull();
+
+ auto Compare = [](const APInt &A, const APInt &B) { return A.slt(B); };
+ return getNonEmpty(std::min(Muls, Compare), std::max(Muls, Compare) + 1);
+}
+
+ConstantRange
+ConstantRange::smax(const ConstantRange &Other) const {
+ // X smax Y is: range(smax(X_smin, Y_smin),
+ // smax(X_smax, Y_smax))
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ APInt NewL = APIntOps::smax(getSignedMin(), Other.getSignedMin());
+ APInt NewU = APIntOps::smax(getSignedMax(), Other.getSignedMax()) + 1;
+ ConstantRange Res = getNonEmpty(std::move(NewL), std::move(NewU));
+ if (isSignWrappedSet() || Other.isSignWrappedSet())
+ return Res.intersectWith(unionWith(Other, Signed), Signed);
+ return Res;
+}
+
+ConstantRange
+ConstantRange::umax(const ConstantRange &Other) const {
+ // X umax Y is: range(umax(X_umin, Y_umin),
+ // umax(X_umax, Y_umax))
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ APInt NewL = APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin());
+ APInt NewU = APIntOps::umax(getUnsignedMax(), Other.getUnsignedMax()) + 1;
+ ConstantRange Res = getNonEmpty(std::move(NewL), std::move(NewU));
+ if (isWrappedSet() || Other.isWrappedSet())
+ return Res.intersectWith(unionWith(Other, Unsigned), Unsigned);
+ return Res;
+}
+
+ConstantRange
+ConstantRange::smin(const ConstantRange &Other) const {
+ // X smin Y is: range(smin(X_smin, Y_smin),
+ // smin(X_smax, Y_smax))
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ APInt NewL = APIntOps::smin(getSignedMin(), Other.getSignedMin());
+ APInt NewU = APIntOps::smin(getSignedMax(), Other.getSignedMax()) + 1;
+ ConstantRange Res = getNonEmpty(std::move(NewL), std::move(NewU));
+ if (isSignWrappedSet() || Other.isSignWrappedSet())
+ return Res.intersectWith(unionWith(Other, Signed), Signed);
+ return Res;
+}
+
+ConstantRange
+ConstantRange::umin(const ConstantRange &Other) const {
+ // X umin Y is: range(umin(X_umin, Y_umin),
+ // umin(X_umax, Y_umax))
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ APInt NewL = APIntOps::umin(getUnsignedMin(), Other.getUnsignedMin());
+ APInt NewU = APIntOps::umin(getUnsignedMax(), Other.getUnsignedMax()) + 1;
+ ConstantRange Res = getNonEmpty(std::move(NewL), std::move(NewU));
+ if (isWrappedSet() || Other.isWrappedSet())
+ return Res.intersectWith(unionWith(Other, Unsigned), Unsigned);
+ return Res;
+}
+
+ConstantRange
+ConstantRange::udiv(const ConstantRange &RHS) const {
+ if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax().isZero())
+ return getEmpty();
+
+ APInt Lower = getUnsignedMin().udiv(RHS.getUnsignedMax());
+
+ APInt RHS_umin = RHS.getUnsignedMin();
+ if (RHS_umin.isZero()) {
+ // We want the lowest value in RHS excluding zero. Usually that would be 1
+ // except for a range in the form of [X, 1) in which case it would be X.
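+ // For example, for i8, [200, 1) == {200, ..., 255, 0}, whose smallest
+ // non-zero element is its lower bound, 200.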
+ if (RHS.getUpper() == 1)
+ RHS_umin = RHS.getLower();
+ else
+ RHS_umin = 1;
+ }
+
+ APInt Upper = getUnsignedMax().udiv(RHS_umin) + 1;
+ return getNonEmpty(std::move(Lower), std::move(Upper));
+}
+
+ConstantRange ConstantRange::sdiv(const ConstantRange &RHS) const {
+ // We split up the LHS and RHS into positive and negative components
+ // and then also compute the positive and negative components of the result
+ // separately by combining division results with the appropriate signs.
+ APInt Zero = APInt::getZero(getBitWidth());
+ APInt SignedMin = APInt::getSignedMinValue(getBitWidth());
+ // There are no positive 1-bit values. The 1 would get interpreted as -1.
+ ConstantRange PosFilter =
+ getBitWidth() == 1 ? getEmpty()
+ : ConstantRange(APInt(getBitWidth(), 1), SignedMin);
+ ConstantRange NegFilter(SignedMin, Zero);
+ ConstantRange PosL = intersectWith(PosFilter);
+ ConstantRange NegL = intersectWith(NegFilter);
+ ConstantRange PosR = RHS.intersectWith(PosFilter);
+ ConstantRange NegR = RHS.intersectWith(NegFilter);
+
+ ConstantRange PosRes = getEmpty();
+ if (!PosL.isEmptySet() && !PosR.isEmptySet())
+ // pos / pos = pos.
+ PosRes = ConstantRange(PosL.Lower.sdiv(PosR.Upper - 1),
+ (PosL.Upper - 1).sdiv(PosR.Lower) + 1);
+
+ if (!NegL.isEmptySet() && !NegR.isEmptySet()) {
+ // neg / neg = pos.
+ //
+ // We need to deal with one tricky case here: SignedMin / -1 is UB on the
+ // IR level, so we'll want to exclude this case when calculating bounds.
+ // (For APInts the operation is well-defined and yields SignedMin.) We
+ // handle this by dropping either SignedMin from the LHS or -1 from the RHS.
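+ // For example, for i8, -128 / -1 would be +128, which is not representable
+ // in i8; APInt::sdiv yields -128 for it, but the IR-level division is UB.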
+ APInt Lo = (NegL.Upper - 1).sdiv(NegR.Lower);
+ if (NegL.Lower.isMinSignedValue() && NegR.Upper.isZero()) {
+ // Remove -1 from the RHS. Skip if it's the only element, as this would
+ // leave us with an empty set.
+ if (!NegR.Lower.isAllOnes()) {
+ APInt AdjNegRUpper;
+ if (RHS.Lower.isAllOnes())
+ // Negative part of [-1, X] without -1 is [SignedMin, X].
+ AdjNegRUpper = RHS.Upper;
+ else
+ // [X, -1] without -1 is [X, -2].
+ AdjNegRUpper = NegR.Upper - 1;
+
+ PosRes = PosRes.unionWith(
+ ConstantRange(Lo, NegL.Lower.sdiv(AdjNegRUpper - 1) + 1));
+ }
+
+ // Remove SignedMin from the LHS. Skip if it's the only element, as this
+ // would leave us with an empty set.
+ if (NegL.Upper != SignedMin + 1) {
+ APInt AdjNegLLower;
+ if (Upper == SignedMin + 1)
+ // Negative part of [X, SignedMin] without SignedMin is [X, -1].
+ AdjNegLLower = Lower;
+ else
+ // [SignedMin, X] without SignedMin is [SignedMin + 1, X].
+ AdjNegLLower = NegL.Lower + 1;
+
+ PosRes = PosRes.unionWith(
+ ConstantRange(std::move(Lo),
+ AdjNegLLower.sdiv(NegR.Upper - 1) + 1));
+ }
+ } else {
+ PosRes = PosRes.unionWith(
+ ConstantRange(std::move(Lo), NegL.Lower.sdiv(NegR.Upper - 1) + 1));
+ }
+ }
+
+ ConstantRange NegRes = getEmpty();
+ if (!PosL.isEmptySet() && !NegR.isEmptySet())
+ // pos / neg = neg.
+ NegRes = ConstantRange((PosL.Upper - 1).sdiv(NegR.Upper - 1),
+ PosL.Lower.sdiv(NegR.Lower) + 1);
+
+ if (!NegL.isEmptySet() && !PosR.isEmptySet())
+ // neg / pos = neg.
+ NegRes = NegRes.unionWith(
+ ConstantRange(NegL.Lower.sdiv(PosR.Lower),
+ (NegL.Upper - 1).sdiv(PosR.Upper - 1) + 1));
+
+ // Prefer a non-wrapping signed range here.
+ ConstantRange Res = NegRes.unionWith(PosRes, PreferredRangeType::Signed);
+
+ // Preserve the zero that we dropped when splitting the LHS by sign.
+ if (contains(Zero) && (!PosR.isEmptySet() || !NegR.isEmptySet()))
+ Res = Res.unionWith(ConstantRange(Zero));
+ return Res;
+}
+
+ConstantRange ConstantRange::urem(const ConstantRange &RHS) const {
+ if (isEmptySet() || RHS.isEmptySet() || RHS.getUnsignedMax().isZero())
+ return getEmpty();
+
+ if (const APInt *RHSInt = RHS.getSingleElement()) {
+ // UREM by zero is UB.
+ if (RHSInt->isZero())
+ return getEmpty();
+ // Use APInt's implementation of UREM for single element ranges.
+ if (const APInt *LHSInt = getSingleElement())
+ return {LHSInt->urem(*RHSInt)};
+ }
+
+ // L % R for L < R is L.
+ if (getUnsignedMax().ult(RHS.getUnsignedMin()))
+ return *this;
+
+ // L % R is <= L and < R.
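+ // For example, for i8, [10, 20) % [5, 8) gives [0, umin(19, 7 - 1) + 1),
+ // i.e. [0, 7).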
+ APInt Upper = APIntOps::umin(getUnsignedMax(), RHS.getUnsignedMax() - 1) + 1;
+ return getNonEmpty(APInt::getZero(getBitWidth()), std::move(Upper));
+}
+
+ConstantRange ConstantRange::srem(const ConstantRange &RHS) const {
+ if (isEmptySet() || RHS.isEmptySet())
+ return getEmpty();
+
+ if (const APInt *RHSInt = RHS.getSingleElement()) {
+ // SREM by zero is UB.
+ if (RHSInt->isZero())
+ return getEmpty();
+ // Use APInt's implementation of SREM for single element ranges.
+ if (const APInt *LHSInt = getSingleElement())
+ return {LHSInt->srem(*RHSInt)};
+ }
+
+ ConstantRange AbsRHS = RHS.abs();
+ APInt MinAbsRHS = AbsRHS.getUnsignedMin();
+ APInt MaxAbsRHS = AbsRHS.getUnsignedMax();
+
+ // Modulus by zero is UB.
+ if (MaxAbsRHS.isZero())
+ return getEmpty();
+
+ if (MinAbsRHS.isZero())
+ ++MinAbsRHS;
+
+ APInt MinLHS = getSignedMin(), MaxLHS = getSignedMax();
+
+ if (MinLHS.isNonNegative()) {
+ // L % R for L < R is L.
+ if (MaxLHS.ult(MinAbsRHS))
+ return *this;
+
+ // L % R is <= L and < R.
+ APInt Upper = APIntOps::umin(MaxLHS, MaxAbsRHS - 1) + 1;
+ return ConstantRange(APInt::getZero(getBitWidth()), std::move(Upper));
+ }
+
+ // Same basic logic as above, but the result is negative.
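+ // For example, for i8, [-19, -9) % [5, 8) yields [-6, 1), i.e. {-6, ..., 0}.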
+ if (MaxLHS.isNegative()) {
+ if (MinLHS.ugt(-MinAbsRHS))
+ return *this;
+
+ APInt Lower = APIntOps::umax(MinLHS, -MaxAbsRHS + 1);
+ return ConstantRange(std::move(Lower), APInt(getBitWidth(), 1));
+ }
+
+ // LHS range crosses zero.
+ APInt Lower = APIntOps::umax(MinLHS, -MaxAbsRHS + 1);
+ APInt Upper = APIntOps::umin(MaxLHS, MaxAbsRHS - 1) + 1;
+ return ConstantRange(std::move(Lower), std::move(Upper));
+}
+
+ConstantRange ConstantRange::binaryNot() const {
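+ // ~X == -1 - X in two's complement, so the bitwise NOT of a range is the
+ // all-ones constant minus the range.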
+ return ConstantRange(APInt::getAllOnes(getBitWidth())).sub(*this);
+}
+
+ConstantRange ConstantRange::binaryAnd(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ ConstantRange KnownBitsRange =
+ fromKnownBits(toKnownBits() & Other.toKnownBits(), false);
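+ // X & Y is never unsigned-greater than either operand, so
+ // [0, umin(Xmax, Ymax)] is also a sound bound; intersect it with the
+ // known-bits range.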
+ ConstantRange UMinUMaxRange =
+ getNonEmpty(APInt::getZero(getBitWidth()),
+ APIntOps::umin(Other.getUnsignedMax(), getUnsignedMax()) + 1);
+ return KnownBitsRange.intersectWith(UMinUMaxRange);
+}
+
+ConstantRange ConstantRange::binaryOr(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ ConstantRange KnownBitsRange =
+ fromKnownBits(toKnownBits() | Other.toKnownBits(), false);
+ // X | Y is never unsigned-less than either operand, so encode that lower
+ // bound as the upper-wrapped range [umax(Xmin, Ymin), 0).
+ ConstantRange UMaxUMinRange =
+ getNonEmpty(APIntOps::umax(getUnsignedMin(), Other.getUnsignedMin()),
+ APInt::getZero(getBitWidth()));
+ return KnownBitsRange.intersectWith(UMaxUMinRange);
+}
+
+ConstantRange ConstantRange::binaryXor(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ // Use APInt's implementation of XOR for single element ranges.
+ if (isSingleElement() && Other.isSingleElement())
+ return {*getSingleElement() ^ *Other.getSingleElement()};
+
+ // Special-case binary complement, since we can give a precise answer.
+ if (Other.isSingleElement() && Other.getSingleElement()->isAllOnes())
+ return binaryNot();
+ if (isSingleElement() && getSingleElement()->isAllOnes())
+ return Other.binaryNot();
+
+ return fromKnownBits(toKnownBits() ^ Other.toKnownBits(), /*IsSigned=*/false);
+}
+
+ConstantRange
+ConstantRange::shl(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt Min = getUnsignedMin();
+ APInt Max = getUnsignedMax();
+ if (const APInt *RHS = Other.getSingleElement()) {
+ unsigned BW = getBitWidth();
+ if (RHS->uge(BW))
+ return getEmpty();
+
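+ // When the shift amount cannot shift out any bits that differ between Min
+ // and Max, shifting the bounds directly gives a tight result. For example,
+ // for i8, [16, 18) << 2 yields [64, 69).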
+ unsigned EqualLeadingBits = (Min ^ Max).countLeadingZeros();
+ if (RHS->ule(EqualLeadingBits))
+ return getNonEmpty(Min << *RHS, (Max << *RHS) + 1);
+
+ return getNonEmpty(APInt::getZero(BW),
+ APInt::getBitsSetFrom(BW, RHS->getZExtValue()) + 1);
+ }
+
+ APInt OtherMax = Other.getUnsignedMax();
+
+ // If the maximum shift amount exceeds the number of leading zeros of the
+ // maximum value, the shift can overflow; conservatively return the full set.
+ if (OtherMax.ugt(Max.countLeadingZeros()))
+ return getFull();
+
+ // FIXME: implement the other tricky cases
+
+ Min <<= Other.getUnsignedMin();
+ Max <<= OtherMax;
+
+ return ConstantRange::getNonEmpty(std::move(Min), std::move(Max) + 1);
+}
+
+ConstantRange
+ConstantRange::lshr(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt max = getUnsignedMax().lshr(Other.getUnsignedMin()) + 1;
+ APInt min = getUnsignedMin().lshr(Other.getUnsignedMax());
+ return getNonEmpty(std::move(min), std::move(max));
+}
+
+ConstantRange
+ConstantRange::ashr(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ // May straddle zero, so handle both positive and negative cases.
+ // 'PosMax' is the upper bound of the result of the ashr
+ // operation, when Upper of the LHS of ashr is a non-negative
+ // number. Since ashr of a non-negative number will result in a
+ // smaller number, the Upper value of LHS is shifted right with
+ // the minimum value of 'Other' instead of the maximum value.
+ APInt PosMax = getSignedMax().ashr(Other.getUnsignedMin()) + 1;
+
+ // 'PosMin' is the lower bound of the result of the ashr
+ // operation, when Lower of the LHS is a non-negative number.
+ // Since ashr of a non-negative number will result in a smaller
+ // number, the Lower value of LHS is shifted right with the
+ // maximum value of 'Other'.
+ APInt PosMin = getSignedMin().ashr(Other.getUnsignedMax());
+
+ // 'NegMax' is the upper bound of the result of the ashr
+ // operation, when Upper of the LHS of ashr is a negative number.
+ // Since 'ashr' of a negative number will result in a bigger
+ // number, the Upper value of LHS is shifted right with the
+ // maximum value of 'Other'.
+ APInt NegMax = getSignedMax().ashr(Other.getUnsignedMax()) + 1;
+
+ // 'NegMin' is the lower bound of the result of the ashr
+ // operation, when Lower of the LHS of ashr is a negative number.
+ // Since 'ashr' of a negative number will result in a bigger
+ // number, the Lower value of LHS is shifted right with the
+ // minimum value of 'Other'.
+ APInt NegMin = getSignedMin().ashr(Other.getUnsignedMin());
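+ // For example, for i8, [-8, 4) ashr [1, 3) straddles zero: NegMin is
+ // -8 >> 1 == -4 and PosMax is (3 >> 1) + 1 == 2, giving [-4, 2).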
+
+ APInt max, min;
+ if (getSignedMin().isNonNegative()) {
+ // Upper and Lower of LHS are non-negative.
+ min = PosMin;
+ max = PosMax;
+ } else if (getSignedMax().isNegative()) {
+ // Upper and Lower of LHS are negative.
+ min = NegMin;
+ max = NegMax;
+ } else {
+ // Upper is non-negative and Lower is negative.
+ min = NegMin;
+ max = PosMax;
+ }
+ return getNonEmpty(std::move(min), std::move(max));
+}
+
+ConstantRange ConstantRange::uadd_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt NewL = getUnsignedMin().uadd_sat(Other.getUnsignedMin());
+ APInt NewU = getUnsignedMax().uadd_sat(Other.getUnsignedMax()) + 1;
+ return getNonEmpty(std::move(NewL), std::move(NewU));
+}
+
+ConstantRange ConstantRange::sadd_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt NewL = getSignedMin().sadd_sat(Other.getSignedMin());
+ APInt NewU = getSignedMax().sadd_sat(Other.getSignedMax()) + 1;
+ return getNonEmpty(std::move(NewL), std::move(NewU));
+}
+
+ConstantRange ConstantRange::usub_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt NewL = getUnsignedMin().usub_sat(Other.getUnsignedMax());
+ APInt NewU = getUnsignedMax().usub_sat(Other.getUnsignedMin()) + 1;
+ return getNonEmpty(std::move(NewL), std::move(NewU));
+}
+
+ConstantRange ConstantRange::ssub_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt NewL = getSignedMin().ssub_sat(Other.getSignedMax());
+ APInt NewU = getSignedMax().ssub_sat(Other.getSignedMin()) + 1;
+ return getNonEmpty(std::move(NewL), std::move(NewU));
+}
+
+ConstantRange ConstantRange::umul_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt NewL = getUnsignedMin().umul_sat(Other.getUnsignedMin());
+ APInt NewU = getUnsignedMax().umul_sat(Other.getUnsignedMax()) + 1;
+ return getNonEmpty(std::move(NewL), std::move(NewU));
+}
+
+ConstantRange ConstantRange::smul_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ // Because we could be dealing with negative numbers here, the lower bound is
+ // the smallest of the cartesian product of the lower and upper ranges;
+ // for example:
+ // [-1,4) * [-2,3) = min(-1*-2, -1*2, 3*-2, 3*2) = -6.
+ // Similarly for the upper bound, swapping min for max.
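+ // For example, for i8, [100, 120) smul_sat [2, 3): every product saturates
+ // at 127, so the result is the singleton [127, 128).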
+
+ APInt Min = getSignedMin();
+ APInt Max = getSignedMax();
+ APInt OtherMin = Other.getSignedMin();
+ APInt OtherMax = Other.getSignedMax();
+
+ auto L = {Min.smul_sat(OtherMin), Min.smul_sat(OtherMax),
+ Max.smul_sat(OtherMin), Max.smul_sat(OtherMax)};
+ auto Compare = [](const APInt &A, const APInt &B) { return A.slt(B); };
+ return getNonEmpty(std::min(L, Compare), std::max(L, Compare) + 1);
+}
+
+ConstantRange ConstantRange::ushl_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt NewL = getUnsignedMin().ushl_sat(Other.getUnsignedMin());
+ APInt NewU = getUnsignedMax().ushl_sat(Other.getUnsignedMax()) + 1;
+ return getNonEmpty(std::move(NewL), std::move(NewU));
+}
+
+ConstantRange ConstantRange::sshl_sat(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+
+ APInt Min = getSignedMin(), Max = getSignedMax();
+ APInt ShAmtMin = Other.getUnsignedMin(), ShAmtMax = Other.getUnsignedMax();
+ APInt NewL = Min.sshl_sat(Min.isNonNegative() ? ShAmtMin : ShAmtMax);
+ APInt NewU = Max.sshl_sat(Max.isNegative() ? ShAmtMin : ShAmtMax) + 1;
+ return getNonEmpty(std::move(NewL), std::move(NewU));
+}
+
+ConstantRange ConstantRange::inverse() const {
+ if (isFullSet())
+ return getEmpty();
+ if (isEmptySet())
+ return getFull();
+ return ConstantRange(Upper, Lower);
+}
+
+ConstantRange ConstantRange::abs(bool IntMinIsPoison) const {
+ if (isEmptySet())
+ return getEmpty();
+
+ if (isSignWrappedSet()) {
+ APInt Lo;
+ // Check whether the range crosses zero.
+ if (Upper.isStrictlyPositive() || !Lower.isStrictlyPositive())
+ Lo = APInt::getZero(getBitWidth());
+ else
+ Lo = APIntOps::umin(Lower, -Upper + 1);
+
+ // If SignedMin is not poison, then it is included in the result range.
+ if (IntMinIsPoison)
+ return ConstantRange(Lo, APInt::getSignedMinValue(getBitWidth()));
+ else
+ return ConstantRange(Lo, APInt::getSignedMinValue(getBitWidth()) + 1);
+ }
+
+ APInt SMin = getSignedMin(), SMax = getSignedMax();
+
+ // Skip SignedMin if it is poison.
+ if (IntMinIsPoison && SMin.isMinSignedValue()) {
+ // The range may become empty if it *only* contains SignedMin.
+ if (SMax.isMinSignedValue())
+ return getEmpty();
+ ++SMin;
+ }
+
+ // All non-negative.
+ if (SMin.isNonNegative())
+ return *this;
+
+ // All negative.
+ if (SMax.isNegative())
+ return ConstantRange(-SMax, -SMin + 1);
+
+ // Range crosses zero.
+ return ConstantRange(APInt::getZero(getBitWidth()),
+ APIntOps::umax(-SMin, SMax) + 1);
+}
+
+ConstantRange::OverflowResult ConstantRange::unsignedAddMayOverflow(
+ const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return OverflowResult::MayOverflow;
+
+ APInt Min = getUnsignedMin(), Max = getUnsignedMax();
+ APInt OtherMin = Other.getUnsignedMin(), OtherMax = Other.getUnsignedMax();
+
+ // a u+ b overflows high iff a u> ~b.
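+ // For example, for i8, 200 u+ 100 always overflows, since 200 u> ~100 == 155.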
+ if (Min.ugt(~OtherMin))
+ return OverflowResult::AlwaysOverflowsHigh;
+ if (Max.ugt(~OtherMax))
+ return OverflowResult::MayOverflow;
+ return OverflowResult::NeverOverflows;
+}
+
+ConstantRange::OverflowResult ConstantRange::signedAddMayOverflow(
+ const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return OverflowResult::MayOverflow;
+
+ APInt Min = getSignedMin(), Max = getSignedMax();
+ APInt OtherMin = Other.getSignedMin(), OtherMax = Other.getSignedMax();
+
+ APInt SignedMin = APInt::getSignedMinValue(getBitWidth());
+ APInt SignedMax = APInt::getSignedMaxValue(getBitWidth());
+
+ // a s+ b overflows high iff a s>=0 && b s>= 0 && a s> smax - b.
+ // a s+ b overflows low iff a s< 0 && b s< 0 && a s< smin - b.
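+ // For example, for i8, 100 s+ 50 always overflows high, since
+ // 100 s> smax - 50 == 77.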
+ if (Min.isNonNegative() && OtherMin.isNonNegative() &&
+ Min.sgt(SignedMax - OtherMin))
+ return OverflowResult::AlwaysOverflowsHigh;
+ if (Max.isNegative() && OtherMax.isNegative() &&
+ Max.slt(SignedMin - OtherMax))
+ return OverflowResult::AlwaysOverflowsLow;
+
+ if (Max.isNonNegative() && OtherMax.isNonNegative() &&
+ Max.sgt(SignedMax - OtherMax))
+ return OverflowResult::MayOverflow;
+ if (Min.isNegative() && OtherMin.isNegative() &&
+ Min.slt(SignedMin - OtherMin))
+ return OverflowResult::MayOverflow;
+
+ return OverflowResult::NeverOverflows;
+}
+
+ConstantRange::OverflowResult ConstantRange::unsignedSubMayOverflow(
+ const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return OverflowResult::MayOverflow;
+
+ APInt Min = getUnsignedMin(), Max = getUnsignedMax();
+ APInt OtherMin = Other.getUnsignedMin(), OtherMax = Other.getUnsignedMax();
+
+ // a u- b overflows low iff a u< b.
+ if (Max.ult(OtherMin))
+ return OverflowResult::AlwaysOverflowsLow;
+ if (Min.ult(OtherMax))
+ return OverflowResult::MayOverflow;
+ return OverflowResult::NeverOverflows;
+}
+
+ConstantRange::OverflowResult ConstantRange::signedSubMayOverflow(
+ const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return OverflowResult::MayOverflow;
+
+ APInt Min = getSignedMin(), Max = getSignedMax();
+ APInt OtherMin = Other.getSignedMin(), OtherMax = Other.getSignedMax();
+
+ APInt SignedMin = APInt::getSignedMinValue(getBitWidth());
+ APInt SignedMax = APInt::getSignedMaxValue(getBitWidth());
+
+ // a s- b overflows high iff a s>=0 && b s< 0 && a s> smax + b.
+ // a s- b overflows low iff a s< 0 && b s>= 0 && a s< smin + b.
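+ // For example, for i8, 100 s- (-50) always overflows high, since
+ // 100 s> smax + (-50) == 77.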
+ if (Min.isNonNegative() && OtherMax.isNegative() &&
+ Min.sgt(SignedMax + OtherMax))
+ return OverflowResult::AlwaysOverflowsHigh;
+ if (Max.isNegative() && OtherMin.isNonNegative() &&
+ Max.slt(SignedMin + OtherMin))
+ return OverflowResult::AlwaysOverflowsLow;
+
+ if (Max.isNonNegative() && OtherMin.isNegative() &&
+ Max.sgt(SignedMax + OtherMin))
+ return OverflowResult::MayOverflow;
+ if (Min.isNegative() && OtherMax.isNonNegative() &&
+ Min.slt(SignedMin + OtherMax))
+ return OverflowResult::MayOverflow;
+
+ return OverflowResult::NeverOverflows;
+}
+
+ConstantRange::OverflowResult ConstantRange::unsignedMulMayOverflow(
+ const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return OverflowResult::MayOverflow;
+
+ APInt Min = getUnsignedMin(), Max = getUnsignedMax();
+ APInt OtherMin = Other.getUnsignedMin(), OtherMax = Other.getUnsignedMax();
+ bool Overflow;
+
+ (void) Min.umul_ov(OtherMin, Overflow);
+ if (Overflow)
+ return OverflowResult::AlwaysOverflowsHigh;
+
+ (void) Max.umul_ov(OtherMax, Overflow);
+ if (Overflow)
+ return OverflowResult::MayOverflow;
+
+ return OverflowResult::NeverOverflows;
+}
+
+void ConstantRange::print(raw_ostream &OS) const {
+ if (isFullSet())
+ OS << "full-set";
+ else if (isEmptySet())
+ OS << "empty-set";
+ else
+ OS << "[" << Lower << "," << Upper << ")";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void ConstantRange::dump() const {
+ print(dbgs());
+}
+#endif
+
+ConstantRange llvm::getConstantRangeFromMetadata(const MDNode &Ranges) {
+ const unsigned NumRanges = Ranges.getNumOperands() / 2;
+ assert(NumRanges >= 1 && "Must have at least one range!");
+ assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
+
+ auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
+ auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
+
+ ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
+
+ for (unsigned i = 1; i < NumRanges; ++i) {
+ auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
+ auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
+
+ // Note: unionWith will potentially create a range that contains values not
+ // contained in any of the original N ranges.
+ CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
+ }
+
+ return CR;
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/Constants.cpp b/contrib/llvm-project/llvm/lib/IR/Constants.cpp
new file mode 100644
index 000000000000..f9800cc0c07c
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Constants.cpp
@@ -0,0 +1,3490 @@
+//===-- Constants.cpp - Implement Constant nodes --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Constant* classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Constants.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/ConstantFold.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace llvm;
+using namespace PatternMatch;
+
+//===----------------------------------------------------------------------===//
+// Constant Class
+//===----------------------------------------------------------------------===//
+
+bool Constant::isNegativeZeroValue() const {
+ // Floating point values have an explicit -0.0 value.
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->isZero() && CFP->isNegative();
+
+ // Equivalent for a vector of -0.0's.
+ if (getType()->isVectorTy())
+ if (const auto *SplatCFP = dyn_cast_or_null<ConstantFP>(getSplatValue()))
+ return SplatCFP->isNegativeZeroValue();
+
+ // We've already handled the true FP case; any other FP vectors can't represent -0.0.
+ if (getType()->isFPOrFPVectorTy())
+ return false;
+
+ // Otherwise, just use +0.0.
+ return isNullValue();
+}
+
+// Return true iff this constant is positive zero (floating point), negative
+// zero (floating point), or a null value.
+bool Constant::isZeroValue() const {
+ // Floating point values have an explicit -0.0 value.
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->isZero();
+
+ // Check for constant splat vectors of zero values.
+ if (getType()->isVectorTy())
+ if (const auto *SplatCFP = dyn_cast_or_null<ConstantFP>(getSplatValue()))
+ return SplatCFP->isZero();
+
+ // Otherwise, just use +0.0.
+ return isNullValue();
+}
+
+bool Constant::isNullValue() const {
+ // 0 is null.
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isZero();
+
+ // +0.0 is null.
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ // ppc_fp128 determines isZero using the high-order double only; we
+ // should check the bitwise value to make sure all bits are zero.
+ return CFP->isExactlyValue(+0.0);
+
+ // Constant zero is null for aggregates, ConstantPointerNull is null for
+ // pointers, and ConstantTokenNone is null for tokens.
+ return isa<ConstantAggregateZero>(this) || isa<ConstantPointerNull>(this) ||
+ isa<ConstantTokenNone>(this);
+}
+
+bool Constant::isAllOnesValue() const {
+ // Check for -1 integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isMinusOne();
+
+ // Check for FP which are bitcasted from -1 integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isAllOnes();
+
+ // Check for constant splat vectors of -1 values.
+ if (getType()->isVectorTy())
+ if (const auto *SplatVal = getSplatValue())
+ return SplatVal->isAllOnesValue();
+
+ return false;
+}
+
+bool Constant::isOneValue() const {
+ // Check for 1 integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isOne();
+
+ // Check for FP which are bitcasted from 1 integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isOne();
+
+ // Check for constant splat vectors of 1 values.
+ if (getType()->isVectorTy())
+ if (const auto *SplatVal = getSplatValue())
+ return SplatVal->isOneValue();
+
+ return false;
+}
+
+bool Constant::isNotOneValue() const {
+ // Check for 1 integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return !CI->isOneValue();
+
+ // Check for FP which are bitcasted from 1 integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return !CFP->getValueAPF().bitcastToAPInt().isOne();
+
+ // Check that vectors don't contain 1
+ if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
+ for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
+ Constant *Elt = getAggregateElement(I);
+ if (!Elt || !Elt->isNotOneValue())
+ return false;
+ }
+ return true;
+ }
+
+ // Check for splats that don't contain 1
+ if (getType()->isVectorTy())
+ if (const auto *SplatVal = getSplatValue())
+ return SplatVal->isNotOneValue();
+
+ // It *may* contain 1, we can't tell.
+ return false;
+}
+
+bool Constant::isMinSignedValue() const {
+ // Check for INT_MIN integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->isMinValue(/*isSigned=*/true);
+
+ // Check for FP which are bitcasted from INT_MIN integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().bitcastToAPInt().isMinSignedValue();
+
+ // Check for splats of INT_MIN values.
+ if (getType()->isVectorTy())
+ if (const auto *SplatVal = getSplatValue())
+ return SplatVal->isMinSignedValue();
+
+ return false;
+}
+
+bool Constant::isNotMinSignedValue() const {
+ // Check for INT_MIN integers
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return !CI->isMinValue(/*isSigned=*/true);
+
+ // Check for FP which are bitcasted from INT_MIN integers
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
+ return !CFP->getValueAPF().bitcastToAPInt().isMinSignedValue();
+
+ // Check that vectors don't contain INT_MIN
+ if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
+ for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
+ Constant *Elt = getAggregateElement(I);
+ if (!Elt || !Elt->isNotMinSignedValue())
+ return false;
+ }
+ return true;
+ }
+
+ // Check for splats that aren't INT_MIN
+ if (getType()->isVectorTy())
+ if (const auto *SplatVal = getSplatValue())
+ return SplatVal->isNotMinSignedValue();
+
+ // It *may* contain INT_MIN, we can't tell.
+ return false;
+}
+
+bool Constant::isFiniteNonZeroFP() const {
+ if (auto *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().isFiniteNonZero();
+
+ if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
+ for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
+ auto *CFP = dyn_cast_or_null<ConstantFP>(getAggregateElement(I));
+ if (!CFP || !CFP->getValueAPF().isFiniteNonZero())
+ return false;
+ }
+ return true;
+ }
+
+ if (getType()->isVectorTy())
+ if (const auto *SplatCFP = dyn_cast_or_null<ConstantFP>(getSplatValue()))
+ return SplatCFP->isFiniteNonZeroFP();
+
+ // It *may* contain finite non-zero, we can't tell.
+ return false;
+}
+
+bool Constant::isNormalFP() const {
+ if (auto *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().isNormal();
+
+ if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
+ for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
+ auto *CFP = dyn_cast_or_null<ConstantFP>(getAggregateElement(I));
+ if (!CFP || !CFP->getValueAPF().isNormal())
+ return false;
+ }
+ return true;
+ }
+
+ if (getType()->isVectorTy())
+ if (const auto *SplatCFP = dyn_cast_or_null<ConstantFP>(getSplatValue()))
+ return SplatCFP->isNormalFP();
+
+ // It *may* contain a normal fp value, we can't tell.
+ return false;
+}
+
+bool Constant::hasExactInverseFP() const {
+ if (auto *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->getValueAPF().getExactInverse(nullptr);
+
+ if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
+ for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
+ auto *CFP = dyn_cast_or_null<ConstantFP>(getAggregateElement(I));
+ if (!CFP || !CFP->getValueAPF().getExactInverse(nullptr))
+ return false;
+ }
+ return true;
+ }
+
+ if (getType()->isVectorTy())
+ if (const auto *SplatCFP = dyn_cast_or_null<ConstantFP>(getSplatValue()))
+ return SplatCFP->hasExactInverseFP();
+
+ // It *may* have an exact inverse fp value, we can't tell.
+ return false;
+}
+
+bool Constant::isNaN() const {
+ if (auto *CFP = dyn_cast<ConstantFP>(this))
+ return CFP->isNaN();
+
+ if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
+ for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
+ auto *CFP = dyn_cast_or_null<ConstantFP>(getAggregateElement(I));
+ if (!CFP || !CFP->isNaN())
+ return false;
+ }
+ return true;
+ }
+
+ if (getType()->isVectorTy())
+ if (const auto *SplatCFP = dyn_cast_or_null<ConstantFP>(getSplatValue()))
+ return SplatCFP->isNaN();
+
+ // It *may* be NaN, we can't tell.
+ return false;
+}
+
+bool Constant::isElementWiseEqual(Value *Y) const {
+ // Are they fully identical?
+ if (this == Y)
+ return true;
+
+ // The input value must be a vector constant with the same type.
+ auto *VTy = dyn_cast<VectorType>(getType());
+ if (!isa<Constant>(Y) || !VTy || VTy != Y->getType())
+ return false;
+
+ // TODO: Compare pointer constants?
+ if (!(VTy->getElementType()->isIntegerTy() ||
+ VTy->getElementType()->isFloatingPointTy()))
+ return false;
+
+ // They may still be identical element-wise (if they have `undef`s).
+ // Bitcast to integer to allow exact bitwise comparison for all types.
+ Type *IntTy = VectorType::getInteger(VTy);
+ Constant *C0 = ConstantExpr::getBitCast(const_cast<Constant *>(this), IntTy);
+ Constant *C1 = ConstantExpr::getBitCast(cast<Constant>(Y), IntTy);
+ Constant *CmpEq = ConstantExpr::getICmp(ICmpInst::ICMP_EQ, C0, C1);
+ return isa<UndefValue>(CmpEq) || match(CmpEq, m_One());
+}
+
+static bool
+containsUndefinedElement(const Constant *C,
+ function_ref<bool(const Constant *)> HasFn) {
+ if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
+ if (HasFn(C))
+ return true;
+ if (isa<ConstantAggregateZero>(C))
+ return false;
+ if (isa<ScalableVectorType>(C->getType()))
+ return false;
+
+ for (unsigned i = 0, e = cast<FixedVectorType>(VTy)->getNumElements();
+ i != e; ++i) {
+ if (Constant *Elem = C->getAggregateElement(i))
+ if (HasFn(Elem))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool Constant::containsUndefOrPoisonElement() const {
+ return containsUndefinedElement(
+ this, [&](const auto *C) { return isa<UndefValue>(C); });
+}
+
+bool Constant::containsPoisonElement() const {
+ return containsUndefinedElement(
+ this, [&](const auto *C) { return isa<PoisonValue>(C); });
+}
+
+bool Constant::containsConstantExpression() const {
+ if (auto *VTy = dyn_cast<FixedVectorType>(getType())) {
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ if (isa<ConstantExpr>(getAggregateElement(i)))
+ return true;
+ }
+ return false;
+}
+
+/// Create a '0' constant of arbitrary type.
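+/// For example, i32 yields i32 0, floating-point types yield +0.0, pointers
+/// yield null, and struct/array/vector types yield zeroinitializer.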
+Constant *Constant::getNullValue(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ return ConstantInt::get(Ty, 0);
+ case Type::HalfTyID:
+ case Type::BFloatTyID:
+ case Type::FloatTyID:
+ case Type::DoubleTyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ return ConstantFP::get(Ty->getContext(),
+ APFloat::getZero(Ty->getFltSemantics()));
+ case Type::PointerTyID:
+ return ConstantPointerNull::get(cast<PointerType>(Ty));
+ case Type::StructTyID:
+ case Type::ArrayTyID:
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID:
+ return ConstantAggregateZero::get(Ty);
+ case Type::TokenTyID:
+ return ConstantTokenNone::get(Ty->getContext());
+ default:
+ // Function, Label, or Opaque type?
+ llvm_unreachable("Cannot create a null constant of that type!");
+ }
+}
+
+Constant *Constant::getIntegerValue(Type *Ty, const APInt &V) {
+ Type *ScalarTy = Ty->getScalarType();
+
+ // Create the base integer constant.
+ Constant *C = ConstantInt::get(Ty->getContext(), V);
+
+ // Convert an integer to a pointer, if necessary.
+ if (PointerType *PTy = dyn_cast<PointerType>(ScalarTy))
+ C = ConstantExpr::getIntToPtr(C, PTy);
+
+ // Broadcast a scalar to a vector, if necessary.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ C = ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *Constant::getAllOnesValue(Type *Ty) {
+ if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
+ return ConstantInt::get(Ty->getContext(),
+ APInt::getAllOnes(ITy->getBitWidth()));
+
+ if (Ty->isFloatingPointTy()) {
+ APFloat FL = APFloat::getAllOnesValue(Ty->getFltSemantics());
+ return ConstantFP::get(Ty->getContext(), FL);
+ }
+
+ VectorType *VTy = cast<VectorType>(Ty);
+ return ConstantVector::getSplat(VTy->getElementCount(),
+ getAllOnesValue(VTy->getElementType()));
+}
+
+Constant *Constant::getAggregateElement(unsigned Elt) const {
+ assert((getType()->isAggregateType() || getType()->isVectorTy()) &&
+ "Must be an aggregate/vector constant");
+
+ if (const auto *CC = dyn_cast<ConstantAggregate>(this))
+ return Elt < CC->getNumOperands() ? CC->getOperand(Elt) : nullptr;
+
+ if (const auto *CAZ = dyn_cast<ConstantAggregateZero>(this))
+ return Elt < CAZ->getElementCount().getKnownMinValue()
+ ? CAZ->getElementValue(Elt)
+ : nullptr;
+
+ // FIXME: getNumElements() will fail for non-fixed vector types.
+ if (isa<ScalableVectorType>(getType()))
+ return nullptr;
+
+ if (const auto *PV = dyn_cast<PoisonValue>(this))
+ return Elt < PV->getNumElements() ? PV->getElementValue(Elt) : nullptr;
+
+ if (const auto *UV = dyn_cast<UndefValue>(this))
+ return Elt < UV->getNumElements() ? UV->getElementValue(Elt) : nullptr;
+
+ if (const auto *CDS = dyn_cast<ConstantDataSequential>(this))
+ return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt)
+ : nullptr;
+
+ return nullptr;
+}
+
+Constant *Constant::getAggregateElement(Constant *Elt) const {
+ assert(isa<IntegerType>(Elt->getType()) && "Index must be an integer");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Elt)) {
+ // Check if the constant fits into an uint64_t.
+ if (CI->getValue().getActiveBits() > 64)
+ return nullptr;
+ return getAggregateElement(CI->getZExtValue());
+ }
+ return nullptr;
+}
+
+void Constant::destroyConstant() {
+ // First call destroyConstantImpl on the subclass. This gives the subclass
+ // a chance to remove the constant from any maps/pools it's contained in.
+ switch (getValueID()) {
+ default:
+ llvm_unreachable("Not a constant!");
+#define HANDLE_CONSTANT(Name) \
+ case Value::Name##Val: \
+ cast<Name>(this)->destroyConstantImpl(); \
+ break;
+#include "llvm/IR/Value.def"
+ }
+
+ // When a Constant is destroyed, there may be lingering references to it
+ // from other constants in the constant pool. These constants are implicitly
+ // dependent on the module that is being deleted, but they don't know that.
+ // Because we only find out here, at destruction time, we must now notify
+ // all of our users (which should only be Constants) that they are, in fact,
+ // invalid and should be deleted.
+ while (!use_empty()) {
+ Value *V = user_back();
+#ifndef NDEBUG // Only in -g mode...
+ if (!isa<Constant>(V)) {
+ dbgs() << "While deleting: " << *this
+ << "\n\nUse still stuck around after Def is destroyed: " << *V
+ << "\n\n";
+ }
+#endif
+ assert(isa<Constant>(V) && "References remain to Constant being destroyed");
+ cast<Constant>(V)->destroyConstant();
+
+ // The constant should remove itself from our use list...
+ assert((use_empty() || user_back() != V) && "Constant not removed!");
+ }
+
+ // Value has no outstanding references it is safe to delete it now...
+ deleteConstant(this);
+}
+
+void llvm::deleteConstant(Constant *C) {
+ switch (C->getValueID()) {
+ case Constant::ConstantIntVal:
+ delete static_cast<ConstantInt *>(C);
+ break;
+ case Constant::ConstantFPVal:
+ delete static_cast<ConstantFP *>(C);
+ break;
+ case Constant::ConstantAggregateZeroVal:
+ delete static_cast<ConstantAggregateZero *>(C);
+ break;
+ case Constant::ConstantArrayVal:
+ delete static_cast<ConstantArray *>(C);
+ break;
+ case Constant::ConstantStructVal:
+ delete static_cast<ConstantStruct *>(C);
+ break;
+ case Constant::ConstantVectorVal:
+ delete static_cast<ConstantVector *>(C);
+ break;
+ case Constant::ConstantPointerNullVal:
+ delete static_cast<ConstantPointerNull *>(C);
+ break;
+ case Constant::ConstantDataArrayVal:
+ delete static_cast<ConstantDataArray *>(C);
+ break;
+ case Constant::ConstantDataVectorVal:
+ delete static_cast<ConstantDataVector *>(C);
+ break;
+ case Constant::ConstantTokenNoneVal:
+ delete static_cast<ConstantTokenNone *>(C);
+ break;
+ case Constant::BlockAddressVal:
+ delete static_cast<BlockAddress *>(C);
+ break;
+ case Constant::DSOLocalEquivalentVal:
+ delete static_cast<DSOLocalEquivalent *>(C);
+ break;
+ case Constant::NoCFIValueVal:
+ delete static_cast<NoCFIValue *>(C);
+ break;
+ case Constant::UndefValueVal:
+ delete static_cast<UndefValue *>(C);
+ break;
+ case Constant::PoisonValueVal:
+ delete static_cast<PoisonValue *>(C);
+ break;
+ case Constant::ConstantExprVal:
+ if (isa<UnaryConstantExpr>(C))
+ delete static_cast<UnaryConstantExpr *>(C);
+ else if (isa<BinaryConstantExpr>(C))
+ delete static_cast<BinaryConstantExpr *>(C);
+ else if (isa<SelectConstantExpr>(C))
+ delete static_cast<SelectConstantExpr *>(C);
+ else if (isa<ExtractElementConstantExpr>(C))
+ delete static_cast<ExtractElementConstantExpr *>(C);
+ else if (isa<InsertElementConstantExpr>(C))
+ delete static_cast<InsertElementConstantExpr *>(C);
+ else if (isa<ShuffleVectorConstantExpr>(C))
+ delete static_cast<ShuffleVectorConstantExpr *>(C);
+ else if (isa<GetElementPtrConstantExpr>(C))
+ delete static_cast<GetElementPtrConstantExpr *>(C);
+ else if (isa<CompareConstantExpr>(C))
+ delete static_cast<CompareConstantExpr *>(C);
+ else
+ llvm_unreachable("Unexpected constant expr");
+ break;
+ default:
+ llvm_unreachable("Unexpected constant");
+ }
+}
+
+/// Check if C contains a GlobalValue for which Predicate is true.
+static bool
+ConstHasGlobalValuePredicate(const Constant *C,
+ bool (*Predicate)(const GlobalValue *)) {
+ SmallPtrSet<const Constant *, 8> Visited;
+ SmallVector<const Constant *, 8> WorkList;
+ WorkList.push_back(C);
+ Visited.insert(C);
+
+ while (!WorkList.empty()) {
+ const Constant *WorkItem = WorkList.pop_back_val();
+ if (const auto *GV = dyn_cast<GlobalValue>(WorkItem))
+ if (Predicate(GV))
+ return true;
+ for (const Value *Op : WorkItem->operands()) {
+ const Constant *ConstOp = dyn_cast<Constant>(Op);
+ if (!ConstOp)
+ continue;
+ if (Visited.insert(ConstOp).second)
+ WorkList.push_back(ConstOp);
+ }
+ }
+ return false;
+}
+
+bool Constant::isThreadDependent() const {
+ auto ThreadLocalPredicate = [](const GlobalValue *GV) {
+ return GV->isThreadLocal();
+ };
+ return ConstHasGlobalValuePredicate(this, ThreadLocalPredicate);
+}
+
+bool Constant::isDLLImportDependent() const {
+ auto DLLImportPredicate = [](const GlobalValue *GV) {
+ return GV->hasDLLImportStorageClass();
+ };
+ return ConstHasGlobalValuePredicate(this, DLLImportPredicate);
+}
+
+bool Constant::isConstantUsed() const {
+ for (const User *U : users()) {
+ const Constant *UC = dyn_cast<Constant>(U);
+ if (!UC || isa<GlobalValue>(UC))
+ return true;
+
+ if (UC->isConstantUsed())
+ return true;
+ }
+ return false;
+}
+
+bool Constant::needsDynamicRelocation() const {
+ return getRelocationInfo() == GlobalRelocation;
+}
+
+bool Constant::needsRelocation() const {
+ return getRelocationInfo() != NoRelocation;
+}
+
+Constant::PossibleRelocationsTy Constant::getRelocationInfo() const {
+ if (isa<GlobalValue>(this))
+ return GlobalRelocation; // Global reference.
+
+ if (const BlockAddress *BA = dyn_cast<BlockAddress>(this))
+ return BA->getFunction()->getRelocationInfo();
+
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(this)) {
+ if (CE->getOpcode() == Instruction::Sub) {
+ ConstantExpr *LHS = dyn_cast<ConstantExpr>(CE->getOperand(0));
+ ConstantExpr *RHS = dyn_cast<ConstantExpr>(CE->getOperand(1));
+ if (LHS && RHS && LHS->getOpcode() == Instruction::PtrToInt &&
+ RHS->getOpcode() == Instruction::PtrToInt) {
+ Constant *LHSOp0 = LHS->getOperand(0);
+ Constant *RHSOp0 = RHS->getOperand(0);
+
+ // While raw uses of blockaddress need to be relocated, differences
+ // between two of them don't when they are for labels in the same
+ // function. This is a common idiom when creating a table for the
+ // indirect goto extension, so we handle it efficiently here.
+ if (isa<BlockAddress>(LHSOp0) && isa<BlockAddress>(RHSOp0) &&
+ cast<BlockAddress>(LHSOp0)->getFunction() ==
+ cast<BlockAddress>(RHSOp0)->getFunction())
+ return NoRelocation;
+
+ // Relative pointers do not need to be dynamically relocated.
+ if (auto *RHSGV =
+ dyn_cast<GlobalValue>(RHSOp0->stripInBoundsConstantOffsets())) {
+ auto *LHS = LHSOp0->stripInBoundsConstantOffsets();
+ if (auto *LHSGV = dyn_cast<GlobalValue>(LHS)) {
+ if (LHSGV->isDSOLocal() && RHSGV->isDSOLocal())
+ return LocalRelocation;
+ } else if (isa<DSOLocalEquivalent>(LHS)) {
+ if (RHSGV->isDSOLocal())
+ return LocalRelocation;
+ }
+ }
+ }
+ }
+ }
+
+ PossibleRelocationsTy Result = NoRelocation;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ Result =
+ std::max(cast<Constant>(getOperand(i))->getRelocationInfo(), Result);
+
+ return Result;
+}
+
+/// Return true if the specified constantexpr is dead. This involves
+/// recursively traversing users of the constantexpr.
+/// If RemoveDeadUsers is true, also remove dead users at the same time.
+static bool constantIsDead(const Constant *C, bool RemoveDeadUsers) {
+ if (isa<GlobalValue>(C)) return false; // Cannot remove this
+
+ Value::const_user_iterator I = C->user_begin(), E = C->user_end();
+ while (I != E) {
+ const Constant *User = dyn_cast<Constant>(*I);
+ if (!User) return false; // Non-constant usage.
+ if (!constantIsDead(User, RemoveDeadUsers))
+ return false; // Constant wasn't dead
+
+ // Just removed User, so the iterator was invalidated.
+ // Since we return immediately upon finding a live user, we can always
+ // restart from user_begin().
+ if (RemoveDeadUsers)
+ I = C->user_begin();
+ else
+ ++I;
+ }
+
+ if (RemoveDeadUsers) {
+ // If C is only used by metadata, it should not be preserved but should
+ // have its uses replaced.
+ ReplaceableMetadataImpl::SalvageDebugInfo(*C);
+ const_cast<Constant *>(C)->destroyConstant();
+ }
+
+ return true;
+}
+
+void Constant::removeDeadConstantUsers() const {
+ Value::const_user_iterator I = user_begin(), E = user_end();
+ Value::const_user_iterator LastNonDeadUser = E;
+ while (I != E) {
+ const Constant *User = dyn_cast<Constant>(*I);
+ if (!User) {
+ LastNonDeadUser = I;
+ ++I;
+ continue;
+ }
+
+ if (!constantIsDead(User, /* RemoveDeadUsers= */ true)) {
+ // If the constant wasn't dead, remember that this was the last live use
+ // and move on to the next constant.
+ LastNonDeadUser = I;
+ ++I;
+ continue;
+ }
+
+ // If the constant was dead, then the iterator is invalidated.
+ if (LastNonDeadUser == E)
+ I = user_begin();
+ else
+ I = std::next(LastNonDeadUser);
+ }
+}
+
+bool Constant::hasOneLiveUse() const { return hasNLiveUses(1); }
+
+bool Constant::hasZeroLiveUses() const { return hasNLiveUses(0); }
+
+bool Constant::hasNLiveUses(unsigned N) const {
+ unsigned NumUses = 0;
+ for (const Use &U : uses()) {
+ const Constant *User = dyn_cast<Constant>(U.getUser());
+ if (!User || !constantIsDead(User, /* RemoveDeadUsers= */ false)) {
+ ++NumUses;
+
+ if (NumUses > N)
+ return false;
+ }
+ }
+ return NumUses == N;
+}
+
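+// For example, replacing the undefs in <4 x i32> <i32 1, i32 undef, i32 2,
+// i32 undef> with i32 0 yields <i32 1, i32 0, i32 2, i32 0>.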
+Constant *Constant::replaceUndefsWith(Constant *C, Constant *Replacement) {
+ assert(C && Replacement && "Expected non-nullptr constant arguments");
+ Type *Ty = C->getType();
+ if (match(C, m_Undef())) {
+ assert(Ty == Replacement->getType() && "Expected matching types");
+ return Replacement;
+ }
+
+ // Don't know how to deal with this constant.
+ auto *VTy = dyn_cast<FixedVectorType>(Ty);
+ if (!VTy)
+ return C;
+
+ unsigned NumElts = VTy->getNumElements();
+ SmallVector<Constant *, 32> NewC(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *EltC = C->getAggregateElement(i);
+ assert((!EltC || EltC->getType() == Replacement->getType()) &&
+ "Expected matching types");
+ NewC[i] = EltC && match(EltC, m_Undef()) ? Replacement : EltC;
+ }
+ return ConstantVector::get(NewC);
+}
+
+Constant *Constant::mergeUndefsWith(Constant *C, Constant *Other) {
+ assert(C && Other && "Expected non-nullptr constant arguments");
+ if (match(C, m_Undef()))
+ return C;
+
+ Type *Ty = C->getType();
+ if (match(Other, m_Undef()))
+ return UndefValue::get(Ty);
+
+ auto *VTy = dyn_cast<FixedVectorType>(Ty);
+ if (!VTy)
+ return C;
+
+ Type *EltTy = VTy->getElementType();
+ unsigned NumElts = VTy->getNumElements();
+ assert(isa<FixedVectorType>(Other->getType()) &&
+ cast<FixedVectorType>(Other->getType())->getNumElements() == NumElts &&
+ "Type mismatch");
+
+ bool FoundExtraUndef = false;
+ SmallVector<Constant *, 32> NewC(NumElts);
+ for (unsigned I = 0; I != NumElts; ++I) {
+ NewC[I] = C->getAggregateElement(I);
+ Constant *OtherEltC = Other->getAggregateElement(I);
+ assert(NewC[I] && OtherEltC && "Unknown vector element");
+ if (!match(NewC[I], m_Undef()) && match(OtherEltC, m_Undef())) {
+ NewC[I] = UndefValue::get(EltTy);
+ FoundExtraUndef = true;
+ }
+ }
+ if (FoundExtraUndef)
+ return ConstantVector::get(NewC);
+ return C;
+}
+
+bool Constant::isManifestConstant() const {
+ if (isa<ConstantData>(this))
+ return true;
+ if (isa<ConstantAggregate>(this) || isa<ConstantExpr>(this)) {
+ for (const Value *Op : operand_values())
+ if (!cast<Constant>(Op)->isManifestConstant())
+ return false;
+ return true;
+ }
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantInt
+//===----------------------------------------------------------------------===//
+
+ConstantInt::ConstantInt(IntegerType *Ty, const APInt &V)
+ : ConstantData(Ty, ConstantIntVal), Val(V) {
+ assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type");
+}
+
+ConstantInt *ConstantInt::getTrue(LLVMContext &Context) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ if (!pImpl->TheTrueVal)
+ pImpl->TheTrueVal = ConstantInt::get(Type::getInt1Ty(Context), 1);
+ return pImpl->TheTrueVal;
+}
+
+ConstantInt *ConstantInt::getFalse(LLVMContext &Context) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ if (!pImpl->TheFalseVal)
+ pImpl->TheFalseVal = ConstantInt::get(Type::getInt1Ty(Context), 0);
+ return pImpl->TheFalseVal;
+}
+
+ConstantInt *ConstantInt::getBool(LLVMContext &Context, bool V) {
+ return V ? getTrue(Context) : getFalse(Context);
+}
+
+Constant *ConstantInt::getTrue(Type *Ty) {
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
+ ConstantInt *TrueC = ConstantInt::getTrue(Ty->getContext());
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), TrueC);
+ return TrueC;
+}
+
+Constant *ConstantInt::getFalse(Type *Ty) {
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
+ ConstantInt *FalseC = ConstantInt::getFalse(Ty->getContext());
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), FalseC);
+ return FalseC;
+}
+
+Constant *ConstantInt::getBool(Type *Ty, bool V) {
+ return V ? getTrue(Ty) : getFalse(Ty);
+}
+
+// Get a ConstantInt from an APInt.
+ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) {
+ // Look up an existing uniqued value, or get a slot to insert a new one.
+ LLVMContextImpl *pImpl = Context.pImpl;
+ std::unique_ptr<ConstantInt> &Slot = pImpl->IntConstants[V];
+ if (!Slot) {
+ // Get the corresponding integer type for the bit width of the value.
+ IntegerType *ITy = IntegerType::get(Context, V.getBitWidth());
+ Slot.reset(new ConstantInt(ITy, V));
+ }
+ assert(Slot->getType() == IntegerType::get(Context, V.getBitWidth()));
+ return Slot.get();
+}
+
+Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
+ Constant *C = get(cast<IntegerType>(Ty->getScalarType()), V, isSigned);
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V, bool isSigned) {
+ return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned));
+}
+
+ConstantInt *ConstantInt::getSigned(IntegerType *Ty, int64_t V) {
+ return get(Ty, V, true);
+}
+
+Constant *ConstantInt::getSigned(Type *Ty, int64_t V) {
+ return get(Ty, V, true);
+}
+
+Constant *ConstantInt::get(Type *Ty, const APInt& V) {
+ ConstantInt *C = get(Ty->getContext(), V);
+ assert(C->getType() == Ty->getScalarType() &&
+ "ConstantInt type doesn't match the type implied by its value!");
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+ConstantInt *ConstantInt::get(IntegerType* Ty, StringRef Str, uint8_t radix) {
+ return get(Ty->getContext(), APInt(Ty->getBitWidth(), Str, radix));
+}
+
+/// ConstantInts are uniqued per context and are never removed from it.
+void ConstantInt::destroyConstantImpl() {
+ llvm_unreachable("You can't ConstantInt->destroyConstantImpl()!");
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantFP
+//===----------------------------------------------------------------------===//
+
+Constant *ConstantFP::get(Type *Ty, double V) {
+ LLVMContext &Context = Ty->getContext();
+
+ APFloat FV(V);
+ bool ignored;
+ FV.convert(Ty->getScalarType()->getFltSemantics(),
+ APFloat::rmNearestTiesToEven, &ignored);
+ Constant *C = get(Context, FV);
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::get(Type *Ty, const APFloat &V) {
+ ConstantFP *C = get(Ty->getContext(), V);
+ assert(C->getType() == Ty->getScalarType() &&
+ "ConstantFP type doesn't match the type implied by its value!");
+
+ // For vectors, broadcast the value.
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::get(Type *Ty, StringRef Str) {
+ LLVMContext &Context = Ty->getContext();
+
+ APFloat FV(Ty->getScalarType()->getFltSemantics(), Str);
+ Constant *C = get(Context, FV);
+
+ // For vectors, broadcast the value.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getNaN(Type *Ty, bool Negative, uint64_t Payload) {
+ const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics();
+ APFloat NaN = APFloat::getNaN(Semantics, Negative, Payload);
+ Constant *C = get(Ty->getContext(), NaN);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getQNaN(Type *Ty, bool Negative, APInt *Payload) {
+ const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics();
+ APFloat NaN = APFloat::getQNaN(Semantics, Negative, Payload);
+ Constant *C = get(Ty->getContext(), NaN);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getSNaN(Type *Ty, bool Negative, APInt *Payload) {
+ const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics();
+ APFloat NaN = APFloat::getSNaN(Semantics, Negative, Payload);
+ Constant *C = get(Ty->getContext(), NaN);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getZero(Type *Ty, bool Negative) {
+ const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics();
+  APFloat Zero = APFloat::getZero(Semantics, Negative);
+  Constant *C = get(Ty->getContext(), Zero);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getZeroValueForNegation(Type *Ty) {
+ if (Ty->isFPOrFPVectorTy())
+ return getNegativeZero(Ty);
+
+ return Constant::getNullValue(Ty);
+}
+
+
+// ConstantFP accessors.
+ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
+ LLVMContextImpl* pImpl = Context.pImpl;
+
+ std::unique_ptr<ConstantFP> &Slot = pImpl->FPConstants[V];
+
+ if (!Slot) {
+ Type *Ty = Type::getFloatingPointTy(Context, V.getSemantics());
+ Slot.reset(new ConstantFP(Ty, V));
+ }
+
+ return Slot.get();
+}
+
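+// Illustrative sketch, not part of the original source (the helper name is
+// invented): ConstantFP values are uniqued per (context, APFloat) in the
+// FPConstants map above, so equal requests return the same object.
+static LLVM_ATTRIBUTE_UNUSED void exampleFPUniquing(LLVMContext &Ctx) {
+  ConstantFP *A = ConstantFP::get(Ctx, APFloat(2.0));
+  ConstantFP *B = ConstantFP::get(Ctx, APFloat(2.0));
+  assert(A == B && "uniqued by value");
+  (void)A; (void)B;
+}
+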
+Constant *ConstantFP::getInfinity(Type *Ty, bool Negative) {
+ const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics();
+ Constant *C = get(Ty->getContext(), APFloat::getInf(Semantics, Negative));
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getElementCount(), C);
+
+ return C;
+}
+
+ConstantFP::ConstantFP(Type *Ty, const APFloat &V)
+ : ConstantData(Ty, ConstantFPVal), Val(V) {
+  assert(&V.getSemantics() == &Ty->getFltSemantics() && "FP type mismatch");
+}
+
+bool ConstantFP::isExactlyValue(const APFloat &V) const {
+ return Val.bitwiseIsEqual(V);
+}
+
+/// Remove the constant from the constant table.
+void ConstantFP::destroyConstantImpl() {
+ llvm_unreachable("You can't ConstantFP->destroyConstantImpl()!");
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantAggregateZero Implementation
+//===----------------------------------------------------------------------===//
+
+Constant *ConstantAggregateZero::getSequentialElement() const {
+ if (auto *AT = dyn_cast<ArrayType>(getType()))
+ return Constant::getNullValue(AT->getElementType());
+ return Constant::getNullValue(cast<VectorType>(getType())->getElementType());
+}
+
+Constant *ConstantAggregateZero::getStructElement(unsigned Elt) const {
+ return Constant::getNullValue(getType()->getStructElementType(Elt));
+}
+
+Constant *ConstantAggregateZero::getElementValue(Constant *C) const {
+ if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
+ return getSequentialElement();
+ return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const {
+ if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
+ return getSequentialElement();
+ return getStructElement(Idx);
+}
+
+ElementCount ConstantAggregateZero::getElementCount() const {
+ Type *Ty = getType();
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
+ return ElementCount::getFixed(AT->getNumElements());
+ if (auto *VT = dyn_cast<VectorType>(Ty))
+ return VT->getElementCount();
+ return ElementCount::getFixed(Ty->getStructNumElements());
+}
+
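+// Illustrative sketch, not part of the original source (the helper name is
+// invented): zeroinitializer hands back null element values on demand via the
+// accessors above rather than storing them.
+static LLVM_ATTRIBUTE_UNUSED void exampleAggregateZero(LLVMContext &Ctx) {
+  auto *ArrTy = ArrayType::get(Type::getInt32Ty(Ctx), 8);
+  ConstantAggregateZero *CAZ = ConstantAggregateZero::get(ArrTy);
+  // Every element of an [8 x i32] zeroinitializer is i32 0.
+  Constant *Elt = CAZ->getElementValue(3u);
+  assert(Elt->isNullValue() && "elements of zeroinitializer are null");
+  (void)Elt;
+}
+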
+//===----------------------------------------------------------------------===//
+// UndefValue Implementation
+//===----------------------------------------------------------------------===//
+
+UndefValue *UndefValue::getSequentialElement() const {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(getType()))
+ return UndefValue::get(ATy->getElementType());
+ return UndefValue::get(cast<VectorType>(getType())->getElementType());
+}
+
+UndefValue *UndefValue::getStructElement(unsigned Elt) const {
+ return UndefValue::get(getType()->getStructElementType(Elt));
+}
+
+UndefValue *UndefValue::getElementValue(Constant *C) const {
+ if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
+ return getSequentialElement();
+ return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+UndefValue *UndefValue::getElementValue(unsigned Idx) const {
+ if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
+ return getSequentialElement();
+ return getStructElement(Idx);
+}
+
+unsigned UndefValue::getNumElements() const {
+ Type *Ty = getType();
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
+ return AT->getNumElements();
+ if (auto *VT = dyn_cast<VectorType>(Ty))
+ return cast<FixedVectorType>(VT)->getNumElements();
+ return Ty->getStructNumElements();
+}
+
+//===----------------------------------------------------------------------===//
+// PoisonValue Implementation
+//===----------------------------------------------------------------------===//
+
+PoisonValue *PoisonValue::getSequentialElement() const {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(getType()))
+ return PoisonValue::get(ATy->getElementType());
+ return PoisonValue::get(cast<VectorType>(getType())->getElementType());
+}
+
+PoisonValue *PoisonValue::getStructElement(unsigned Elt) const {
+ return PoisonValue::get(getType()->getStructElementType(Elt));
+}
+
+PoisonValue *PoisonValue::getElementValue(Constant *C) const {
+ if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
+ return getSequentialElement();
+ return getStructElement(cast<ConstantInt>(C)->getZExtValue());
+}
+
+PoisonValue *PoisonValue::getElementValue(unsigned Idx) const {
+ if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
+ return getSequentialElement();
+ return getStructElement(Idx);
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantXXX Classes
+//===----------------------------------------------------------------------===//
+
+template <typename ItTy, typename EltTy>
+static bool rangeOnlyContains(ItTy Start, ItTy End, EltTy Elt) {
+ for (; Start != End; ++Start)
+ if (*Start != Elt)
+ return false;
+ return true;
+}
+
+template <typename SequentialTy, typename ElementTy>
+static Constant *getIntSequenceIfElementsMatch(ArrayRef<Constant *> V) {
+ assert(!V.empty() && "Cannot get empty int sequence.");
+
+ SmallVector<ElementTy, 16> Elts;
+ for (Constant *C : V)
+ if (auto *CI = dyn_cast<ConstantInt>(C))
+ Elts.push_back(CI->getZExtValue());
+ else
+ return nullptr;
+ return SequentialTy::get(V[0]->getContext(), Elts);
+}
+
+template <typename SequentialTy, typename ElementTy>
+static Constant *getFPSequenceIfElementsMatch(ArrayRef<Constant *> V) {
+ assert(!V.empty() && "Cannot get empty FP sequence.");
+
+ SmallVector<ElementTy, 16> Elts;
+ for (Constant *C : V)
+ if (auto *CFP = dyn_cast<ConstantFP>(C))
+ Elts.push_back(CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ else
+ return nullptr;
+ return SequentialTy::getFP(V[0]->getType(), Elts);
+}
+
+template <typename SequenceTy>
+static Constant *getSequenceIfElementsMatch(Constant *C,
+ ArrayRef<Constant *> V) {
+ // We speculatively build the elements here even if it turns out that there is
+ // a constantexpr or something else weird, since it is so uncommon for that to
+ // happen.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+ if (CI->getType()->isIntegerTy(8))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint8_t>(V);
+ else if (CI->getType()->isIntegerTy(16))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint16_t>(V);
+ else if (CI->getType()->isIntegerTy(32))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint32_t>(V);
+ else if (CI->getType()->isIntegerTy(64))
+ return getIntSequenceIfElementsMatch<SequenceTy, uint64_t>(V);
+ } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ if (CFP->getType()->isHalfTy() || CFP->getType()->isBFloatTy())
+ return getFPSequenceIfElementsMatch<SequenceTy, uint16_t>(V);
+ else if (CFP->getType()->isFloatTy())
+ return getFPSequenceIfElementsMatch<SequenceTy, uint32_t>(V);
+ else if (CFP->getType()->isDoubleTy())
+ return getFPSequenceIfElementsMatch<SequenceTy, uint64_t>(V);
+ }
+
+ return nullptr;
+}
+
+ConstantAggregate::ConstantAggregate(Type *T, ValueTy VT,
+ ArrayRef<Constant *> V)
+ : Constant(T, VT, OperandTraits<ConstantAggregate>::op_end(this) - V.size(),
+ V.size()) {
+ llvm::copy(V, op_begin());
+
+ // Check that types match, unless this is an opaque struct.
+ if (auto *ST = dyn_cast<StructType>(T)) {
+ if (ST->isOpaque())
+ return;
+ for (unsigned I = 0, E = V.size(); I != E; ++I)
+ assert(V[I]->getType() == ST->getTypeAtIndex(I) &&
+ "Initializer for struct element doesn't match!");
+ }
+}
+
+ConstantArray::ConstantArray(ArrayType *T, ArrayRef<Constant *> V)
+ : ConstantAggregate(T, ConstantArrayVal, V) {
+ assert(V.size() == T->getNumElements() &&
+ "Invalid initializer for constant array");
+}
+
+Constant *ConstantArray::get(ArrayType *Ty, ArrayRef<Constant*> V) {
+ if (Constant *C = getImpl(Ty, V))
+ return C;
+ return Ty->getContext().pImpl->ArrayConstants.getOrCreate(Ty, V);
+}
+
+Constant *ConstantArray::getImpl(ArrayType *Ty, ArrayRef<Constant*> V) {
+ // Empty arrays are canonicalized to ConstantAggregateZero.
+ if (V.empty())
+ return ConstantAggregateZero::get(Ty);
+
+ for (Constant *C : V) {
+ assert(C->getType() == Ty->getElementType() &&
+ "Wrong type in array element initializer");
+ (void)C;
+ }
+
+  // If this is an all-poison array, return a PoisonValue; if all-undef, an
+  // UndefValue; if all-zero, a ConstantAggregateZero; and if every element is
+  // a "simple" ConstantInt/ConstantFP, a compact ConstantDataArray.
+ Constant *C = V[0];
+ if (isa<PoisonValue>(C) && rangeOnlyContains(V.begin(), V.end(), C))
+ return PoisonValue::get(Ty);
+
+ if (isa<UndefValue>(C) && rangeOnlyContains(V.begin(), V.end(), C))
+ return UndefValue::get(Ty);
+
+ if (C->isNullValue() && rangeOnlyContains(V.begin(), V.end(), C))
+ return ConstantAggregateZero::get(Ty);
+
+ // Check to see if all of the elements are ConstantFP or ConstantInt and if
+ // the element type is compatible with ConstantDataVector. If so, use it.
+ if (ConstantDataSequential::isElementTypeCompatible(C->getType()))
+ return getSequenceIfElementsMatch<ConstantDataArray>(C, V);
+
+ // Otherwise, we really do want to create a ConstantArray.
+ return nullptr;
+}
+
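+// Illustrative sketch, not part of the original source (the helper name is
+// invented): ConstantArray::get canonicalizes via getImpl before allocating a
+// ConstantArray node, so uniform or "simple" arrays never materialize
+// operand-based storage.
+static LLVM_ATTRIBUTE_UNUSED void
+exampleArrayCanonicalization(LLVMContext &Ctx) {
+  Type *I32 = Type::getInt32Ty(Ctx);
+  auto *ATy = ArrayType::get(I32, 3);
+  Constant *Zero = ConstantInt::get(I32, 0);
+  Constant *One = ConstantInt::get(I32, 1);
+  // All zeros: folds to a ConstantAggregateZero, not a ConstantArray.
+  Constant *AllZero = ConstantArray::get(ATy, {Zero, Zero, Zero});
+  assert(isa<ConstantAggregateZero>(AllZero));
+  // Simple mixed integers: packed into a ConstantDataArray instead.
+  Constant *Mixed = ConstantArray::get(ATy, {Zero, One, Zero});
+  assert(isa<ConstantDataArray>(Mixed));
+  (void)AllZero; (void)Mixed;
+}
+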
+StructType *ConstantStruct::getTypeForElements(LLVMContext &Context,
+ ArrayRef<Constant*> V,
+ bool Packed) {
+ unsigned VecSize = V.size();
+ SmallVector<Type*, 16> EltTypes(VecSize);
+ for (unsigned i = 0; i != VecSize; ++i)
+ EltTypes[i] = V[i]->getType();
+
+ return StructType::get(Context, EltTypes, Packed);
+}
+
+
+StructType *ConstantStruct::getTypeForElements(ArrayRef<Constant*> V,
+ bool Packed) {
+ assert(!V.empty() &&
+ "ConstantStruct::getTypeForElements cannot be called on empty list");
+ return getTypeForElements(V[0]->getContext(), V, Packed);
+}
+
+ConstantStruct::ConstantStruct(StructType *T, ArrayRef<Constant *> V)
+ : ConstantAggregate(T, ConstantStructVal, V) {
+ assert((T->isOpaque() || V.size() == T->getNumElements()) &&
+ "Invalid initializer for constant struct");
+}
+
+// ConstantStruct accessors.
+Constant *ConstantStruct::get(StructType *ST, ArrayRef<Constant*> V) {
+ assert((ST->isOpaque() || ST->getNumElements() == V.size()) &&
+ "Incorrect # elements specified to ConstantStruct::get");
+
+ // Create a ConstantAggregateZero value if all elements are zeros.
+ bool isZero = true;
+ bool isUndef = false;
+ bool isPoison = false;
+
+ if (!V.empty()) {
+ isUndef = isa<UndefValue>(V[0]);
+ isPoison = isa<PoisonValue>(V[0]);
+ isZero = V[0]->isNullValue();
+    // PoisonValue is a subclass of UndefValue, so isa<UndefValue> covers it
+    // and no separate initial check is needed.
+ if (isUndef || isZero) {
+ for (Constant *C : V) {
+ if (!C->isNullValue())
+ isZero = false;
+ if (!isa<PoisonValue>(C))
+ isPoison = false;
+ if (isa<PoisonValue>(C) || !isa<UndefValue>(C))
+ isUndef = false;
+ }
+ }
+ }
+ if (isZero)
+ return ConstantAggregateZero::get(ST);
+ if (isPoison)
+ return PoisonValue::get(ST);
+ if (isUndef)
+ return UndefValue::get(ST);
+
+ return ST->getContext().pImpl->StructConstants.getOrCreate(ST, V);
+}
+
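+// Illustrative sketch, not part of the original source (the helper name is
+// invented): getTypeForElements derives the struct type from the element
+// constants, so a literal struct constant can be built without spelling out
+// its type.
+static LLVM_ATTRIBUTE_UNUSED Constant *exampleStructConstant(LLVMContext &Ctx) {
+  Constant *I = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
+  Constant *F = ConstantFP::get(Type::getFloatTy(Ctx), 1.0);
+  Constant *Elts[] = {I, F};
+  StructType *STy = ConstantStruct::getTypeForElements(Elts); // {i32, float}
+  return ConstantStruct::get(STy, Elts); // {i32 7, float 1.0}
+}
+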
+ConstantVector::ConstantVector(VectorType *T, ArrayRef<Constant *> V)
+ : ConstantAggregate(T, ConstantVectorVal, V) {
+ assert(V.size() == cast<FixedVectorType>(T)->getNumElements() &&
+ "Invalid initializer for constant vector");
+}
+
+// ConstantVector accessors.
+Constant *ConstantVector::get(ArrayRef<Constant*> V) {
+ if (Constant *C = getImpl(V))
+ return C;
+ auto *Ty = FixedVectorType::get(V.front()->getType(), V.size());
+ return Ty->getContext().pImpl->VectorConstants.getOrCreate(Ty, V);
+}
+
+Constant *ConstantVector::getImpl(ArrayRef<Constant*> V) {
+ assert(!V.empty() && "Vectors can't be empty");
+ auto *T = FixedVectorType::get(V.front()->getType(), V.size());
+
+  // If this is an all-poison, all-undef, or all-zero vector, return a
+  // PoisonValue, UndefValue, or ConstantAggregateZero respectively.
+ Constant *C = V[0];
+ bool isZero = C->isNullValue();
+ bool isUndef = isa<UndefValue>(C);
+ bool isPoison = isa<PoisonValue>(C);
+
+ if (isZero || isUndef) {
+ for (unsigned i = 1, e = V.size(); i != e; ++i)
+ if (V[i] != C) {
+ isZero = isUndef = isPoison = false;
+ break;
+ }
+ }
+
+ if (isZero)
+ return ConstantAggregateZero::get(T);
+ if (isPoison)
+ return PoisonValue::get(T);
+ if (isUndef)
+ return UndefValue::get(T);
+
+ // Check to see if all of the elements are ConstantFP or ConstantInt and if
+ // the element type is compatible with ConstantDataVector. If so, use it.
+ if (ConstantDataSequential::isElementTypeCompatible(C->getType()))
+ return getSequenceIfElementsMatch<ConstantDataVector>(C, V);
+
+ // Otherwise, the element type isn't compatible with ConstantDataVector, or
+ // the operand list contains a ConstantExpr or something else strange.
+ return nullptr;
+}
+
+Constant *ConstantVector::getSplat(ElementCount EC, Constant *V) {
+ if (!EC.isScalable()) {
+ // If this splat is compatible with ConstantDataVector, use it instead of
+ // ConstantVector.
+ if ((isa<ConstantFP>(V) || isa<ConstantInt>(V)) &&
+ ConstantDataSequential::isElementTypeCompatible(V->getType()))
+ return ConstantDataVector::getSplat(EC.getKnownMinValue(), V);
+
+ SmallVector<Constant *, 32> Elts(EC.getKnownMinValue(), V);
+ return get(Elts);
+ }
+
+ Type *VTy = VectorType::get(V->getType(), EC);
+
+ if (V->isNullValue())
+ return ConstantAggregateZero::get(VTy);
+ else if (isa<UndefValue>(V))
+ return UndefValue::get(VTy);
+
+ Type *I32Ty = Type::getInt32Ty(VTy->getContext());
+
+ // Move scalar into vector.
+ Constant *PoisonV = PoisonValue::get(VTy);
+ V = ConstantExpr::getInsertElement(PoisonV, V, ConstantInt::get(I32Ty, 0));
+ // Build shuffle mask to perform the splat.
+ SmallVector<int, 8> Zeros(EC.getKnownMinValue(), 0);
+ // Splat.
+ return ConstantExpr::getShuffleVector(V, PoisonV, Zeros);
+}
+
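+// Illustrative sketch, not part of the original source (the helper name is
+// invented): for a fixed count the splat above becomes a compact
+// ConstantDataVector, while a scalable count is expressed as the
+// insertelement + shufflevector constant expression with a zero mask.
+static LLVM_ATTRIBUTE_UNUSED void exampleSplats(LLVMContext &Ctx) {
+  Constant *Seven = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
+  // <4 x i32>: stored as raw data, one 32-bit word per lane.
+  Constant *Fixed = ConstantVector::getSplat(ElementCount::getFixed(4), Seven);
+  // <vscale x 4 x i32>: no fixed lane count, so a shuffle splat is built.
+  Constant *Scalable =
+      ConstantVector::getSplat(ElementCount::getScalable(4), Seven);
+  (void)Fixed; (void)Scalable;
+}
+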
+ConstantTokenNone *ConstantTokenNone::get(LLVMContext &Context) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ if (!pImpl->TheNoneToken)
+ pImpl->TheNoneToken.reset(new ConstantTokenNone(Context));
+ return pImpl->TheNoneToken.get();
+}
+
+/// Remove the constant from the constant table.
+void ConstantTokenNone::destroyConstantImpl() {
+ llvm_unreachable("You can't ConstantTokenNone->destroyConstantImpl()!");
+}
+
+// Utility function for determining whether a ConstantExpr is a cast op. This
+// can't be defined inline because we don't want to #include Instruction.h
+// into Constant.h.
+bool ConstantExpr::isCast() const {
+ return Instruction::isCast(getOpcode());
+}
+
+bool ConstantExpr::isCompare() const {
+ return getOpcode() == Instruction::ICmp || getOpcode() == Instruction::FCmp;
+}
+
+unsigned ConstantExpr::getPredicate() const {
+ return cast<CompareConstantExpr>(this)->predicate;
+}
+
+ArrayRef<int> ConstantExpr::getShuffleMask() const {
+ return cast<ShuffleVectorConstantExpr>(this)->ShuffleMask;
+}
+
+Constant *ConstantExpr::getShuffleMaskForBitcode() const {
+ return cast<ShuffleVectorConstantExpr>(this)->ShuffleMaskForBitcode;
+}
+
+Constant *ConstantExpr::getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
+ bool OnlyIfReduced, Type *SrcTy) const {
+ assert(Ops.size() == getNumOperands() && "Operand count mismatch!");
+
+ // If no operands changed return self.
+ if (Ty == getType() && std::equal(Ops.begin(), Ops.end(), op_begin()))
+ return const_cast<ConstantExpr*>(this);
+
+ Type *OnlyIfReducedTy = OnlyIfReduced ? Ty : nullptr;
+ switch (getOpcode()) {
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ return ConstantExpr::getCast(getOpcode(), Ops[0], Ty, OnlyIfReduced);
+ case Instruction::Select:
+ return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2], OnlyIfReducedTy);
+ case Instruction::InsertElement:
+ return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2],
+ OnlyIfReducedTy);
+ case Instruction::ExtractElement:
+ return ConstantExpr::getExtractElement(Ops[0], Ops[1], OnlyIfReducedTy);
+ case Instruction::FNeg:
+ return ConstantExpr::getFNeg(Ops[0]);
+ case Instruction::ShuffleVector:
+ return ConstantExpr::getShuffleVector(Ops[0], Ops[1], getShuffleMask(),
+ OnlyIfReducedTy);
+ case Instruction::GetElementPtr: {
+ auto *GEPO = cast<GEPOperator>(this);
+ assert(SrcTy || (Ops[0]->getType() == getOperand(0)->getType()));
+ return ConstantExpr::getGetElementPtr(
+ SrcTy ? SrcTy : GEPO->getSourceElementType(), Ops[0], Ops.slice(1),
+ GEPO->isInBounds(), GEPO->getInRangeIndex(), OnlyIfReducedTy);
+ }
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ return ConstantExpr::getCompare(getPredicate(), Ops[0], Ops[1],
+ OnlyIfReducedTy);
+ default:
+ assert(getNumOperands() == 2 && "Must be binary operator?");
+ return ConstantExpr::get(getOpcode(), Ops[0], Ops[1], SubclassOptionalData,
+ OnlyIfReducedTy);
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// isValueValidForType implementations
+
+bool ConstantInt::isValueValidForType(Type *Ty, uint64_t Val) {
+  unsigned NumBits = Ty->getIntegerBitWidth(); // Asserts Ty is integer-typed.
+ if (Ty->isIntegerTy(1))
+ return Val == 0 || Val == 1;
+ return isUIntN(NumBits, Val);
+}
+
+bool ConstantInt::isValueValidForType(Type *Ty, int64_t Val) {
+ unsigned NumBits = Ty->getIntegerBitWidth();
+ if (Ty->isIntegerTy(1))
+ return Val == 0 || Val == 1 || Val == -1;
+ return isIntN(NumBits, Val);
+}
+
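+// Illustrative sketch, not part of the original source (the helper name is
+// invented): i1 accepts only 0/1 (plus -1 in the signed form), and wider
+// types are checked against their bit width.
+static LLVM_ATTRIBUTE_UNUSED void exampleValidity(LLVMContext &Ctx) {
+  Type *I1 = Type::getInt1Ty(Ctx);
+  Type *I8 = Type::getInt8Ty(Ctx);
+  assert(ConstantInt::isValueValidForType(I1, uint64_t(1)));
+  assert(!ConstantInt::isValueValidForType(I1, uint64_t(2)));
+  assert(ConstantInt::isValueValidForType(I8, int64_t(-128)));
+  assert(!ConstantInt::isValueValidForType(I8, int64_t(300)));
+  (void)I1; (void)I8;
+}
+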
+bool ConstantFP::isValueValidForType(Type *Ty, const APFloat& Val) {
+ // convert modifies in place, so make a copy.
+ APFloat Val2 = APFloat(Val);
+ bool losesInfo;
+ switch (Ty->getTypeID()) {
+ default:
+ return false; // These can't be represented as floating point!
+
+ // FIXME rounding mode needs to be more flexible
+ case Type::HalfTyID: {
+ if (&Val2.getSemantics() == &APFloat::IEEEhalf())
+ return true;
+ Val2.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
+ case Type::BFloatTyID: {
+ if (&Val2.getSemantics() == &APFloat::BFloat())
+ return true;
+ Val2.convert(APFloat::BFloat(), APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
+ case Type::FloatTyID: {
+ if (&Val2.getSemantics() == &APFloat::IEEEsingle())
+ return true;
+ Val2.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
+ case Type::DoubleTyID: {
+ if (&Val2.getSemantics() == &APFloat::IEEEhalf() ||
+ &Val2.getSemantics() == &APFloat::BFloat() ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle() ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble())
+ return true;
+ Val2.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &losesInfo);
+ return !losesInfo;
+ }
+ case Type::X86_FP80TyID:
+ return &Val2.getSemantics() == &APFloat::IEEEhalf() ||
+ &Val2.getSemantics() == &APFloat::BFloat() ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle() ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble() ||
+ &Val2.getSemantics() == &APFloat::x87DoubleExtended();
+ case Type::FP128TyID:
+ return &Val2.getSemantics() == &APFloat::IEEEhalf() ||
+ &Val2.getSemantics() == &APFloat::BFloat() ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle() ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble() ||
+ &Val2.getSemantics() == &APFloat::IEEEquad();
+ case Type::PPC_FP128TyID:
+ return &Val2.getSemantics() == &APFloat::IEEEhalf() ||
+ &Val2.getSemantics() == &APFloat::BFloat() ||
+ &Val2.getSemantics() == &APFloat::IEEEsingle() ||
+ &Val2.getSemantics() == &APFloat::IEEEdouble() ||
+ &Val2.getSemantics() == &APFloat::PPCDoubleDouble();
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Factory Function Implementation
+
+ConstantAggregateZero *ConstantAggregateZero::get(Type *Ty) {
+ assert((Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()) &&
+ "Cannot create an aggregate zero of non-aggregate type!");
+
+ std::unique_ptr<ConstantAggregateZero> &Entry =
+ Ty->getContext().pImpl->CAZConstants[Ty];
+ if (!Entry)
+ Entry.reset(new ConstantAggregateZero(Ty));
+
+ return Entry.get();
+}
+
+/// Remove the constant from the constant table.
+void ConstantAggregateZero::destroyConstantImpl() {
+ getContext().pImpl->CAZConstants.erase(getType());
+}
+
+/// Remove the constant from the constant table.
+void ConstantArray::destroyConstantImpl() {
+ getType()->getContext().pImpl->ArrayConstants.remove(this);
+}
+
+
+//---- ConstantStruct::get() implementation...
+//
+
+/// Remove the constant from the constant table.
+void ConstantStruct::destroyConstantImpl() {
+ getType()->getContext().pImpl->StructConstants.remove(this);
+}
+
+/// Remove the constant from the constant table.
+void ConstantVector::destroyConstantImpl() {
+ getType()->getContext().pImpl->VectorConstants.remove(this);
+}
+
+Constant *Constant::getSplatValue(bool AllowUndefs) const {
+ assert(this->getType()->isVectorTy() && "Only valid for vectors!");
+ if (isa<ConstantAggregateZero>(this))
+ return getNullValue(cast<VectorType>(getType())->getElementType());
+ if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
+ return CV->getSplatValue();
+ if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
+ return CV->getSplatValue(AllowUndefs);
+
+ // Check if this is a constant expression splat of the form returned by
+ // ConstantVector::getSplat()
+ const auto *Shuf = dyn_cast<ConstantExpr>(this);
+ if (Shuf && Shuf->getOpcode() == Instruction::ShuffleVector &&
+ isa<UndefValue>(Shuf->getOperand(1))) {
+
+ const auto *IElt = dyn_cast<ConstantExpr>(Shuf->getOperand(0));
+ if (IElt && IElt->getOpcode() == Instruction::InsertElement &&
+ isa<UndefValue>(IElt->getOperand(0))) {
+
+ ArrayRef<int> Mask = Shuf->getShuffleMask();
+ Constant *SplatVal = IElt->getOperand(1);
+ ConstantInt *Index = dyn_cast<ConstantInt>(IElt->getOperand(2));
+
+ if (Index && Index->getValue() == 0 &&
+ llvm::all_of(Mask, [](int I) { return I == 0; }))
+ return SplatVal;
+ }
+ }
+
+ return nullptr;
+}
+
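+// Illustrative sketch, not part of the original source (the helper name is
+// invented): getSplatValue recovers the broadcast scalar regardless of which
+// of the splat representations above was chosen.
+static LLVM_ATTRIBUTE_UNUSED void exampleGetSplatValue(LLVMContext &Ctx) {
+  Constant *Five = ConstantInt::get(Type::getInt32Ty(Ctx), 5);
+  Constant *Vec = ConstantVector::getSplat(ElementCount::getFixed(8), Five);
+  // Round-trips back to the uniqued i32 5 scalar.
+  assert(Vec->getSplatValue() == Five);
+  (void)Vec;
+}
+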
+Constant *ConstantVector::getSplatValue(bool AllowUndefs) const {
+  // Check out the first element.
+ Constant *Elt = getOperand(0);
+ // Then make sure all remaining elements point to the same value.
+ for (unsigned I = 1, E = getNumOperands(); I < E; ++I) {
+ Constant *OpC = getOperand(I);
+ if (OpC == Elt)
+ continue;
+
+ // Strict mode: any mismatch is not a splat.
+ if (!AllowUndefs)
+ return nullptr;
+
+ // Allow undefs mode: ignore undefined elements.
+ if (isa<UndefValue>(OpC))
+ continue;
+
+ // If we do not have a defined element yet, use the current operand.
+ if (isa<UndefValue>(Elt))
+ Elt = OpC;
+
+ if (OpC != Elt)
+ return nullptr;
+ }
+ return Elt;
+}
+
+const APInt &Constant::getUniqueInteger() const {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(this))
+ return CI->getValue();
+ assert(this->getSplatValue() && "Doesn't contain a unique integer!");
+ const Constant *C = this->getAggregateElement(0U);
+ assert(C && isa<ConstantInt>(C) && "Not a vector of numbers!");
+ return cast<ConstantInt>(C)->getValue();
+}
+
+//---- ConstantPointerNull::get() implementation.
+//
+
+ConstantPointerNull *ConstantPointerNull::get(PointerType *Ty) {
+ std::unique_ptr<ConstantPointerNull> &Entry =
+ Ty->getContext().pImpl->CPNConstants[Ty];
+ if (!Entry)
+ Entry.reset(new ConstantPointerNull(Ty));
+
+ return Entry.get();
+}
+
+/// Remove the constant from the constant table.
+void ConstantPointerNull::destroyConstantImpl() {
+ getContext().pImpl->CPNConstants.erase(getType());
+}
+
+UndefValue *UndefValue::get(Type *Ty) {
+ std::unique_ptr<UndefValue> &Entry = Ty->getContext().pImpl->UVConstants[Ty];
+ if (!Entry)
+ Entry.reset(new UndefValue(Ty));
+
+ return Entry.get();
+}
+
+/// Remove the constant from the constant table.
+void UndefValue::destroyConstantImpl() {
+ // Free the constant and any dangling references to it.
+ if (getValueID() == UndefValueVal) {
+ getContext().pImpl->UVConstants.erase(getType());
+ } else if (getValueID() == PoisonValueVal) {
+ getContext().pImpl->PVConstants.erase(getType());
+  } else {
+    llvm_unreachable("Not an undef or a poison!");
+  }
+}
+
+PoisonValue *PoisonValue::get(Type *Ty) {
+ std::unique_ptr<PoisonValue> &Entry = Ty->getContext().pImpl->PVConstants[Ty];
+ if (!Entry)
+ Entry.reset(new PoisonValue(Ty));
+
+ return Entry.get();
+}
+
+/// Remove the constant from the constant table.
+void PoisonValue::destroyConstantImpl() {
+ // Free the constant and any dangling references to it.
+ getContext().pImpl->PVConstants.erase(getType());
+}
+
+BlockAddress *BlockAddress::get(BasicBlock *BB) {
+ assert(BB->getParent() && "Block must have a parent");
+ return get(BB->getParent(), BB);
+}
+
+BlockAddress *BlockAddress::get(Function *F, BasicBlock *BB) {
+ BlockAddress *&BA =
+ F->getContext().pImpl->BlockAddresses[std::make_pair(F, BB)];
+ if (!BA)
+ BA = new BlockAddress(F, BB);
+
+ assert(BA->getFunction() == F && "Basic block moved between functions");
+ return BA;
+}
+
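+// Illustrative sketch, not part of the original source (names are invented):
+// a blockaddress is uniqued per (function, block) pair in the BlockAddresses
+// map above, so asking twice yields the same constant.
+static LLVM_ATTRIBUTE_UNUSED Constant *exampleBlockAddress(Module &M) {
+  LLVMContext &Ctx = M.getContext();
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), false);
+  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
+  BasicBlock *BB = BasicBlock::Create(Ctx, "target", F);
+  BlockAddress *BA = BlockAddress::get(F, BB);
+  assert(BA == BlockAddress::get(F, BB) && "uniqued per (F, BB)");
+  return BA; // blockaddress(@f, %target)
+}
+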
+BlockAddress::BlockAddress(Function *F, BasicBlock *BB)
+ : Constant(Type::getInt8PtrTy(F->getContext(), F->getAddressSpace()),
+ Value::BlockAddressVal, &Op<0>(), 2) {
+ setOperand(0, F);
+ setOperand(1, BB);
+ BB->AdjustBlockAddressRefCount(1);
+}
+
+BlockAddress *BlockAddress::lookup(const BasicBlock *BB) {
+ if (!BB->hasAddressTaken())
+ return nullptr;
+
+ const Function *F = BB->getParent();
+ assert(F && "Block must have a parent");
+ BlockAddress *BA =
+ F->getContext().pImpl->BlockAddresses.lookup(std::make_pair(F, BB));
+ assert(BA && "Refcount and block address map disagree!");
+ return BA;
+}
+
+/// Remove the constant from the constant table.
+void BlockAddress::destroyConstantImpl() {
+ getFunction()->getType()->getContext().pImpl
+ ->BlockAddresses.erase(std::make_pair(getFunction(), getBasicBlock()));
+ getBasicBlock()->AdjustBlockAddressRefCount(-1);
+}
+
+Value *BlockAddress::handleOperandChangeImpl(Value *From, Value *To) {
+ // This could be replacing either the Basic Block or the Function. In either
+ // case, we have to remove the map entry.
+ Function *NewF = getFunction();
+ BasicBlock *NewBB = getBasicBlock();
+
+ if (From == NewF)
+ NewF = cast<Function>(To->stripPointerCasts());
+ else {
+ assert(From == NewBB && "From does not match any operand");
+ NewBB = cast<BasicBlock>(To);
+ }
+
+ // See if the 'new' entry already exists, if not, just update this in place
+ // and return early.
+ BlockAddress *&NewBA =
+ getContext().pImpl->BlockAddresses[std::make_pair(NewF, NewBB)];
+ if (NewBA)
+ return NewBA;
+
+ getBasicBlock()->AdjustBlockAddressRefCount(-1);
+
+ // Remove the old entry, this can't cause the map to rehash (just a
+ // tombstone will get added).
+ getContext().pImpl->BlockAddresses.erase(std::make_pair(getFunction(),
+ getBasicBlock()));
+ NewBA = this;
+ setOperand(0, NewF);
+ setOperand(1, NewBB);
+ getBasicBlock()->AdjustBlockAddressRefCount(1);
+
+ // If we just want to keep the existing value, then return null.
+ // Callers know that this means we shouldn't delete this value.
+ return nullptr;
+}
+
+DSOLocalEquivalent *DSOLocalEquivalent::get(GlobalValue *GV) {
+ DSOLocalEquivalent *&Equiv = GV->getContext().pImpl->DSOLocalEquivalents[GV];
+ if (!Equiv)
+ Equiv = new DSOLocalEquivalent(GV);
+
+ assert(Equiv->getGlobalValue() == GV &&
+ "DSOLocalFunction does not match the expected global value");
+ return Equiv;
+}
+
+DSOLocalEquivalent::DSOLocalEquivalent(GlobalValue *GV)
+ : Constant(GV->getType(), Value::DSOLocalEquivalentVal, &Op<0>(), 1) {
+ setOperand(0, GV);
+}
+
+/// Remove the constant from the constant table.
+void DSOLocalEquivalent::destroyConstantImpl() {
+ const GlobalValue *GV = getGlobalValue();
+ GV->getContext().pImpl->DSOLocalEquivalents.erase(GV);
+}
+
+Value *DSOLocalEquivalent::handleOperandChangeImpl(Value *From, Value *To) {
+ assert(From == getGlobalValue() && "Changing value does not match operand.");
+ assert(isa<Constant>(To) && "Can only replace the operands with a constant");
+
+ // The replacement is with another global value.
+ if (const auto *ToObj = dyn_cast<GlobalValue>(To)) {
+ DSOLocalEquivalent *&NewEquiv =
+ getContext().pImpl->DSOLocalEquivalents[ToObj];
+ if (NewEquiv)
+ return llvm::ConstantExpr::getBitCast(NewEquiv, getType());
+ }
+
+ // If the argument is replaced with a null value, just replace this constant
+ // with a null value.
+ if (cast<Constant>(To)->isNullValue())
+ return To;
+
+ // The replacement could be a bitcast or an alias to another function. We can
+ // replace it with a bitcast to the dso_local_equivalent of that function.
+ auto *Func = cast<Function>(To->stripPointerCastsAndAliases());
+ DSOLocalEquivalent *&NewEquiv = getContext().pImpl->DSOLocalEquivalents[Func];
+ if (NewEquiv)
+ return llvm::ConstantExpr::getBitCast(NewEquiv, getType());
+
+ // Replace this with the new one.
+ getContext().pImpl->DSOLocalEquivalents.erase(getGlobalValue());
+ NewEquiv = this;
+ setOperand(0, Func);
+
+ if (Func->getType() != getType()) {
+ // It is ok to mutate the type here because this constant should always
+ // reflect the type of the function it's holding.
+ mutateType(Func->getType());
+ }
+ return nullptr;
+}
+
+NoCFIValue *NoCFIValue::get(GlobalValue *GV) {
+ NoCFIValue *&NC = GV->getContext().pImpl->NoCFIValues[GV];
+ if (!NC)
+ NC = new NoCFIValue(GV);
+
+ assert(NC->getGlobalValue() == GV &&
+ "NoCFIValue does not match the expected global value");
+ return NC;
+}
+
+NoCFIValue::NoCFIValue(GlobalValue *GV)
+ : Constant(GV->getType(), Value::NoCFIValueVal, &Op<0>(), 1) {
+ setOperand(0, GV);
+}
+
+/// Remove the constant from the constant table.
+void NoCFIValue::destroyConstantImpl() {
+ const GlobalValue *GV = getGlobalValue();
+ GV->getContext().pImpl->NoCFIValues.erase(GV);
+}
+
+Value *NoCFIValue::handleOperandChangeImpl(Value *From, Value *To) {
+ assert(From == getGlobalValue() && "Changing value does not match operand.");
+
+ GlobalValue *GV = dyn_cast<GlobalValue>(To->stripPointerCasts());
+ assert(GV && "Can only replace the operands with a global value");
+
+ NoCFIValue *&NewNC = getContext().pImpl->NoCFIValues[GV];
+ if (NewNC)
+ return llvm::ConstantExpr::getBitCast(NewNC, getType());
+
+ getContext().pImpl->NoCFIValues.erase(getGlobalValue());
+ NewNC = this;
+ setOperand(0, GV);
+
+ if (GV->getType() != getType())
+ mutateType(GV->getType());
+
+ return nullptr;
+}
+
+//---- ConstantExpr::get() implementations.
+//
+
+/// This is a utility function to handle folding of casts and lookup of the
+/// cast in the ExprConstants map. It is used by the various get* methods below.
+static Constant *getFoldedCast(Instruction::CastOps opc, Constant *C, Type *Ty,
+ bool OnlyIfReduced = false) {
+ assert(Ty->isFirstClassType() && "Cannot cast to an aggregate type!");
+ // Fold a few common cases
+ if (Constant *FC = ConstantFoldCastInstruction(opc, C, Ty))
+ return FC;
+
+ if (OnlyIfReduced)
+ return nullptr;
+
+ LLVMContextImpl *pImpl = Ty->getContext().pImpl;
+
+ // Look up the constant in the table first to ensure uniqueness.
+ ConstantExprKeyType Key(opc, C);
+
+ return pImpl->ExprConstants.getOrCreate(Ty, Key);
+}
+
+Constant *ConstantExpr::getCast(unsigned oc, Constant *C, Type *Ty,
+ bool OnlyIfReduced) {
+ Instruction::CastOps opc = Instruction::CastOps(oc);
+ assert(Instruction::isCast(opc) && "opcode out of range");
+ assert(C && Ty && "Null arguments to getCast");
+ assert(CastInst::castIsValid(opc, C, Ty) && "Invalid constantexpr cast!");
+
+ switch (opc) {
+ default:
+ llvm_unreachable("Invalid cast opcode");
+ case Instruction::Trunc:
+ return getTrunc(C, Ty, OnlyIfReduced);
+ case Instruction::ZExt:
+ return getZExt(C, Ty, OnlyIfReduced);
+ case Instruction::SExt:
+ return getSExt(C, Ty, OnlyIfReduced);
+ case Instruction::FPTrunc:
+ return getFPTrunc(C, Ty, OnlyIfReduced);
+ case Instruction::FPExt:
+ return getFPExtend(C, Ty, OnlyIfReduced);
+ case Instruction::UIToFP:
+ return getUIToFP(C, Ty, OnlyIfReduced);
+ case Instruction::SIToFP:
+ return getSIToFP(C, Ty, OnlyIfReduced);
+ case Instruction::FPToUI:
+ return getFPToUI(C, Ty, OnlyIfReduced);
+ case Instruction::FPToSI:
+ return getFPToSI(C, Ty, OnlyIfReduced);
+ case Instruction::PtrToInt:
+ return getPtrToInt(C, Ty, OnlyIfReduced);
+ case Instruction::IntToPtr:
+ return getIntToPtr(C, Ty, OnlyIfReduced);
+ case Instruction::BitCast:
+ return getBitCast(C, Ty, OnlyIfReduced);
+ case Instruction::AddrSpaceCast:
+ return getAddrSpaceCast(C, Ty, OnlyIfReduced);
+ }
+}
+
+Constant *ConstantExpr::getZExtOrBitCast(Constant *C, Type *Ty) {
+ if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return getBitCast(C, Ty);
+ return getZExt(C, Ty);
+}
+
+Constant *ConstantExpr::getSExtOrBitCast(Constant *C, Type *Ty) {
+ if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return getBitCast(C, Ty);
+ return getSExt(C, Ty);
+}
+
+Constant *ConstantExpr::getTruncOrBitCast(Constant *C, Type *Ty) {
+ if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return getBitCast(C, Ty);
+ return getTrunc(C, Ty);
+}
+
+Constant *ConstantExpr::getSExtOrTrunc(Constant *C, Type *Ty) {
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "Can only sign extend/truncate integers!");
+ Type *CTy = C->getType();
+ if (CTy->getScalarSizeInBits() < Ty->getScalarSizeInBits())
+ return getSExt(C, Ty);
+ if (CTy->getScalarSizeInBits() > Ty->getScalarSizeInBits())
+ return getTrunc(C, Ty);
+ return C;
+}
+
+Constant *ConstantExpr::getPointerCast(Constant *S, Type *Ty) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
+ "Invalid cast");
+
+ if (Ty->isIntOrIntVectorTy())
+ return getPtrToInt(S, Ty);
+
+ unsigned SrcAS = S->getType()->getPointerAddressSpace();
+ if (Ty->isPtrOrPtrVectorTy() && SrcAS != Ty->getPointerAddressSpace())
+ return getAddrSpaceCast(S, Ty);
+
+ return getBitCast(S, Ty);
+}
+
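+// Illustrative sketch, not part of the original source (the helper name is
+// invented): getPointerCast picks ptrtoint, addrspacecast, or bitcast from
+// the operand and destination types, as described above.
+static LLVM_ATTRIBUTE_UNUSED void examplePointerCasts(LLVMContext &Ctx) {
+  Constant *Null =
+      ConstantPointerNull::get(PointerType::get(Type::getInt8Ty(Ctx), 0));
+  // Pointer -> integer: lowers to a ptrtoint constant expression (here it
+  // folds immediately, since a null pointer converts to i64 0).
+  Constant *AsInt = ConstantExpr::getPointerCast(Null, Type::getInt64Ty(Ctx));
+  // Pointer -> pointer in another address space: lowers to an addrspacecast.
+  Constant *OtherAS = ConstantExpr::getPointerCast(
+      Null, PointerType::get(Type::getInt8Ty(Ctx), 1));
+  (void)AsInt; (void)OtherAS;
+}
+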
+Constant *ConstantExpr::getPointerBitCastOrAddrSpaceCast(Constant *S,
+ Type *Ty) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return getAddrSpaceCast(S, Ty);
+
+ return getBitCast(S, Ty);
+}
+
+Constant *ConstantExpr::getIntegerCast(Constant *C, Type *Ty, bool isSigned) {
+ assert(C->getType()->isIntOrIntVectorTy() &&
+ Ty->isIntOrIntVectorTy() && "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::Trunc :
+ (isSigned ? Instruction::SExt : Instruction::ZExt)));
+ return getCast(opcode, C, Ty);
+}
+
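+// Illustrative sketch, not part of the original source (the helper name is
+// invented): getIntegerCast chooses the opcode from the relative bit widths,
+// with isSigned selecting sext over zext when widening.
+static LLVM_ATTRIBUTE_UNUSED void exampleIntegerCast(Constant *C8) {
+  // Assumes C8 has type i8 (or a vector of i8).
+  Type *I32 = Type::getInt32Ty(C8->getContext());
+  Constant *ZExted = ConstantExpr::getIntegerCast(C8, I32, /*isSigned=*/false);
+  Constant *SExted = ConstantExpr::getIntegerCast(C8, I32, /*isSigned=*/true);
+  (void)ZExted; (void)SExted;
+}
+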
+Constant *ConstantExpr::getFPCast(Constant *C, Type *Ty) {
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ if (SrcBits == DstBits)
+ return C; // Avoid a useless cast
+ Instruction::CastOps opcode =
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt);
+ return getCast(opcode, C, Ty);
+}
+
+Constant *ConstantExpr::getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && "Trunc operand must be integer");
+ assert(Ty->isIntOrIntVectorTy() && "Trunc produces only integral");
+  assert(C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits() &&
+ "SrcTy must be larger than DestTy for Trunc!");
+
+ return getFoldedCast(Instruction::Trunc, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getSExt(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && "SExt operand must be integral");
+ assert(Ty->isIntOrIntVectorTy() && "SExt produces only integer");
+  assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
+ "SrcTy must be smaller than DestTy for SExt!");
+
+ return getFoldedCast(Instruction::SExt, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getZExt(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && "ZEXt operand must be integral");
+ assert(Ty->isIntOrIntVectorTy() && "ZExt produces only integer");
+  assert(C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
+ "SrcTy must be smaller than DestTy for ZExt!");
+
+ return getFoldedCast(Instruction::ZExt, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPTrunc(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+         C->getType()->getScalarSizeInBits() > Ty->getScalarSizeInBits() &&
+ "This is an illegal floating point truncation!");
+ return getFoldedCast(Instruction::FPTrunc, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPExtend(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+         C->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
+ "This is an illegal floating point extension!");
+ return getFoldedCast(Instruction::FPExt, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "This is an illegal uint to floating point cast!");
+ return getFoldedCast(Instruction::UIToFP, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "This is an illegal sint to floating point cast!");
+ return getFoldedCast(Instruction::SIToFP, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "This is an illegal floating point to uint cast!");
+ return getFoldedCast(Instruction::FPToUI, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced) {
+#ifndef NDEBUG
+ bool fromVec = isa<VectorType>(C->getType());
+ bool toVec = isa<VectorType>(Ty);
+#endif
+ assert((fromVec == toVec) && "Cannot convert from scalar to/from vector");
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "This is an illegal floating point to sint cast!");
+ return getFoldedCast(Instruction::FPToSI, C, Ty, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(C->getType()->isPtrOrPtrVectorTy() &&
+ "PtrToInt source must be pointer or pointer vector");
+ assert(DstTy->isIntOrIntVectorTy() &&
+ "PtrToInt destination must be integer or integer vector");
+ assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
+ if (isa<VectorType>(C->getType()))
+ assert(cast<VectorType>(C->getType())->getElementCount() ==
+ cast<VectorType>(DstTy)->getElementCount() &&
+ "Invalid cast between a different number of vector elements");
+ return getFoldedCast(Instruction::PtrToInt, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(C->getType()->isIntOrIntVectorTy() &&
+ "IntToPtr source must be integer or integer vector");
+ assert(DstTy->isPtrOrPtrVectorTy() &&
+ "IntToPtr destination must be a pointer or pointer vector");
+ assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
+ if (isa<VectorType>(C->getType()))
+ assert(cast<VectorType>(C->getType())->getElementCount() ==
+ cast<VectorType>(DstTy)->getElementCount() &&
+ "Invalid cast between a different number of vector elements");
+ return getFoldedCast(Instruction::IntToPtr, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getBitCast(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(CastInst::castIsValid(Instruction::BitCast, C, DstTy) &&
+ "Invalid constantexpr bitcast!");
+
+ // It is common to ask for a bitcast of a value to its own type, handle this
+ // speedily.
+ if (C->getType() == DstTy) return C;
+
+ return getFoldedCast(Instruction::BitCast, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy,
+ bool OnlyIfReduced) {
+ assert(CastInst::castIsValid(Instruction::AddrSpaceCast, C, DstTy) &&
+ "Invalid constantexpr addrspacecast!");
+
+ // Canonicalize addrspacecasts between different pointer types by first
+ // bitcasting the pointer type and then converting the address space.
+ PointerType *SrcScalarTy = cast<PointerType>(C->getType()->getScalarType());
+ PointerType *DstScalarTy = cast<PointerType>(DstTy->getScalarType());
+ if (!SrcScalarTy->hasSameElementTypeAs(DstScalarTy)) {
+ Type *MidTy = PointerType::getWithSamePointeeType(
+ DstScalarTy, SrcScalarTy->getAddressSpace());
+ if (VectorType *VT = dyn_cast<VectorType>(DstTy)) {
+ // Handle vectors of pointers.
+ MidTy = FixedVectorType::get(MidTy,
+ cast<FixedVectorType>(VT)->getNumElements());
+ }
+ C = getBitCast(C, MidTy);
+ }
+ return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy, OnlyIfReduced);
+}
+
+Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags,
+ Type *OnlyIfReducedTy) {
+ // Check the operands for consistency first.
+ assert(Instruction::isUnaryOp(Opcode) &&
+ "Invalid opcode in unary constant expression");
+
+#ifndef NDEBUG
+ switch (Opcode) {
+ case Instruction::FNeg:
+ assert(C->getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ default:
+ break;
+ }
+#endif
+
+ if (Constant *FC = ConstantFoldUnaryInstruction(Opcode, C))
+ return FC;
+
+ if (OnlyIfReducedTy == C->getType())
+ return nullptr;
+
+ Constant *ArgVec[] = { C };
+ ConstantExprKeyType Key(Opcode, ArgVec, 0, Flags);
+
+ LLVMContextImpl *pImpl = C->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(C->getType(), Key);
+}
+
+Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
+ unsigned Flags, Type *OnlyIfReducedTy) {
+ // Check the operands for consistency first.
+ assert(Instruction::isBinaryOp(Opcode) &&
+ "Invalid opcode in binary constant expression");
+ assert(isSupportedBinOp(Opcode) &&
+ "Binop not supported as constant expression");
+ assert(C1->getType() == C2->getType() &&
+ "Operand types in binary constant expression should match");
+
+#ifndef NDEBUG
+ switch (Opcode) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create an integer operation on a non-integer type!");
+ break;
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ assert(C1->getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create a logical operation on a non-integral type!");
+ break;
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ assert(C1->getType()->isIntOrIntVectorTy() &&
+ "Tried to create a shift operation on a non-integer type!");
+ break;
+ default:
+ break;
+ }
+#endif
+
+ if (Constant *FC = ConstantFoldBinaryInstruction(Opcode, C1, C2))
+ return FC;
+
+ if (OnlyIfReducedTy == C1->getType())
+ return nullptr;
+
+ Constant *ArgVec[] = { C1, C2 };
+ ConstantExprKeyType Key(Opcode, ArgVec, 0, Flags);
+
+ LLVMContextImpl *pImpl = C1->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(C1->getType(), Key);
+}
+
+bool ConstantExpr::isDesirableBinOp(unsigned Opcode) {
+ switch (Opcode) {
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ return false;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return true;
+ default:
+ llvm_unreachable("Argument must be binop opcode");
+ }
+}
+
+bool ConstantExpr::isSupportedBinOp(unsigned Opcode) {
+ switch (Opcode) {
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ return false;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return true;
+ default:
+ llvm_unreachable("Argument must be binop opcode");
+ }
+}
+
+Constant *ConstantExpr::getSizeOf(Type* Ty) {
+ // sizeof is implemented as: (i64) gep (Ty*)null, 1
+ // Note that a non-inbounds gep is used, as null isn't within any object.
+ Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
+ Constant *GEP = getGetElementPtr(
+ Ty, Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
+}
+
+Constant *ConstantExpr::getAlignOf(Type* Ty) {
+ // alignof is implemented as: (i64) gep ({i1,Ty}*)null, 0, 1
+ // Note that a non-inbounds gep is used, as null isn't within any object.
+ Type *AligningTy = StructType::get(Type::getInt1Ty(Ty->getContext()), Ty);
+ Constant *NullPtr = Constant::getNullValue(AligningTy->getPointerTo(0));
+ Constant *Zero = ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0);
+ Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
+ Constant *Indices[2] = { Zero, One };
+ Constant *GEP = getGetElementPtr(AligningTy, NullPtr, Indices);
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
+}
+
+Constant *ConstantExpr::getOffsetOf(StructType* STy, unsigned FieldNo) {
+ return getOffsetOf(STy, ConstantInt::get(Type::getInt32Ty(STy->getContext()),
+ FieldNo));
+}
+
+Constant *ConstantExpr::getOffsetOf(Type* Ty, Constant *FieldNo) {
+ // offsetof is implemented as: (i64) gep (Ty*)null, 0, FieldNo
+ // Note that a non-inbounds gep is used, as null isn't within any object.
+ Constant *GEPIdx[] = {
+ ConstantInt::get(Type::getInt64Ty(Ty->getContext()), 0),
+ FieldNo
+ };
+ Constant *GEP = getGetElementPtr(
+ Ty, Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx);
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
+}
+
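+// Illustrative sketch, not part of the original source (names are invented):
+// all three helpers above phrase the layout query as a GEP off a null pointer
+// that is then converted to i64 with ptrtoint; no DataLayout is consulted.
+static LLVM_ATTRIBUTE_UNUSED void exampleLayoutQueries(LLVMContext &Ctx) {
+  StructType *STy =
+      StructType::get(Type::getInt8Ty(Ctx), Type::getInt64Ty(Ctx));
+  // sizeof({i8, i64}) as: (i64) ptrtoint(gep {i8, i64}* null, 1).
+  Constant *Size = ConstantExpr::getSizeOf(STy);
+  // Offset of field 1, left as a constant expression until lowering picks a
+  // concrete byte offset for the target.
+  Constant *Off = ConstantExpr::getOffsetOf(STy, 1);
+  (void)Size; (void)Off;
+}
+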
+Constant *ConstantExpr::getCompare(unsigned short Predicate, Constant *C1,
+ Constant *C2, bool OnlyIfReduced) {
+ assert(C1->getType() == C2->getType() && "Op types should be identical!");
+
+ switch (Predicate) {
+ default: llvm_unreachable("Invalid CmpInst predicate");
+ case CmpInst::FCMP_FALSE: case CmpInst::FCMP_OEQ: case CmpInst::FCMP_OGT:
+ case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLT: case CmpInst::FCMP_OLE:
+ case CmpInst::FCMP_ONE: case CmpInst::FCMP_ORD: case CmpInst::FCMP_UNO:
+ case CmpInst::FCMP_UEQ: case CmpInst::FCMP_UGT: case CmpInst::FCMP_UGE:
+ case CmpInst::FCMP_ULT: case CmpInst::FCMP_ULE: case CmpInst::FCMP_UNE:
+ case CmpInst::FCMP_TRUE:
+ return getFCmp(Predicate, C1, C2, OnlyIfReduced);
+
+ case CmpInst::ICMP_EQ: case CmpInst::ICMP_NE: case CmpInst::ICMP_UGT:
+ case CmpInst::ICMP_UGE: case CmpInst::ICMP_ULT: case CmpInst::ICMP_ULE:
+ case CmpInst::ICMP_SGT: case CmpInst::ICMP_SGE: case CmpInst::ICMP_SLT:
+ case CmpInst::ICMP_SLE:
+ return getICmp(Predicate, C1, C2, OnlyIfReduced);
+ }
+}
+
+Constant *ConstantExpr::getSelect(Constant *C, Constant *V1, Constant *V2,
+ Type *OnlyIfReducedTy) {
+  assert(!SelectInst::areInvalidOperands(C, V1, V2) &&
+         "Invalid select operands");
+
+ if (Constant *SC = ConstantFoldSelectInstruction(C, V1, V2))
+ return SC; // Fold common cases
+
+ if (OnlyIfReducedTy == V1->getType())
+ return nullptr;
+
+ Constant *ArgVec[] = { C, V1, V2 };
+ ConstantExprKeyType Key(Instruction::Select, ArgVec);
+
+ LLVMContextImpl *pImpl = C->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(V1->getType(), Key);
+}
+
+Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> Idxs, bool InBounds,
+ Optional<unsigned> InRangeIndex,
+ Type *OnlyIfReducedTy) {
+ PointerType *OrigPtrTy = cast<PointerType>(C->getType()->getScalarType());
+ assert(Ty && "Must specify element type");
+ assert(OrigPtrTy->isOpaqueOrPointeeTypeMatches(Ty));
+
+ if (Constant *FC =
+ ConstantFoldGetElementPtr(Ty, C, InBounds, InRangeIndex, Idxs))
+ return FC; // Fold a few common cases.
+
+ // Get the result type of the getelementptr!
+ Type *DestTy = GetElementPtrInst::getIndexedType(Ty, Idxs);
+ assert(DestTy && "GEP indices invalid!");
+ unsigned AS = OrigPtrTy->getAddressSpace();
+ Type *ReqTy = OrigPtrTy->isOpaque()
+ ? PointerType::get(OrigPtrTy->getContext(), AS)
+ : DestTy->getPointerTo(AS);
+
+ auto EltCount = ElementCount::getFixed(0);
+ if (VectorType *VecTy = dyn_cast<VectorType>(C->getType()))
+ EltCount = VecTy->getElementCount();
+ else
+ for (auto Idx : Idxs)
+ if (VectorType *VecTy = dyn_cast<VectorType>(Idx->getType()))
+ EltCount = VecTy->getElementCount();
+
+ if (EltCount.isNonZero())
+ ReqTy = VectorType::get(ReqTy, EltCount);
+
+ if (OnlyIfReducedTy == ReqTy)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ std::vector<Constant*> ArgVec;
+ ArgVec.reserve(1 + Idxs.size());
+ ArgVec.push_back(C);
+ auto GTI = gep_type_begin(Ty, Idxs), GTE = gep_type_end(Ty, Idxs);
+ for (; GTI != GTE; ++GTI) {
+ auto *Idx = cast<Constant>(GTI.getOperand());
+ assert(
+ (!isa<VectorType>(Idx->getType()) ||
+ cast<VectorType>(Idx->getType())->getElementCount() == EltCount) &&
+ "getelementptr index type missmatch");
+
+ if (GTI.isStruct() && Idx->getType()->isVectorTy()) {
+ Idx = Idx->getSplatValue();
+ } else if (GTI.isSequential() && EltCount.isNonZero() &&
+ !Idx->getType()->isVectorTy()) {
+ Idx = ConstantVector::getSplat(EltCount, Idx);
+ }
+ ArgVec.push_back(Idx);
+ }
+
+ unsigned SubClassOptionalData = InBounds ? GEPOperator::IsInBounds : 0;
+ if (InRangeIndex && *InRangeIndex < 63)
+ SubClassOptionalData |= (*InRangeIndex + 1) << 1;
+ const ConstantExprKeyType Key(Instruction::GetElementPtr, ArgVec, 0,
+ SubClassOptionalData, None, Ty);
+
+ LLVMContextImpl *pImpl = C->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
+}
+
+Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
+ Constant *RHS, bool OnlyIfReduced) {
+ auto Predicate = static_cast<CmpInst::Predicate>(pred);
+ assert(LHS->getType() == RHS->getType());
+ assert(CmpInst::isIntPredicate(Predicate) && "Invalid ICmp Predicate");
+
+ if (Constant *FC = ConstantFoldCompareInstruction(Predicate, LHS, RHS))
+ return FC; // Fold a few common cases...
+
+ if (OnlyIfReduced)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { LHS, RHS };
+ // Get the key type with both the opcode and predicate
+ const ConstantExprKeyType Key(Instruction::ICmp, ArgVec, Predicate);
+
+ Type *ResultTy = Type::getInt1Ty(LHS->getContext());
+ if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
+ ResultTy = VectorType::get(ResultTy, VT->getElementCount());
+
+ LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ResultTy, Key);
+}
+
+Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
+ Constant *RHS, bool OnlyIfReduced) {
+ auto Predicate = static_cast<CmpInst::Predicate>(pred);
+ assert(LHS->getType() == RHS->getType());
+ assert(CmpInst::isFPPredicate(Predicate) && "Invalid FCmp Predicate");
+
+ if (Constant *FC = ConstantFoldCompareInstruction(Predicate, LHS, RHS))
+ return FC; // Fold a few common cases...
+
+ if (OnlyIfReduced)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { LHS, RHS };
+ // Get the key type with both the opcode and predicate
+ const ConstantExprKeyType Key(Instruction::FCmp, ArgVec, Predicate);
+
+ Type *ResultTy = Type::getInt1Ty(LHS->getContext());
+ if (VectorType *VT = dyn_cast<VectorType>(LHS->getType()))
+ ResultTy = VectorType::get(ResultTy, VT->getElementCount());
+
+ LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ResultTy, Key);
+}
+
+Constant *ConstantExpr::getExtractElement(Constant *Val, Constant *Idx,
+ Type *OnlyIfReducedTy) {
+ assert(Val->getType()->isVectorTy() &&
+ "Tried to create extractelement operation on non-vector type!");
+ assert(Idx->getType()->isIntegerTy() &&
+ "Extractelement index must be an integer type!");
+
+ if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx))
+ return FC; // Fold a few common cases.
+
+ Type *ReqTy = cast<VectorType>(Val->getType())->getElementType();
+ if (OnlyIfReducedTy == ReqTy)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { Val, Idx };
+ const ConstantExprKeyType Key(Instruction::ExtractElement, ArgVec);
+
+ LLVMContextImpl *pImpl = Val->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ReqTy, Key);
+}
+
+Constant *ConstantExpr::getInsertElement(Constant *Val, Constant *Elt,
+ Constant *Idx, Type *OnlyIfReducedTy) {
+ assert(Val->getType()->isVectorTy() &&
+ "Tried to create insertelement operation on non-vector type!");
+ assert(Elt->getType() == cast<VectorType>(Val->getType())->getElementType() &&
+ "Insertelement types must match!");
+ assert(Idx->getType()->isIntegerTy() &&
+ "Insertelement index must be i32 type!");
+
+ if (Constant *FC = ConstantFoldInsertElementInstruction(Val, Elt, Idx))
+ return FC; // Fold a few common cases.
+
+ if (OnlyIfReducedTy == Val->getType())
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = { Val, Elt, Idx };
+ const ConstantExprKeyType Key(Instruction::InsertElement, ArgVec);
+
+ LLVMContextImpl *pImpl = Val->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(Val->getType(), Key);
+}
+
+Constant *ConstantExpr::getShuffleVector(Constant *V1, Constant *V2,
+ ArrayRef<int> Mask,
+ Type *OnlyIfReducedTy) {
+ assert(ShuffleVectorInst::isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector constant expr operands!");
+
+ if (Constant *FC = ConstantFoldShuffleVectorInstruction(V1, V2, Mask))
+ return FC; // Fold a few common cases.
+
+ unsigned NElts = Mask.size();
+ auto V1VTy = cast<VectorType>(V1->getType());
+ Type *EltTy = V1VTy->getElementType();
+ bool TypeIsScalable = isa<ScalableVectorType>(V1VTy);
+ Type *ShufTy = VectorType::get(EltTy, NElts, TypeIsScalable);
+
+ if (OnlyIfReducedTy == ShufTy)
+ return nullptr;
+
+ // Look up the constant in the table first to ensure uniqueness
+ Constant *ArgVec[] = {V1, V2};
+ ConstantExprKeyType Key(Instruction::ShuffleVector, ArgVec, 0, 0, Mask);
+
+ LLVMContextImpl *pImpl = ShufTy->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(ShufTy, Key);
+}
+
+Constant *ConstantExpr::getNeg(Constant *C, bool HasNUW, bool HasNSW) {
+ assert(C->getType()->isIntOrIntVectorTy() &&
+ "Cannot NEG a nonintegral value!");
+ return getSub(ConstantFP::getZeroValueForNegation(C->getType()),
+ C, HasNUW, HasNSW);
+}
+
+Constant *ConstantExpr::getFNeg(Constant *C) {
+ assert(C->getType()->isFPOrFPVectorTy() &&
+ "Cannot FNEG a non-floating-point value!");
+ return get(Instruction::FNeg, C);
+}
+
+Constant *ConstantExpr::getNot(Constant *C) {
+ assert(C->getType()->isIntOrIntVectorTy() &&
+ "Cannot NOT a nonintegral value!");
+ return get(Instruction::Xor, C, Constant::getAllOnesValue(C->getType()));
+}
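+
+// For example (with `I32Ty` naming an i32 type), these helpers route through
+// the generic constant folder, so getNot(ConstantInt::get(I32Ty, 5)) folds
+// straight to i32 -6 (5 ^ -1 in two's complement) instead of producing an
+// xor expression.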
+
+Constant *ConstantExpr::getAdd(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Add, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getSub(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Sub, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getMul(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Mul, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getAnd(Constant *C1, Constant *C2) {
+ return get(Instruction::And, C1, C2);
+}
+
+Constant *ConstantExpr::getOr(Constant *C1, Constant *C2) {
+ return get(Instruction::Or, C1, C2);
+}
+
+Constant *ConstantExpr::getXor(Constant *C1, Constant *C2) {
+ return get(Instruction::Xor, C1, C2);
+}
+
+Constant *ConstantExpr::getUMin(Constant *C1, Constant *C2) {
+ Constant *Cmp = ConstantExpr::getICmp(CmpInst::ICMP_ULT, C1, C2);
+ return getSelect(Cmp, C1, C2);
+}
+
+Constant *ConstantExpr::getShl(Constant *C1, Constant *C2,
+ bool HasNUW, bool HasNSW) {
+ unsigned Flags = (HasNUW ? OverflowingBinaryOperator::NoUnsignedWrap : 0) |
+ (HasNSW ? OverflowingBinaryOperator::NoSignedWrap : 0);
+ return get(Instruction::Shl, C1, C2, Flags);
+}
+
+Constant *ConstantExpr::getLShr(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::LShr, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
+}
+
+Constant *ConstantExpr::getAShr(Constant *C1, Constant *C2, bool isExact) {
+ return get(Instruction::AShr, C1, C2,
+ isExact ? PossiblyExactOperator::IsExact : 0);
+}
+
+Constant *ConstantExpr::getExactLogBase2(Constant *C) {
+ Type *Ty = C->getType();
+ const APInt *IVal;
+ if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
+ return ConstantInt::get(Ty, IVal->logBase2());
+
+ // FIXME: We can extract pow of 2 of splat constant for scalable vectors.
+ auto *VecTy = dyn_cast<FixedVectorType>(Ty);
+ if (!VecTy)
+ return nullptr;
+
+ SmallVector<Constant *, 4> Elts;
+ for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
+ Constant *Elt = C->getAggregateElement(I);
+ if (!Elt)
+ return nullptr;
+ // Note that log2(iN undef) is *NOT* iN undef, because log2(iN undef) u< N.
+ if (isa<UndefValue>(Elt)) {
+ Elts.push_back(Constant::getNullValue(Ty->getScalarType()));
+ continue;
+ }
+ if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
+ return nullptr;
+ Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
+ }
+
+ return ConstantVector::get(Elts);
+}
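+
+// A minimal usage sketch (assumes an LLVMContext `Ctx` is in scope):
+//   Type *I32 = Type::getInt32Ty(Ctx);
+//   ConstantExpr::getExactLogBase2(ConstantInt::get(I32, 8)); // i32 3
+//   ConstantExpr::getExactLogBase2(ConstantInt::get(I32, 6)); // nullptr
+// Undef lanes of a fixed vector come back as 0, per the note above.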
+
+Constant *ConstantExpr::getBinOpIdentity(unsigned Opcode, Type *Ty,
+ bool AllowRHSConstant, bool NSZ) {
+ assert(Instruction::isBinaryOp(Opcode) && "Only binops allowed");
+
+ // Commutative opcodes: it does not matter if AllowRHSConstant is set.
+ if (Instruction::isCommutative(Opcode)) {
+ switch (Opcode) {
+ case Instruction::Add: // X + 0 = X
+ case Instruction::Or: // X | 0 = X
+ case Instruction::Xor: // X ^ 0 = X
+ return Constant::getNullValue(Ty);
+ case Instruction::Mul: // X * 1 = X
+ return ConstantInt::get(Ty, 1);
+ case Instruction::And: // X & -1 = X
+ return Constant::getAllOnesValue(Ty);
+ case Instruction::FAdd: // X + -0.0 = X
+ return ConstantFP::getZero(Ty, !NSZ);
+ case Instruction::FMul: // X * 1.0 = X
+ return ConstantFP::get(Ty, 1.0);
+ default:
+ llvm_unreachable("Every commutative binop has an identity constant");
+ }
+ }
+
+ // Non-commutative opcodes: AllowRHSConstant must be set.
+ if (!AllowRHSConstant)
+ return nullptr;
+
+ switch (Opcode) {
+ case Instruction::Sub: // X - 0 = X
+ case Instruction::Shl: // X << 0 = X
+ case Instruction::LShr: // X >>u 0 = X
+ case Instruction::AShr: // X >> 0 = X
+ case Instruction::FSub: // X - 0.0 = X
+ return Constant::getNullValue(Ty);
+ case Instruction::SDiv: // X / 1 = X
+ case Instruction::UDiv: // X /u 1 = X
+ return ConstantInt::get(Ty, 1);
+ case Instruction::FDiv: // X / 1.0 = X
+ return ConstantFP::get(Ty, 1.0);
+ default:
+ return nullptr;
+ }
+}
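+
+// Illustrative results (a sketch; `I32` and `F32` name i32 and float types):
+//   ConstantExpr::getBinOpIdentity(Instruction::Add, I32);  // i32 0
+//   ConstantExpr::getBinOpIdentity(Instruction::FAdd, F32); // float -0.0
+//   ConstantExpr::getBinOpIdentity(Instruction::FAdd, F32, false,
+//                                  /*NSZ=*/true);           // float 0.0
+//   ConstantExpr::getBinOpIdentity(Instruction::Sub, I32);  // nullptr
+//   ConstantExpr::getBinOpIdentity(Instruction::Sub, I32,
+//                                  /*AllowRHSConstant=*/true); // i32 0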
+
+Constant *ConstantExpr::getBinOpAbsorber(unsigned Opcode, Type *Ty) {
+ switch (Opcode) {
+ default:
+ // Doesn't have an absorber.
+ return nullptr;
+
+ case Instruction::Or:
+ return Constant::getAllOnesValue(Ty);
+
+ case Instruction::And:
+ case Instruction::Mul:
+ return Constant::getNullValue(Ty);
+ }
+}
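+
+// For example (with `I32` an i32 type), getBinOpAbsorber(Instruction::Mul,
+// I32) is i32 0 (x * 0 == 0) and getBinOpAbsorber(Instruction::Or, I32) is
+// i32 -1 (x | -1 == -1), while Add has no absorber and yields nullptr.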
+
+/// Remove the constant from the constant table.
+void ConstantExpr::destroyConstantImpl() {
+ getType()->getContext().pImpl->ExprConstants.remove(this);
+}
+
+const char *ConstantExpr::getOpcodeName() const {
+ return Instruction::getOpcodeName(getOpcode());
+}
+
+GetElementPtrConstantExpr::GetElementPtrConstantExpr(
+ Type *SrcElementTy, Constant *C, ArrayRef<Constant *> IdxList, Type *DestTy)
+ : ConstantExpr(DestTy, Instruction::GetElementPtr,
+ OperandTraits<GetElementPtrConstantExpr>::op_end(this) -
+ (IdxList.size() + 1),
+ IdxList.size() + 1),
+ SrcElementTy(SrcElementTy),
+ ResElementTy(GetElementPtrInst::getIndexedType(SrcElementTy, IdxList)) {
+ Op<0>() = C;
+ Use *OperandList = getOperandList();
+ for (unsigned i = 0, E = IdxList.size(); i != E; ++i)
+ OperandList[i+1] = IdxList[i];
+}
+
+Type *GetElementPtrConstantExpr::getSourceElementType() const {
+ return SrcElementTy;
+}
+
+Type *GetElementPtrConstantExpr::getResultElementType() const {
+ return ResElementTy;
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantData* implementations
+
+Type *ConstantDataSequential::getElementType() const {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(getType()))
+ return ATy->getElementType();
+ return cast<VectorType>(getType())->getElementType();
+}
+
+StringRef ConstantDataSequential::getRawDataValues() const {
+ return StringRef(DataElements, getNumElements()*getElementByteSize());
+}
+
+bool ConstantDataSequential::isElementTypeCompatible(Type *Ty) {
+ if (Ty->isHalfTy() || Ty->isBFloatTy() || Ty->isFloatTy() || Ty->isDoubleTy())
+ return true;
+ if (auto *IT = dyn_cast<IntegerType>(Ty)) {
+ switch (IT->getBitWidth()) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ return true;
+ default: break;
+ }
+ }
+ return false;
+}
+
+unsigned ConstantDataSequential::getNumElements() const {
+ if (ArrayType *AT = dyn_cast<ArrayType>(getType()))
+ return AT->getNumElements();
+ return cast<FixedVectorType>(getType())->getNumElements();
+}
+
+uint64_t ConstantDataSequential::getElementByteSize() const {
+ return getElementType()->getPrimitiveSizeInBits()/8;
+}
+
+/// Return the start of the specified element.
+const char *ConstantDataSequential::getElementPointer(unsigned Elt) const {
+ assert(Elt < getNumElements() && "Invalid Elt");
+ return DataElements+Elt*getElementByteSize();
+}
+
+/// Return true if the array is empty or all zeros.
+static bool isAllZeros(StringRef Arr) {
+ for (char I : Arr)
+ if (I != 0)
+ return false;
+ return true;
+}
+
+/// This is the underlying implementation of all of the
+/// ConstantDataSequential::get methods. They all thunk down to here, providing
+/// the correct element type. We take the bytes in as a StringRef because
+/// we *want* an underlying "char*" to avoid TBAA type punning violations.
+Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) {
+#ifndef NDEBUG
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty))
+ assert(isElementTypeCompatible(ATy->getElementType()));
+ else
+ assert(isElementTypeCompatible(cast<VectorType>(Ty)->getElementType()));
+#endif
+ // If the elements are all zero or there are no elements, return a CAZ, which
+ // is more dense and canonical.
+ if (isAllZeros(Elements))
+ return ConstantAggregateZero::get(Ty);
+
+ // Do a lookup to see if we have already formed one of these.
+ auto &Slot =
+ *Ty->getContext()
+ .pImpl->CDSConstants.insert(std::make_pair(Elements, nullptr))
+ .first;
+
+  // The bucket can point to a linked list of different CDS's that have the
+  // same body but different types. For example, 0,0,0,1 could be a 4-element
+  // array of i8, or a 1-element array of i32. They'll both end up in the same
+  // StringMap bucket, linked up by their Next pointers. Walk the list.
+ std::unique_ptr<ConstantDataSequential> *Entry = &Slot.second;
+ for (; *Entry; Entry = &(*Entry)->Next)
+ if ((*Entry)->getType() == Ty)
+ return Entry->get();
+
+ // Okay, we didn't get a hit. Create a node of the right class, link it in,
+ // and return it.
+ if (isa<ArrayType>(Ty)) {
+ // Use reset because std::make_unique can't access the constructor.
+ Entry->reset(new ConstantDataArray(Ty, Slot.first().data()));
+ return Entry->get();
+ }
+
+ assert(isa<VectorType>(Ty));
+ // Use reset because std::make_unique can't access the constructor.
+ Entry->reset(new ConstantDataVector(Ty, Slot.first().data()));
+ return Entry->get();
+}
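+
+// A minimal usage sketch through the public entry points (assumes an
+// LLVMContext `Ctx`): identical bytes with identical types are uniqued to a
+// single node, and all-zero data canonicalizes to a ConstantAggregateZero.
+//   uint8_t Vals[] = {1, 2};
+//   Constant *A = ConstantDataArray::get(Ctx, makeArrayRef(Vals));
+//   Constant *B = ConstantDataArray::get(Ctx, makeArrayRef(Vals));
+//   assert(A == B);
+//   uint8_t Zeros[2] = {};
+//   assert(isa<ConstantAggregateZero>(
+//       ConstantDataArray::get(Ctx, makeArrayRef(Zeros))));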
+
+void ConstantDataSequential::destroyConstantImpl() {
+ // Remove the constant from the StringMap.
+ StringMap<std::unique_ptr<ConstantDataSequential>> &CDSConstants =
+ getType()->getContext().pImpl->CDSConstants;
+
+ auto Slot = CDSConstants.find(getRawDataValues());
+
+ assert(Slot != CDSConstants.end() && "CDS not found in uniquing table");
+
+ std::unique_ptr<ConstantDataSequential> *Entry = &Slot->getValue();
+
+ // Remove the entry from the hash table.
+ if (!(*Entry)->Next) {
+ // If there is only one value in the bucket (common case) it must be this
+ // entry, and removing the entry should remove the bucket completely.
+ assert(Entry->get() == this && "Hash mismatch in ConstantDataSequential");
+ getContext().pImpl->CDSConstants.erase(Slot);
+ return;
+ }
+
+ // Otherwise, there are multiple entries linked off the bucket, unlink the
+ // node we care about but keep the bucket around.
+ while (true) {
+ std::unique_ptr<ConstantDataSequential> &Node = *Entry;
+ assert(Node && "Didn't find entry in its uniquing hash table!");
+ // If we found our entry, unlink it from the list and we're done.
+ if (Node.get() == this) {
+ Node = std::move(Node->Next);
+ return;
+ }
+
+ Entry = &Node->Next;
+ }
+}
+
+/// getFP() constructors - Return a constant of array type with a float
+/// element type taken from argument `ElementType', and a count taken from
+/// argument `Elts'. The bit width of the element type must match the bit
+/// width of the integer type held in the passed-in ArrayRef (i.e. half or
+/// bfloat for 16 bits, float for 32 bits, double for 64 bits). Note that
+/// this can return a ConstantAggregateZero object.
+Constant *ConstantDataArray::getFP(Type *ElementType, ArrayRef<uint16_t> Elts) {
+ assert((ElementType->isHalfTy() || ElementType->isBFloatTy()) &&
+ "Element type is not a 16-bit float type");
+ Type *Ty = ArrayType::get(ElementType, Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
+}
+Constant *ConstantDataArray::getFP(Type *ElementType, ArrayRef<uint32_t> Elts) {
+ assert(ElementType->isFloatTy() && "Element type is not a 32-bit float type");
+ Type *Ty = ArrayType::get(ElementType, Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
+}
+Constant *ConstantDataArray::getFP(Type *ElementType, ArrayRef<uint64_t> Elts) {
+ assert(ElementType->isDoubleTy() &&
+ "Element type is not a 64-bit float type");
+ Type *Ty = ArrayType::get(ElementType, Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
+}
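+
+// A minimal usage sketch (assumes an LLVMContext `Ctx`): the 16-bit overload
+// takes raw IEEE bit patterns, so a [2 x half] holding {1.0, 2.0} is built as
+//   uint16_t Bits[] = {0x3C00, 0x4000};
+//   Constant *H = ConstantDataArray::getFP(Type::getHalfTy(Ctx), Bits);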
+
+Constant *ConstantDataArray::getString(LLVMContext &Context,
+ StringRef Str, bool AddNull) {
+ if (!AddNull) {
+ const uint8_t *Data = Str.bytes_begin();
+ return get(Context, makeArrayRef(Data, Str.size()));
+ }
+
+ SmallVector<uint8_t, 64> ElementVals;
+ ElementVals.append(Str.begin(), Str.end());
+ ElementVals.push_back(0);
+ return get(Context, ElementVals);
+}
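+
+// For example, getString(Ctx, "hi") yields a [3 x i8] with a trailing nul
+// (AddNull is true by default in the declaration), while
+// getString(Ctx, "hi", false) yields a [2 x i8] holding just the characters.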
+
+/// get() constructors - Return a constant of vector type with an element
+/// count and element type matching the ArrayRef passed in. Note that this
+/// can return a ConstantAggregateZero object.
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){
+ auto *Ty = FixedVectorType::get(Type::getInt8Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 1), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
+ auto *Ty = FixedVectorType::get(Type::getInt16Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
+ auto *Ty = FixedVectorType::get(Type::getInt32Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
+ auto *Ty = FixedVectorType::get(Type::getInt64Ty(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) {
+ auto *Ty = FixedVectorType::get(Type::getFloatTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
+}
+Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) {
+ auto *Ty = FixedVectorType::get(Type::getDoubleTy(Context), Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
+}
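+
+// A minimal usage sketch (assumes an LLVMContext `Ctx`): a <4 x float>
+// constant built from host floats.
+//   float Vals[] = {1.0f, 2.0f, 3.0f, 4.0f};
+//   Constant *V = ConstantDataVector::get(Ctx, makeArrayRef(Vals));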
+
+/// getFP() constructors - Return a constant of vector type with a float
+/// element type taken from argument `ElementType', and a count taken from
+/// argument `Elts'. The bit width of the element type must match the bit
+/// width of the integer type held in the passed-in ArrayRef (i.e. half or
+/// bfloat for 16 bits, float for 32 bits, double for 64 bits). Note that
+/// this can return a ConstantAggregateZero object.
+Constant *ConstantDataVector::getFP(Type *ElementType,
+ ArrayRef<uint16_t> Elts) {
+ assert((ElementType->isHalfTy() || ElementType->isBFloatTy()) &&
+ "Element type is not a 16-bit float type");
+ auto *Ty = FixedVectorType::get(ElementType, Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
+}
+Constant *ConstantDataVector::getFP(Type *ElementType,
+ ArrayRef<uint32_t> Elts) {
+ assert(ElementType->isFloatTy() && "Element type is not a 32-bit float type");
+ auto *Ty = FixedVectorType::get(ElementType, Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
+}
+Constant *ConstantDataVector::getFP(Type *ElementType,
+ ArrayRef<uint64_t> Elts) {
+ assert(ElementType->isDoubleTy() &&
+ "Element type is not a 64-bit float type");
+ auto *Ty = FixedVectorType::get(ElementType, Elts.size());
+ const char *Data = reinterpret_cast<const char *>(Elts.data());
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
+}
+
+Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) {
+ assert(isElementTypeCompatible(V->getType()) &&
+ "Element type not compatible with ConstantData");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ if (CI->getType()->isIntegerTy(8)) {
+ SmallVector<uint8_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ if (CI->getType()->isIntegerTy(16)) {
+ SmallVector<uint16_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ if (CI->getType()->isIntegerTy(32)) {
+ SmallVector<uint32_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+ assert(CI->getType()->isIntegerTy(64) && "Unsupported ConstantData type");
+ SmallVector<uint64_t, 16> Elts(NumElts, CI->getZExtValue());
+ return get(V->getContext(), Elts);
+ }
+
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
+ if (CFP->getType()->isHalfTy()) {
+ SmallVector<uint16_t, 16> Elts(
+ NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ return getFP(V->getType(), Elts);
+ }
+ if (CFP->getType()->isBFloatTy()) {
+ SmallVector<uint16_t, 16> Elts(
+ NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ return getFP(V->getType(), Elts);
+ }
+ if (CFP->getType()->isFloatTy()) {
+ SmallVector<uint32_t, 16> Elts(
+ NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ return getFP(V->getType(), Elts);
+ }
+ if (CFP->getType()->isDoubleTy()) {
+ SmallVector<uint64_t, 16> Elts(
+ NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue());
+ return getFP(V->getType(), Elts);
+ }
+ }
+ return ConstantVector::getSplat(ElementCount::getFixed(NumElts), V);
+}
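+
+// A minimal usage sketch (assumes an LLVMContext `Ctx`): splatting i32 7 into
+// four lanes stays in the compact ConstantDataVector representation.
+//   Constant *Seven = ConstantInt::get(Type::getInt32Ty(Ctx), 7);
+//   Constant *V = ConstantDataVector::getSplat(4, Seven); // <4 x i32>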
+
+uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const {
+ assert(isa<IntegerType>(getElementType()) &&
+ "Accessor can only be used when element is an integer");
+ const char *EltPtr = getElementPointer(Elt);
+
+ // The data is stored in host byte order, make sure to cast back to the right
+ // type to load with the right endianness.
+ switch (getElementType()->getIntegerBitWidth()) {
+ default: llvm_unreachable("Invalid bitwidth for CDS");
+ case 8:
+ return *reinterpret_cast<const uint8_t *>(EltPtr);
+ case 16:
+ return *reinterpret_cast<const uint16_t *>(EltPtr);
+ case 32:
+ return *reinterpret_cast<const uint32_t *>(EltPtr);
+ case 64:
+ return *reinterpret_cast<const uint64_t *>(EltPtr);
+ }
+}
+
+APInt ConstantDataSequential::getElementAsAPInt(unsigned Elt) const {
+ assert(isa<IntegerType>(getElementType()) &&
+ "Accessor can only be used when element is an integer");
+ const char *EltPtr = getElementPointer(Elt);
+
+ // The data is stored in host byte order, make sure to cast back to the right
+ // type to load with the right endianness.
+ switch (getElementType()->getIntegerBitWidth()) {
+ default: llvm_unreachable("Invalid bitwidth for CDS");
+ case 8: {
+ auto EltVal = *reinterpret_cast<const uint8_t *>(EltPtr);
+ return APInt(8, EltVal);
+ }
+ case 16: {
+ auto EltVal = *reinterpret_cast<const uint16_t *>(EltPtr);
+ return APInt(16, EltVal);
+ }
+ case 32: {
+ auto EltVal = *reinterpret_cast<const uint32_t *>(EltPtr);
+ return APInt(32, EltVal);
+ }
+ case 64: {
+ auto EltVal = *reinterpret_cast<const uint64_t *>(EltPtr);
+ return APInt(64, EltVal);
+ }
+ }
+}
+
+APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
+ const char *EltPtr = getElementPointer(Elt);
+
+ switch (getElementType()->getTypeID()) {
+  default:
+    llvm_unreachable(
+        "Accessor can only be used when element is a floating-point type!");
+ case Type::HalfTyID: {
+ auto EltVal = *reinterpret_cast<const uint16_t *>(EltPtr);
+ return APFloat(APFloat::IEEEhalf(), APInt(16, EltVal));
+ }
+ case Type::BFloatTyID: {
+ auto EltVal = *reinterpret_cast<const uint16_t *>(EltPtr);
+ return APFloat(APFloat::BFloat(), APInt(16, EltVal));
+ }
+ case Type::FloatTyID: {
+ auto EltVal = *reinterpret_cast<const uint32_t *>(EltPtr);
+ return APFloat(APFloat::IEEEsingle(), APInt(32, EltVal));
+ }
+ case Type::DoubleTyID: {
+ auto EltVal = *reinterpret_cast<const uint64_t *>(EltPtr);
+ return APFloat(APFloat::IEEEdouble(), APInt(64, EltVal));
+ }
+ }
+}
+
+float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
+ assert(getElementType()->isFloatTy() &&
+ "Accessor can only be used when element is a 'float'");
+ return *reinterpret_cast<const float *>(getElementPointer(Elt));
+}
+
+double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
+ assert(getElementType()->isDoubleTy() &&
+ "Accessor can only be used when element is a 'float'");
+ return *reinterpret_cast<const double *>(getElementPointer(Elt));
+}
+
+Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
+ if (getElementType()->isHalfTy() || getElementType()->isBFloatTy() ||
+ getElementType()->isFloatTy() || getElementType()->isDoubleTy())
+ return ConstantFP::get(getContext(), getElementAsAPFloat(Elt));
+
+ return ConstantInt::get(getElementType(), getElementAsInteger(Elt));
+}
+
+bool ConstantDataSequential::isString(unsigned CharSize) const {
+ return isa<ArrayType>(getType()) && getElementType()->isIntegerTy(CharSize);
+}
+
+bool ConstantDataSequential::isCString() const {
+ if (!isString())
+ return false;
+
+ StringRef Str = getAsString();
+
+ // The last value must be nul.
+ if (Str.back() != 0) return false;
+
+ // Other elements must be non-nul.
+ return !Str.drop_back().contains(0);
+}
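+
+// For example, ConstantDataArray::getString(Ctx, "hi") satisfies isCString()
+// thanks to the appended terminator, whereas passing StringRef("h\0i", 3)
+// with AddNull=false does not: its nul is interior rather than terminal.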
+
+bool ConstantDataVector::isSplatData() const {
+ const char *Base = getRawDataValues().data();
+
+  // Compare elements 1+ to the 0th element.
+ unsigned EltSize = getElementByteSize();
+ for (unsigned i = 1, e = getNumElements(); i != e; ++i)
+ if (memcmp(Base, Base+i*EltSize, EltSize))
+ return false;
+
+ return true;
+}
+
+bool ConstantDataVector::isSplat() const {
+ if (!IsSplatSet) {
+ IsSplatSet = true;
+ IsSplat = isSplatData();
+ }
+ return IsSplat;
+}
+
+Constant *ConstantDataVector::getSplatValue() const {
+ // If they're all the same, return the 0th one as a representative.
+ return isSplat() ? getElementAsConstant(0) : nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// handleOperandChange implementations
+
+/// Update this constant array to change uses of
+/// 'From' to be uses of 'To'. This must update the uniquing data structures
+/// etc.
+///
+/// Note that we intentionally replace all uses of From with To here. Consider
+/// a large array that uses 'From' 1000 times. By handling all of those uses
+/// here in one shot, ConstantArray::handleOperandChange is invoked only once,
+/// and that single invocation handles all 1000 uses. Handling them one at a
+/// time would work, but would be really slow because it would have to
+/// re-unique the array after every individual update.
+///
+void Constant::handleOperandChange(Value *From, Value *To) {
+ Value *Replacement = nullptr;
+ switch (getValueID()) {
+ default:
+ llvm_unreachable("Not a constant!");
+#define HANDLE_CONSTANT(Name) \
+ case Value::Name##Val: \
+ Replacement = cast<Name>(this)->handleOperandChangeImpl(From, To); \
+ break;
+#include "llvm/IR/Value.def"
+ }
+
+ // If handleOperandChangeImpl returned nullptr, then it handled
+ // replacing itself and we don't want to delete or replace anything else here.
+ if (!Replacement)
+ return;
+
+ // I do need to replace this with an existing value.
+ assert(Replacement != this && "I didn't contain From!");
+
+ // Everyone using this now uses the replacement.
+ replaceAllUsesWith(Replacement);
+
+ // Delete the old constant!
+ destroyConstant();
+}
+
+Value *ConstantArray::handleOperandChangeImpl(Value *From, Value *To) {
+ assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
+ Constant *ToC = cast<Constant>(To);
+
+ SmallVector<Constant*, 8> Values;
+ Values.reserve(getNumOperands()); // Build replacement array.
+
+ // Fill values with the modified operands of the constant array. Also,
+ // compute whether this turns into an all-zeros array.
+ unsigned NumUpdated = 0;
+
+ // Keep track of whether all the values in the array are "ToC".
+ bool AllSame = true;
+ Use *OperandList = getOperandList();
+ unsigned OperandNo = 0;
+ for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
+ Constant *Val = cast<Constant>(O->get());
+ if (Val == From) {
+ OperandNo = (O - OperandList);
+ Val = ToC;
+ ++NumUpdated;
+ }
+ Values.push_back(Val);
+ AllSame &= Val == ToC;
+ }
+
+ if (AllSame && ToC->isNullValue())
+ return ConstantAggregateZero::get(getType());
+
+ if (AllSame && isa<UndefValue>(ToC))
+ return UndefValue::get(getType());
+
+ // Check for any other type of constant-folding.
+ if (Constant *C = getImpl(getType(), Values))
+ return C;
+
+ // Update to the new value.
+ return getContext().pImpl->ArrayConstants.replaceOperandsInPlace(
+ Values, this, From, ToC, NumUpdated, OperandNo);
+}
+
+Value *ConstantStruct::handleOperandChangeImpl(Value *From, Value *To) {
+ assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
+ Constant *ToC = cast<Constant>(To);
+
+ Use *OperandList = getOperandList();
+
+ SmallVector<Constant*, 8> Values;
+ Values.reserve(getNumOperands()); // Build replacement struct.
+
+ // Fill values with the modified operands of the constant struct. Also,
+ // compute whether this turns into an all-zeros struct.
+ unsigned NumUpdated = 0;
+ bool AllSame = true;
+ unsigned OperandNo = 0;
+ for (Use *O = OperandList, *E = OperandList + getNumOperands(); O != E; ++O) {
+ Constant *Val = cast<Constant>(O->get());
+ if (Val == From) {
+ OperandNo = (O - OperandList);
+ Val = ToC;
+ ++NumUpdated;
+ }
+ Values.push_back(Val);
+ AllSame &= Val == ToC;
+ }
+
+ if (AllSame && ToC->isNullValue())
+ return ConstantAggregateZero::get(getType());
+
+ if (AllSame && isa<UndefValue>(ToC))
+ return UndefValue::get(getType());
+
+ // Update to the new value.
+ return getContext().pImpl->StructConstants.replaceOperandsInPlace(
+ Values, this, From, ToC, NumUpdated, OperandNo);
+}
+
+Value *ConstantVector::handleOperandChangeImpl(Value *From, Value *To) {
+ assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
+ Constant *ToC = cast<Constant>(To);
+
+ SmallVector<Constant*, 8> Values;
+ Values.reserve(getNumOperands()); // Build replacement array...
+ unsigned NumUpdated = 0;
+ unsigned OperandNo = 0;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ Constant *Val = getOperand(i);
+ if (Val == From) {
+ OperandNo = i;
+ ++NumUpdated;
+ Val = ToC;
+ }
+ Values.push_back(Val);
+ }
+
+ if (Constant *C = getImpl(Values))
+ return C;
+
+ // Update to the new value.
+ return getContext().pImpl->VectorConstants.replaceOperandsInPlace(
+ Values, this, From, ToC, NumUpdated, OperandNo);
+}
+
+Value *ConstantExpr::handleOperandChangeImpl(Value *From, Value *ToV) {
+ assert(isa<Constant>(ToV) && "Cannot make Constant refer to non-constant!");
+ Constant *To = cast<Constant>(ToV);
+
+ SmallVector<Constant*, 8> NewOps;
+ unsigned NumUpdated = 0;
+ unsigned OperandNo = 0;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ Constant *Op = getOperand(i);
+ if (Op == From) {
+ OperandNo = i;
+ ++NumUpdated;
+ Op = To;
+ }
+ NewOps.push_back(Op);
+ }
+ assert(NumUpdated && "I didn't contain From!");
+
+ if (Constant *C = getWithOperands(NewOps, getType(), true))
+ return C;
+
+ // Update to the new value.
+ return getContext().pImpl->ExprConstants.replaceOperandsInPlace(
+ NewOps, this, From, To, NumUpdated, OperandNo);
+}
+
+Instruction *ConstantExpr::getAsInstruction(Instruction *InsertBefore) const {
+ SmallVector<Value *, 4> ValueOperands(operands());
+ ArrayRef<Value*> Ops(ValueOperands);
+
+ switch (getOpcode()) {
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast:
+ case Instruction::AddrSpaceCast:
+ return CastInst::Create((Instruction::CastOps)getOpcode(), Ops[0],
+ getType(), "", InsertBefore);
+ case Instruction::Select:
+ return SelectInst::Create(Ops[0], Ops[1], Ops[2], "", InsertBefore);
+ case Instruction::InsertElement:
+ return InsertElementInst::Create(Ops[0], Ops[1], Ops[2], "", InsertBefore);
+ case Instruction::ExtractElement:
+ return ExtractElementInst::Create(Ops[0], Ops[1], "", InsertBefore);
+ case Instruction::ShuffleVector:
+ return new ShuffleVectorInst(Ops[0], Ops[1], getShuffleMask(), "",
+ InsertBefore);
+
+ case Instruction::GetElementPtr: {
+ const auto *GO = cast<GEPOperator>(this);
+ if (GO->isInBounds())
+ return GetElementPtrInst::CreateInBounds(
+ GO->getSourceElementType(), Ops[0], Ops.slice(1), "", InsertBefore);
+ return GetElementPtrInst::Create(GO->getSourceElementType(), Ops[0],
+ Ops.slice(1), "", InsertBefore);
+ }
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ return CmpInst::Create((Instruction::OtherOps)getOpcode(),
+ (CmpInst::Predicate)getPredicate(), Ops[0], Ops[1],
+ "", InsertBefore);
+ case Instruction::FNeg:
+ return UnaryOperator::Create((Instruction::UnaryOps)getOpcode(), Ops[0], "",
+ InsertBefore);
+ default:
+ assert(getNumOperands() == 2 && "Must be binary operator?");
+ BinaryOperator *BO = BinaryOperator::Create(
+ (Instruction::BinaryOps)getOpcode(), Ops[0], Ops[1], "", InsertBefore);
+ if (isa<OverflowingBinaryOperator>(BO)) {
+ BO->setHasNoUnsignedWrap(SubclassOptionalData &
+ OverflowingBinaryOperator::NoUnsignedWrap);
+ BO->setHasNoSignedWrap(SubclassOptionalData &
+ OverflowingBinaryOperator::NoSignedWrap);
+ }
+ if (isa<PossiblyExactOperator>(BO))
+ BO->setIsExact(SubclassOptionalData & PossiblyExactOperator::IsExact);
+ return BO;
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/ConstantsContext.h b/contrib/llvm-project/llvm/lib/IR/ConstantsContext.h
new file mode 100644
index 000000000000..1d74e2d49f35
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/ConstantsContext.h
@@ -0,0 +1,698 @@
+//===-- ConstantsContext.h - Constants-related Context Internals -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various helper methods and classes used by
+// LLVMContextImpl for creating and managing constants.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_CONSTANTSCONTEXT_H
+#define LLVM_LIB_IR_CONSTANTSCONTEXT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+#define DEBUG_TYPE "ir"
+
+namespace llvm {
+
+/// UnaryConstantExpr - This class is private to Constants.cpp, and is used
+/// behind the scenes to implement unary constant exprs.
+class UnaryConstantExpr final : public ConstantExpr {
+public:
+ UnaryConstantExpr(unsigned Opcode, Constant *C, Type *Ty)
+ : ConstantExpr(Ty, Opcode, &Op<0>(), 1) {
+ Op<0>() = C;
+ }
+
+ // allocate space for exactly one operand
+ void *operator new(size_t S) { return User::operator new(S, 1); }
+ void operator delete(void *Ptr) { User::operator delete(Ptr); }
+
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return Instruction::isCast(CE->getOpcode()) ||
+ Instruction::isUnaryOp(CE->getOpcode());
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// BinaryConstantExpr - This class is private to Constants.cpp, and is used
+/// behind the scenes to implement binary constant exprs.
+class BinaryConstantExpr final : public ConstantExpr {
+public:
+ BinaryConstantExpr(unsigned Opcode, Constant *C1, Constant *C2,
+ unsigned Flags)
+ : ConstantExpr(C1->getType(), Opcode, &Op<0>(), 2) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ SubclassOptionalData = Flags;
+ }
+
+ // allocate space for exactly two operands
+ void *operator new(size_t S) { return User::operator new(S, 2); }
+ void operator delete(void *Ptr) { User::operator delete(Ptr); }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return Instruction::isBinaryOp(CE->getOpcode());
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// SelectConstantExpr - This class is private to Constants.cpp, and is used
+/// behind the scenes to implement select constant exprs.
+class SelectConstantExpr final : public ConstantExpr {
+public:
+ SelectConstantExpr(Constant *C1, Constant *C2, Constant *C3)
+ : ConstantExpr(C2->getType(), Instruction::Select, &Op<0>(), 3) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ Op<2>() = C3;
+ }
+
+ // allocate space for exactly three operands
+ void *operator new(size_t S) { return User::operator new(S, 3); }
+ void operator delete(void *Ptr) { User::operator delete(Ptr); }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::Select;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// ExtractElementConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// extractelement constant exprs.
+class ExtractElementConstantExpr final : public ConstantExpr {
+public:
+ ExtractElementConstantExpr(Constant *C1, Constant *C2)
+ : ConstantExpr(cast<VectorType>(C1->getType())->getElementType(),
+ Instruction::ExtractElement, &Op<0>(), 2) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ }
+
+ // allocate space for exactly two operands
+ void *operator new(size_t S) { return User::operator new(S, 2); }
+ void operator delete(void *Ptr) { User::operator delete(Ptr); }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::ExtractElement;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// InsertElementConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// insertelement constant exprs.
+class InsertElementConstantExpr final : public ConstantExpr {
+public:
+ InsertElementConstantExpr(Constant *C1, Constant *C2, Constant *C3)
+ : ConstantExpr(C1->getType(), Instruction::InsertElement,
+ &Op<0>(), 3) {
+ Op<0>() = C1;
+ Op<1>() = C2;
+ Op<2>() = C3;
+ }
+
+ // allocate space for exactly three operands
+ void *operator new(size_t S) { return User::operator new(S, 3); }
+ void operator delete(void *Ptr) { User::operator delete(Ptr); }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::InsertElement;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// ShuffleVectorConstantExpr - This class is private to
+/// Constants.cpp, and is used behind the scenes to implement
+/// shufflevector constant exprs.
+class ShuffleVectorConstantExpr final : public ConstantExpr {
+public:
+ ShuffleVectorConstantExpr(Constant *C1, Constant *C2, ArrayRef<int> Mask)
+ : ConstantExpr(VectorType::get(
+ cast<VectorType>(C1->getType())->getElementType(),
+ Mask.size(), isa<ScalableVectorType>(C1->getType())),
+ Instruction::ShuffleVector, &Op<0>(), 2) {
+ assert(ShuffleVectorInst::isValidOperands(C1, C2, Mask) &&
+ "Invalid shuffle vector instruction operands!");
+ Op<0>() = C1;
+ Op<1>() = C2;
+ ShuffleMask.assign(Mask.begin(), Mask.end());
+ ShuffleMaskForBitcode =
+ ShuffleVectorInst::convertShuffleMaskForBitcode(Mask, getType());
+ }
+
+ SmallVector<int, 4> ShuffleMask;
+ Constant *ShuffleMaskForBitcode;
+
+ void *operator new(size_t S) { return User::operator new(S, 2); }
+ void operator delete(void *Ptr) { return User::operator delete(Ptr); }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::ShuffleVector;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+/// GetElementPtrConstantExpr - This class is private to Constants.cpp, and is
+/// used behind the scenes to implement getelementptr constant exprs.
+class GetElementPtrConstantExpr final : public ConstantExpr {
+ Type *SrcElementTy;
+ Type *ResElementTy;
+
+ GetElementPtrConstantExpr(Type *SrcElementTy, Constant *C,
+ ArrayRef<Constant *> IdxList, Type *DestTy);
+
+public:
+ static GetElementPtrConstantExpr *Create(Type *SrcElementTy, Constant *C,
+ ArrayRef<Constant *> IdxList,
+ Type *DestTy, unsigned Flags) {
+ GetElementPtrConstantExpr *Result = new (IdxList.size() + 1)
+ GetElementPtrConstantExpr(SrcElementTy, C, IdxList, DestTy);
+ Result->SubclassOptionalData = Flags;
+ return Result;
+ }
+
+ Type *getSourceElementType() const;
+ Type *getResultElementType() const;
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::GetElementPtr;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+// CompareConstantExpr - This class is private to Constants.cpp, and is used
+// behind the scenes to implement ICmp and FCmp constant expressions. This is
+// needed in order to store the predicate value for these instructions.
+class CompareConstantExpr final : public ConstantExpr {
+public:
+ unsigned short predicate;
+ CompareConstantExpr(Type *ty, Instruction::OtherOps opc,
+ unsigned short pred, Constant* LHS, Constant* RHS)
+ : ConstantExpr(ty, opc, &Op<0>(), 2), predicate(pred) {
+ Op<0>() = LHS;
+ Op<1>() = RHS;
+ }
+
+ // allocate space for exactly two operands
+ void *operator new(size_t S) { return User::operator new(S, 2); }
+ void operator delete(void *Ptr) { return User::operator delete(Ptr); }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ static bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::ICmp ||
+ CE->getOpcode() == Instruction::FCmp;
+ }
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V));
+ }
+};
+
+template <>
+struct OperandTraits<UnaryConstantExpr>
+ : public FixedNumOperandTraits<UnaryConstantExpr, 1> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryConstantExpr, Value)
+
+template <>
+struct OperandTraits<BinaryConstantExpr>
+ : public FixedNumOperandTraits<BinaryConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryConstantExpr, Value)
+
+template <>
+struct OperandTraits<SelectConstantExpr>
+ : public FixedNumOperandTraits<SelectConstantExpr, 3> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectConstantExpr, Value)
+
+template <>
+struct OperandTraits<ExtractElementConstantExpr>
+ : public FixedNumOperandTraits<ExtractElementConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementConstantExpr, Value)
+
+template <>
+struct OperandTraits<InsertElementConstantExpr>
+ : public FixedNumOperandTraits<InsertElementConstantExpr, 3> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementConstantExpr, Value)
+
+template <>
+struct OperandTraits<ShuffleVectorConstantExpr>
+ : public FixedNumOperandTraits<ShuffleVectorConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorConstantExpr, Value)
+
+template <>
+struct OperandTraits<GetElementPtrConstantExpr>
+ : public VariadicOperandTraits<GetElementPtrConstantExpr, 1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrConstantExpr, Value)
+
+template <>
+struct OperandTraits<CompareConstantExpr>
+ : public FixedNumOperandTraits<CompareConstantExpr, 2> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CompareConstantExpr, Value)
+
+template <class ConstantClass> struct ConstantAggrKeyType;
+struct InlineAsmKeyType;
+struct ConstantExprKeyType;
+
+template <class ConstantClass> struct ConstantInfo;
+template <> struct ConstantInfo<ConstantExpr> {
+ using ValType = ConstantExprKeyType;
+ using TypeClass = Type;
+};
+template <> struct ConstantInfo<InlineAsm> {
+ using ValType = InlineAsmKeyType;
+ using TypeClass = PointerType;
+};
+template <> struct ConstantInfo<ConstantArray> {
+ using ValType = ConstantAggrKeyType<ConstantArray>;
+ using TypeClass = ArrayType;
+};
+template <> struct ConstantInfo<ConstantStruct> {
+ using ValType = ConstantAggrKeyType<ConstantStruct>;
+ using TypeClass = StructType;
+};
+template <> struct ConstantInfo<ConstantVector> {
+ using ValType = ConstantAggrKeyType<ConstantVector>;
+ using TypeClass = VectorType;
+};
+
+template <class ConstantClass> struct ConstantAggrKeyType {
+ ArrayRef<Constant *> Operands;
+
+ ConstantAggrKeyType(ArrayRef<Constant *> Operands) : Operands(Operands) {}
+
+ ConstantAggrKeyType(ArrayRef<Constant *> Operands, const ConstantClass *)
+ : Operands(Operands) {}
+
+ ConstantAggrKeyType(const ConstantClass *C,
+ SmallVectorImpl<Constant *> &Storage) {
+ assert(Storage.empty() && "Expected empty storage");
+ for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
+ Storage.push_back(C->getOperand(I));
+ Operands = Storage;
+ }
+
+ bool operator==(const ConstantAggrKeyType &X) const {
+ return Operands == X.Operands;
+ }
+
+ bool operator==(const ConstantClass *C) const {
+ if (Operands.size() != C->getNumOperands())
+ return false;
+ for (unsigned I = 0, E = Operands.size(); I != E; ++I)
+ if (Operands[I] != C->getOperand(I))
+ return false;
+ return true;
+ }
+
+ unsigned getHash() const {
+ return hash_combine_range(Operands.begin(), Operands.end());
+ }
+
+ using TypeClass = typename ConstantInfo<ConstantClass>::TypeClass;
+
+ ConstantClass *create(TypeClass *Ty) const {
+ return new (Operands.size()) ConstantClass(Ty, Operands);
+ }
+};
+
+struct InlineAsmKeyType {
+ StringRef AsmString;
+ StringRef Constraints;
+ FunctionType *FTy;
+ bool HasSideEffects;
+ bool IsAlignStack;
+ InlineAsm::AsmDialect AsmDialect;
+ bool CanThrow;
+
+ InlineAsmKeyType(StringRef AsmString, StringRef Constraints,
+ FunctionType *FTy, bool HasSideEffects, bool IsAlignStack,
+ InlineAsm::AsmDialect AsmDialect, bool canThrow)
+ : AsmString(AsmString), Constraints(Constraints), FTy(FTy),
+ HasSideEffects(HasSideEffects), IsAlignStack(IsAlignStack),
+ AsmDialect(AsmDialect), CanThrow(canThrow) {}
+
+ InlineAsmKeyType(const InlineAsm *Asm, SmallVectorImpl<Constant *> &)
+ : AsmString(Asm->getAsmString()), Constraints(Asm->getConstraintString()),
+ FTy(Asm->getFunctionType()), HasSideEffects(Asm->hasSideEffects()),
+ IsAlignStack(Asm->isAlignStack()), AsmDialect(Asm->getDialect()),
+ CanThrow(Asm->canThrow()) {}
+
+ bool operator==(const InlineAsmKeyType &X) const {
+ return HasSideEffects == X.HasSideEffects &&
+ IsAlignStack == X.IsAlignStack && AsmDialect == X.AsmDialect &&
+ AsmString == X.AsmString && Constraints == X.Constraints &&
+ FTy == X.FTy && CanThrow == X.CanThrow;
+ }
+
+ bool operator==(const InlineAsm *Asm) const {
+ return HasSideEffects == Asm->hasSideEffects() &&
+ IsAlignStack == Asm->isAlignStack() &&
+ AsmDialect == Asm->getDialect() &&
+ AsmString == Asm->getAsmString() &&
+ Constraints == Asm->getConstraintString() &&
+ FTy == Asm->getFunctionType() && CanThrow == Asm->canThrow();
+ }
+
+ unsigned getHash() const {
+ return hash_combine(AsmString, Constraints, HasSideEffects, IsAlignStack,
+ AsmDialect, FTy, CanThrow);
+ }
+
+ using TypeClass = ConstantInfo<InlineAsm>::TypeClass;
+
+ InlineAsm *create(TypeClass *Ty) const {
+ assert(PointerType::getUnqual(FTy) == Ty);
+ return new InlineAsm(FTy, std::string(AsmString), std::string(Constraints),
+ HasSideEffects, IsAlignStack, AsmDialect, CanThrow);
+ }
+};
+
+struct ConstantExprKeyType {
+private:
+ uint8_t Opcode;
+ uint8_t SubclassOptionalData;
+ uint16_t SubclassData;
+ ArrayRef<Constant *> Ops;
+ ArrayRef<int> ShuffleMask;
+ Type *ExplicitTy;
+
+ static ArrayRef<int> getShuffleMaskIfValid(const ConstantExpr *CE) {
+ if (CE->getOpcode() == Instruction::ShuffleVector)
+ return CE->getShuffleMask();
+ return None;
+ }
+
+ static Type *getSourceElementTypeIfValid(const ConstantExpr *CE) {
+ if (auto *GEPCE = dyn_cast<GetElementPtrConstantExpr>(CE))
+ return GEPCE->getSourceElementType();
+ return nullptr;
+ }
+
+public:
+ ConstantExprKeyType(unsigned Opcode, ArrayRef<Constant *> Ops,
+ unsigned short SubclassData = 0,
+ unsigned short SubclassOptionalData = 0,
+ ArrayRef<int> ShuffleMask = None,
+ Type *ExplicitTy = nullptr)
+ : Opcode(Opcode), SubclassOptionalData(SubclassOptionalData),
+ SubclassData(SubclassData), Ops(Ops), ShuffleMask(ShuffleMask),
+ ExplicitTy(ExplicitTy) {}
+
+ ConstantExprKeyType(ArrayRef<Constant *> Operands, const ConstantExpr *CE)
+ : Opcode(CE->getOpcode()),
+ SubclassOptionalData(CE->getRawSubclassOptionalData()),
+ SubclassData(CE->isCompare() ? CE->getPredicate() : 0), Ops(Operands),
+ ShuffleMask(getShuffleMaskIfValid(CE)),
+ ExplicitTy(getSourceElementTypeIfValid(CE)) {}
+
+ ConstantExprKeyType(const ConstantExpr *CE,
+ SmallVectorImpl<Constant *> &Storage)
+ : Opcode(CE->getOpcode()),
+ SubclassOptionalData(CE->getRawSubclassOptionalData()),
+ SubclassData(CE->isCompare() ? CE->getPredicate() : 0),
+ ShuffleMask(getShuffleMaskIfValid(CE)),
+ ExplicitTy(getSourceElementTypeIfValid(CE)) {
+ assert(Storage.empty() && "Expected empty storage");
+ for (unsigned I = 0, E = CE->getNumOperands(); I != E; ++I)
+ Storage.push_back(CE->getOperand(I));
+ Ops = Storage;
+ }
+
+ bool operator==(const ConstantExprKeyType &X) const {
+ return Opcode == X.Opcode && SubclassData == X.SubclassData &&
+ SubclassOptionalData == X.SubclassOptionalData && Ops == X.Ops &&
+ ShuffleMask == X.ShuffleMask && ExplicitTy == X.ExplicitTy;
+ }
+
+ bool operator==(const ConstantExpr *CE) const {
+ if (Opcode != CE->getOpcode())
+ return false;
+ if (SubclassOptionalData != CE->getRawSubclassOptionalData())
+ return false;
+ if (Ops.size() != CE->getNumOperands())
+ return false;
+ if (SubclassData != (CE->isCompare() ? CE->getPredicate() : 0))
+ return false;
+ for (unsigned I = 0, E = Ops.size(); I != E; ++I)
+ if (Ops[I] != CE->getOperand(I))
+ return false;
+ if (ShuffleMask != getShuffleMaskIfValid(CE))
+ return false;
+ if (ExplicitTy != getSourceElementTypeIfValid(CE))
+ return false;
+ return true;
+ }
+
+ unsigned getHash() const {
+ return hash_combine(
+ Opcode, SubclassOptionalData, SubclassData,
+ hash_combine_range(Ops.begin(), Ops.end()),
+ hash_combine_range(ShuffleMask.begin(), ShuffleMask.end()), ExplicitTy);
+ }
+
+ using TypeClass = ConstantInfo<ConstantExpr>::TypeClass;
+
+ ConstantExpr *create(TypeClass *Ty) const {
+ switch (Opcode) {
+ default:
+ if (Instruction::isCast(Opcode) ||
+ (Opcode >= Instruction::UnaryOpsBegin &&
+ Opcode < Instruction::UnaryOpsEnd))
+ return new UnaryConstantExpr(Opcode, Ops[0], Ty);
+ if ((Opcode >= Instruction::BinaryOpsBegin &&
+ Opcode < Instruction::BinaryOpsEnd))
+ return new BinaryConstantExpr(Opcode, Ops[0], Ops[1],
+ SubclassOptionalData);
+ llvm_unreachable("Invalid ConstantExpr!");
+ case Instruction::Select:
+ return new SelectConstantExpr(Ops[0], Ops[1], Ops[2]);
+ case Instruction::ExtractElement:
+ return new ExtractElementConstantExpr(Ops[0], Ops[1]);
+ case Instruction::InsertElement:
+ return new InsertElementConstantExpr(Ops[0], Ops[1], Ops[2]);
+ case Instruction::ShuffleVector:
+ return new ShuffleVectorConstantExpr(Ops[0], Ops[1], ShuffleMask);
+ case Instruction::GetElementPtr:
+ return GetElementPtrConstantExpr::Create(ExplicitTy, Ops[0], Ops.slice(1),
+ Ty, SubclassOptionalData);
+ case Instruction::ICmp:
+ return new CompareConstantExpr(Ty, Instruction::ICmp, SubclassData,
+ Ops[0], Ops[1]);
+ case Instruction::FCmp:
+ return new CompareConstantExpr(Ty, Instruction::FCmp, SubclassData,
+ Ops[0], Ops[1]);
+ }
+ }
+};
+
+// Free memory for a given constant. Assumes the constant has already been
+// removed from all relevant maps.
+void deleteConstant(Constant *C);
+
+template <class ConstantClass> class ConstantUniqueMap {
+public:
+ using ValType = typename ConstantInfo<ConstantClass>::ValType;
+ using TypeClass = typename ConstantInfo<ConstantClass>::TypeClass;
+ using LookupKey = std::pair<TypeClass *, ValType>;
+
+ /// Key and hash together, so that we compute the hash only once and reuse it.
+ using LookupKeyHashed = std::pair<unsigned, LookupKey>;
+
+private:
+ struct MapInfo {
+ using ConstantClassInfo = DenseMapInfo<ConstantClass *>;
+
+ static inline ConstantClass *getEmptyKey() {
+ return ConstantClassInfo::getEmptyKey();
+ }
+
+ static inline ConstantClass *getTombstoneKey() {
+ return ConstantClassInfo::getTombstoneKey();
+ }
+
+ static unsigned getHashValue(const ConstantClass *CP) {
+ SmallVector<Constant *, 32> Storage;
+ return getHashValue(LookupKey(CP->getType(), ValType(CP, Storage)));
+ }
+
+ static bool isEqual(const ConstantClass *LHS, const ConstantClass *RHS) {
+ return LHS == RHS;
+ }
+
+ static unsigned getHashValue(const LookupKey &Val) {
+ return hash_combine(Val.first, Val.second.getHash());
+ }
+
+ static unsigned getHashValue(const LookupKeyHashed &Val) {
+ return Val.first;
+ }
+
+ static bool isEqual(const LookupKey &LHS, const ConstantClass *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ if (LHS.first != RHS->getType())
+ return false;
+ return LHS.second == RHS;
+ }
+
+ static bool isEqual(const LookupKeyHashed &LHS, const ConstantClass *RHS) {
+ return isEqual(LHS.second, RHS);
+ }
+ };
+
+public:
+ using MapTy = DenseSet<ConstantClass *, MapInfo>;
+
+private:
+ MapTy Map;
+
+public:
+ typename MapTy::iterator begin() { return Map.begin(); }
+ typename MapTy::iterator end() { return Map.end(); }
+
+ void freeConstants() {
+ for (auto &I : Map)
+ deleteConstant(I);
+ }
+
+private:
+ ConstantClass *create(TypeClass *Ty, ValType V, LookupKeyHashed &HashKey) {
+ ConstantClass *Result = V.create(Ty);
+
+ assert(Result->getType() == Ty && "Type specified is not correct!");
+ Map.insert_as(Result, HashKey);
+
+ return Result;
+ }
+
+public:
+ /// Return the specified constant from the map, creating it if necessary.
+ ConstantClass *getOrCreate(TypeClass *Ty, ValType V) {
+ LookupKey Key(Ty, V);
+    // Hash once, and reuse it for the lookup and the insertion if needed.
+ LookupKeyHashed Lookup(MapInfo::getHashValue(Key), Key);
+
+ ConstantClass *Result = nullptr;
+
+ auto I = Map.find_as(Lookup);
+ if (I == Map.end())
+ Result = create(Ty, V, Lookup);
+ else
+ Result = *I;
+ assert(Result && "Unexpected nullptr");
+
+ return Result;
+ }
+
+ /// Remove this constant from the map
+ void remove(ConstantClass *CP) {
+ typename MapTy::iterator I = Map.find(CP);
+ assert(I != Map.end() && "Constant not found in constant table!");
+ assert(*I == CP && "Didn't find correct element?");
+ Map.erase(I);
+ }
+
+ ConstantClass *replaceOperandsInPlace(ArrayRef<Constant *> Operands,
+ ConstantClass *CP, Value *From,
+ Constant *To, unsigned NumUpdated = 0,
+ unsigned OperandNo = ~0u) {
+ LookupKey Key(CP->getType(), ValType(Operands, CP));
+    // Hash once, and reuse it for the lookup and the insertion if needed.
+ LookupKeyHashed Lookup(MapInfo::getHashValue(Key), Key);
+
+ auto ItMap = Map.find_as(Lookup);
+ if (ItMap != Map.end())
+ return *ItMap;
+
+ // Update to the new value. Optimize for the case when we have a single
+ // operand that we're changing, but handle bulk updates efficiently.
+ remove(CP);
+ if (NumUpdated == 1) {
+ assert(OperandNo < CP->getNumOperands() && "Invalid index");
+ assert(CP->getOperand(OperandNo) != To && "I didn't contain From!");
+ CP->setOperand(OperandNo, To);
+ } else {
+ for (unsigned I = 0, E = CP->getNumOperands(); I != E; ++I)
+ if (CP->getOperand(I) == From)
+ CP->setOperand(I, To);
+ }
+ Map.insert_as(CP, Lookup);
+ return nullptr;
+ }
+
+ void dump() const {
+ LLVM_DEBUG(dbgs() << "Constant.cpp: ConstantUniqueMap\n");
+ }
+};
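+
+// Typical call pattern (a sketch mirroring the uses in Constants.cpp, with
+// `C1` and `C2` standing for same-typed integer constants): build a key, then
+// let getOrCreate() hash it once and reuse that hash for both the lookup and
+// any insertion.
+//   Constant *Ops[] = {C1, C2};
+//   const ConstantExprKeyType Key(Instruction::Xor, Ops);
+//   ConstantExpr *CE =
+//       C1->getContext().pImpl->ExprConstants.getOrCreate(C1->getType(), Key);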
+
+template <> inline void ConstantUniqueMap<InlineAsm>::freeConstants() {
+ for (auto &I : Map)
+ delete I;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_IR_CONSTANTSCONTEXT_H
diff --git a/contrib/llvm-project/llvm/lib/IR/Core.cpp b/contrib/llvm-project/llvm/lib/IR/Core.cpp
new file mode 100644
index 000000000000..08b7b0e1f956
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Core.cpp
@@ -0,0 +1,4202 @@
+//===-- Core.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the common infrastructure (including the C bindings)
+// for libLLVMCore.a, which implements the LLVM intermediate representation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/Core.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <system_error>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ir"
+
+void llvm::initializeCore(PassRegistry &Registry) {
+ initializeDominatorTreeWrapperPassPass(Registry);
+ initializePrintModulePassWrapperPass(Registry);
+ initializePrintFunctionPassWrapperPass(Registry);
+ initializeSafepointIRVerifierPass(Registry);
+ initializeVerifierLegacyPassPass(Registry);
+}
+
+void LLVMInitializeCore(LLVMPassRegistryRef R) {
+ initializeCore(*unwrap(R));
+}
+
+void LLVMShutdown() {
+ llvm_shutdown();
+}
+
+/*===-- Error handling ----------------------------------------------------===*/
+
+char *LLVMCreateMessage(const char *Message) {
+ return strdup(Message);
+}
+
+void LLVMDisposeMessage(char *Message) {
+ free(Message);
+}
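+
+// A minimal caller-side sketch (placeholder names, not part of these
+// bindings): strings handed out as 'char *' are owned by the caller and
+// must be released with LLVMDisposeMessage:
+//
+//   char *Msg = LLVMCreateMessage("something went wrong");
+//   /* ... report Msg to the client ... */
+//   LLVMDisposeMessage(Msg);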
+
+
+/*===-- Operations on contexts --------------------------------------------===*/
+
+static LLVMContext &getGlobalContext() {
+ static LLVMContext GlobalContext;
+ return GlobalContext;
+}
+
+LLVMContextRef LLVMContextCreate() {
+ return wrap(new LLVMContext());
+}
+
+LLVMContextRef LLVMGetGlobalContext() { return wrap(&getGlobalContext()); }
+
+void LLVMContextSetDiagnosticHandler(LLVMContextRef C,
+ LLVMDiagnosticHandler Handler,
+ void *DiagnosticContext) {
+ unwrap(C)->setDiagnosticHandlerCallBack(
+ LLVM_EXTENSION reinterpret_cast<DiagnosticHandler::DiagnosticHandlerTy>(
+ Handler),
+ DiagnosticContext);
+}
+
+LLVMDiagnosticHandler LLVMContextGetDiagnosticHandler(LLVMContextRef C) {
+ return LLVM_EXTENSION reinterpret_cast<LLVMDiagnosticHandler>(
+ unwrap(C)->getDiagnosticHandlerCallBack());
+}
+
+void *LLVMContextGetDiagnosticContext(LLVMContextRef C) {
+ return unwrap(C)->getDiagnosticContext();
+}
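+
+// A minimal sketch of installing a handler (OnDiag/Ctx are placeholders;
+// assumes <stdio.h> on the client side):
+//
+//   static void OnDiag(LLVMDiagnosticInfoRef DI, void *Unused) {
+//     char *Desc = LLVMGetDiagInfoDescription(DI);
+//     fprintf(stderr, "llvm: %s\n", Desc);
+//     LLVMDisposeMessage(Desc);
+//   }
+//   LLVMContextSetDiagnosticHandler(Ctx, OnDiag, NULL);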
+
+void LLVMContextSetYieldCallback(LLVMContextRef C, LLVMYieldCallback Callback,
+ void *OpaqueHandle) {
+ auto YieldCallback =
+ LLVM_EXTENSION reinterpret_cast<LLVMContext::YieldCallbackTy>(Callback);
+ unwrap(C)->setYieldCallback(YieldCallback, OpaqueHandle);
+}
+
+LLVMBool LLVMContextShouldDiscardValueNames(LLVMContextRef C) {
+ return unwrap(C)->shouldDiscardValueNames();
+}
+
+void LLVMContextSetDiscardValueNames(LLVMContextRef C, LLVMBool Discard) {
+ unwrap(C)->setDiscardValueNames(Discard);
+}
+
+void LLVMContextSetOpaquePointers(LLVMContextRef C, LLVMBool OpaquePointers) {
+ unwrap(C)->setOpaquePointers(OpaquePointers);
+}
+
+void LLVMContextDispose(LLVMContextRef C) {
+ delete unwrap(C);
+}
+
+unsigned LLVMGetMDKindIDInContext(LLVMContextRef C, const char *Name,
+ unsigned SLen) {
+ return unwrap(C)->getMDKindID(StringRef(Name, SLen));
+}
+
+unsigned LLVMGetMDKindID(const char *Name, unsigned SLen) {
+ return LLVMGetMDKindIDInContext(LLVMGetGlobalContext(), Name, SLen);
+}
+
+unsigned LLVMGetEnumAttributeKindForName(const char *Name, size_t SLen) {
+ return Attribute::getAttrKindFromName(StringRef(Name, SLen));
+}
+
+unsigned LLVMGetLastEnumAttributeKind(void) {
+ return Attribute::AttrKind::EndAttrKinds;
+}
+
+LLVMAttributeRef LLVMCreateEnumAttribute(LLVMContextRef C, unsigned KindID,
+ uint64_t Val) {
+ auto &Ctx = *unwrap(C);
+ auto AttrKind = (Attribute::AttrKind)KindID;
+
+ if (AttrKind == Attribute::AttrKind::ByVal) {
+ // After r362128, byval attributes need to have a type attribute. Provide a
+ // NULL one until a proper API is added for this.
+ return wrap(Attribute::getWithByValType(Ctx, nullptr));
+ }
+
+ if (AttrKind == Attribute::AttrKind::StructRet) {
+ // Same as byval.
+ return wrap(Attribute::getWithStructRetType(Ctx, nullptr));
+ }
+
+ return wrap(Attribute::get(Ctx, AttrKind, Val));
+}
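+
+// A minimal sketch of creating an enum attribute by name (Ctx is a
+// placeholder LLVMContextRef; "nounwind" has length 8):
+//
+//   unsigned Kind = LLVMGetEnumAttributeKindForName("nounwind", 8);
+//   LLVMAttributeRef A = LLVMCreateEnumAttribute(Ctx, Kind, 0);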
+
+unsigned LLVMGetEnumAttributeKind(LLVMAttributeRef A) {
+ return unwrap(A).getKindAsEnum();
+}
+
+uint64_t LLVMGetEnumAttributeValue(LLVMAttributeRef A) {
+ auto Attr = unwrap(A);
+ if (Attr.isEnumAttribute())
+ return 0;
+ return Attr.getValueAsInt();
+}
+
+LLVMAttributeRef LLVMCreateTypeAttribute(LLVMContextRef C, unsigned KindID,
+ LLVMTypeRef type_ref) {
+ auto &Ctx = *unwrap(C);
+ auto AttrKind = (Attribute::AttrKind)KindID;
+ return wrap(Attribute::get(Ctx, AttrKind, unwrap(type_ref)));
+}
+
+LLVMTypeRef LLVMGetTypeAttributeValue(LLVMAttributeRef A) {
+ auto Attr = unwrap(A);
+ return wrap(Attr.getValueAsType());
+}
+
+LLVMAttributeRef LLVMCreateStringAttribute(LLVMContextRef C,
+ const char *K, unsigned KLength,
+ const char *V, unsigned VLength) {
+ return wrap(Attribute::get(*unwrap(C), StringRef(K, KLength),
+ StringRef(V, VLength)));
+}
+
+const char *LLVMGetStringAttributeKind(LLVMAttributeRef A,
+ unsigned *Length) {
+ auto S = unwrap(A).getKindAsString();
+ *Length = S.size();
+ return S.data();
+}
+
+const char *LLVMGetStringAttributeValue(LLVMAttributeRef A,
+ unsigned *Length) {
+ auto S = unwrap(A).getValueAsString();
+ *Length = S.size();
+ return S.data();
+}
+
+LLVMBool LLVMIsEnumAttribute(LLVMAttributeRef A) {
+ auto Attr = unwrap(A);
+ return Attr.isEnumAttribute() || Attr.isIntAttribute();
+}
+
+LLVMBool LLVMIsStringAttribute(LLVMAttributeRef A) {
+ return unwrap(A).isStringAttribute();
+}
+
+LLVMBool LLVMIsTypeAttribute(LLVMAttributeRef A) {
+ return unwrap(A).isTypeAttribute();
+}
+
+char *LLVMGetDiagInfoDescription(LLVMDiagnosticInfoRef DI) {
+ std::string MsgStorage;
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+
+ unwrap(DI)->print(DP);
+ Stream.flush();
+
+ return LLVMCreateMessage(MsgStorage.c_str());
+}
+
+LLVMDiagnosticSeverity LLVMGetDiagInfoSeverity(LLVMDiagnosticInfoRef DI) {
+ LLVMDiagnosticSeverity severity;
+
+ switch (unwrap(DI)->getSeverity()) {
+ default:
+ severity = LLVMDSError;
+ break;
+ case DS_Warning:
+ severity = LLVMDSWarning;
+ break;
+ case DS_Remark:
+ severity = LLVMDSRemark;
+ break;
+ case DS_Note:
+ severity = LLVMDSNote;
+ break;
+ }
+
+ return severity;
+}
+
+/*===-- Operations on modules ---------------------------------------------===*/
+
+LLVMModuleRef LLVMModuleCreateWithName(const char *ModuleID) {
+ return wrap(new Module(ModuleID, getGlobalContext()));
+}
+
+LLVMModuleRef LLVMModuleCreateWithNameInContext(const char *ModuleID,
+ LLVMContextRef C) {
+ return wrap(new Module(ModuleID, *unwrap(C)));
+}
+
+void LLVMDisposeModule(LLVMModuleRef M) {
+ delete unwrap(M);
+}
+
+const char *LLVMGetModuleIdentifier(LLVMModuleRef M, size_t *Len) {
+ auto &Str = unwrap(M)->getModuleIdentifier();
+ *Len = Str.length();
+ return Str.c_str();
+}
+
+void LLVMSetModuleIdentifier(LLVMModuleRef M, const char *Ident, size_t Len) {
+ unwrap(M)->setModuleIdentifier(StringRef(Ident, Len));
+}
+
+const char *LLVMGetSourceFileName(LLVMModuleRef M, size_t *Len) {
+ auto &Str = unwrap(M)->getSourceFileName();
+ *Len = Str.length();
+ return Str.c_str();
+}
+
+void LLVMSetSourceFileName(LLVMModuleRef M, const char *Name, size_t Len) {
+ unwrap(M)->setSourceFileName(StringRef(Name, Len));
+}
+
+/*--.. Data layout .........................................................--*/
+const char *LLVMGetDataLayoutStr(LLVMModuleRef M) {
+ return unwrap(M)->getDataLayoutStr().c_str();
+}
+
+const char *LLVMGetDataLayout(LLVMModuleRef M) {
+ return LLVMGetDataLayoutStr(M);
+}
+
+void LLVMSetDataLayout(LLVMModuleRef M, const char *DataLayoutStr) {
+ unwrap(M)->setDataLayout(DataLayoutStr);
+}
+
+/*--.. Target triple .......................................................--*/
+const char *LLVMGetTarget(LLVMModuleRef M) {
+ return unwrap(M)->getTargetTriple().c_str();
+}
+
+void LLVMSetTarget(LLVMModuleRef M, const char *Triple) {
+ unwrap(M)->setTargetTriple(Triple);
+}
+
+/*--.. Module flags ........................................................--*/
+struct LLVMOpaqueModuleFlagEntry {
+ LLVMModuleFlagBehavior Behavior;
+ const char *Key;
+ size_t KeyLen;
+ LLVMMetadataRef Metadata;
+};
+
+static Module::ModFlagBehavior
+map_to_llvmModFlagBehavior(LLVMModuleFlagBehavior Behavior) {
+ switch (Behavior) {
+ case LLVMModuleFlagBehaviorError:
+ return Module::ModFlagBehavior::Error;
+ case LLVMModuleFlagBehaviorWarning:
+ return Module::ModFlagBehavior::Warning;
+ case LLVMModuleFlagBehaviorRequire:
+ return Module::ModFlagBehavior::Require;
+ case LLVMModuleFlagBehaviorOverride:
+ return Module::ModFlagBehavior::Override;
+ case LLVMModuleFlagBehaviorAppend:
+ return Module::ModFlagBehavior::Append;
+ case LLVMModuleFlagBehaviorAppendUnique:
+ return Module::ModFlagBehavior::AppendUnique;
+ }
+ llvm_unreachable("Unknown LLVMModuleFlagBehavior");
+}
+
+static LLVMModuleFlagBehavior
+map_from_llvmModFlagBehavior(Module::ModFlagBehavior Behavior) {
+ switch (Behavior) {
+ case Module::ModFlagBehavior::Error:
+ return LLVMModuleFlagBehaviorError;
+ case Module::ModFlagBehavior::Warning:
+ return LLVMModuleFlagBehaviorWarning;
+ case Module::ModFlagBehavior::Require:
+ return LLVMModuleFlagBehaviorRequire;
+ case Module::ModFlagBehavior::Override:
+ return LLVMModuleFlagBehaviorOverride;
+ case Module::ModFlagBehavior::Append:
+ return LLVMModuleFlagBehaviorAppend;
+ case Module::ModFlagBehavior::AppendUnique:
+ return LLVMModuleFlagBehaviorAppendUnique;
+ default:
+ llvm_unreachable("Unhandled Flag Behavior");
+ }
+}
+
+LLVMModuleFlagEntry *LLVMCopyModuleFlagsMetadata(LLVMModuleRef M, size_t *Len) {
+ SmallVector<Module::ModuleFlagEntry, 8> MFEs;
+ unwrap(M)->getModuleFlagsMetadata(MFEs);
+
+ LLVMOpaqueModuleFlagEntry *Result = static_cast<LLVMOpaqueModuleFlagEntry *>(
+ safe_malloc(MFEs.size() * sizeof(LLVMOpaqueModuleFlagEntry)));
+ for (unsigned i = 0; i < MFEs.size(); ++i) {
+ const auto &ModuleFlag = MFEs[i];
+ Result[i].Behavior = map_from_llvmModFlagBehavior(ModuleFlag.Behavior);
+ Result[i].Key = ModuleFlag.Key->getString().data();
+ Result[i].KeyLen = ModuleFlag.Key->getString().size();
+ Result[i].Metadata = wrap(ModuleFlag.Val);
+ }
+ *Len = MFEs.size();
+ return Result;
+}
+
+void LLVMDisposeModuleFlagsMetadata(LLVMModuleFlagEntry *Entries) {
+ free(Entries);
+}
+
+LLVMModuleFlagBehavior
+LLVMModuleFlagEntriesGetFlagBehavior(LLVMModuleFlagEntry *Entries,
+ unsigned Index) {
+ LLVMOpaqueModuleFlagEntry MFE =
+ static_cast<LLVMOpaqueModuleFlagEntry>(Entries[Index]);
+ return MFE.Behavior;
+}
+
+const char *LLVMModuleFlagEntriesGetKey(LLVMModuleFlagEntry *Entries,
+ unsigned Index, size_t *Len) {
+ LLVMOpaqueModuleFlagEntry MFE =
+ static_cast<LLVMOpaqueModuleFlagEntry>(Entries[Index]);
+ *Len = MFE.KeyLen;
+ return MFE.Key;
+}
+
+LLVMMetadataRef LLVMModuleFlagEntriesGetMetadata(LLVMModuleFlagEntry *Entries,
+ unsigned Index) {
+ LLVMOpaqueModuleFlagEntry MFE =
+ static_cast<LLVMOpaqueModuleFlagEntry>(Entries[Index]);
+ return MFE.Metadata;
+}
+
+LLVMMetadataRef LLVMGetModuleFlag(LLVMModuleRef M,
+ const char *Key, size_t KeyLen) {
+ return wrap(unwrap(M)->getModuleFlag({Key, KeyLen}));
+}
+
+void LLVMAddModuleFlag(LLVMModuleRef M, LLVMModuleFlagBehavior Behavior,
+ const char *Key, size_t KeyLen,
+ LLVMMetadataRef Val) {
+ unwrap(M)->addModuleFlag(map_to_llvmModFlagBehavior(Behavior),
+ {Key, KeyLen}, unwrap(Val));
+}
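+
+// A minimal sketch of walking the copied flag list (M is a placeholder
+// LLVMModuleRef); the array must be released with
+// LLVMDisposeModuleFlagsMetadata:
+//
+//   size_t Count;
+//   LLVMModuleFlagEntry *Flags = LLVMCopyModuleFlagsMetadata(M, &Count);
+//   for (unsigned I = 0; I < Count; ++I) {
+//     size_t KeyLen;
+//     const char *Key = LLVMModuleFlagEntriesGetKey(Flags, I, &KeyLen);
+//     /* ... inspect Key / LLVMModuleFlagEntriesGetMetadata(Flags, I) ... */
+//   }
+//   LLVMDisposeModuleFlagsMetadata(Flags);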
+
+/*--.. Printing modules ....................................................--*/
+
+void LLVMDumpModule(LLVMModuleRef M) {
+ unwrap(M)->print(errs(), nullptr,
+ /*ShouldPreserveUseListOrder=*/false, /*IsForDebug=*/true);
+}
+
+LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
+ char **ErrorMessage) {
+ std::error_code EC;
+ raw_fd_ostream dest(Filename, EC, sys::fs::OF_TextWithCRLF);
+ if (EC) {
+ *ErrorMessage = strdup(EC.message().c_str());
+ return true;
+ }
+
+ unwrap(M)->print(dest, nullptr);
+
+ dest.close();
+
+ if (dest.has_error()) {
+ std::string E = "Error printing to file: " + dest.error().message();
+ *ErrorMessage = strdup(E.c_str());
+ return true;
+ }
+
+ return false;
+}
+
+char *LLVMPrintModuleToString(LLVMModuleRef M) {
+ std::string buf;
+ raw_string_ostream os(buf);
+
+ unwrap(M)->print(os, nullptr);
+ os.flush();
+
+ return strdup(buf.c_str());
+}
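+
+// A minimal sketch (M is a placeholder LLVMModuleRef); the returned string
+// is heap-allocated and freed via LLVMDisposeMessage:
+//
+//   char *IR = LLVMPrintModuleToString(M);
+//   puts(IR);
+//   LLVMDisposeMessage(IR);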
+
+/*--.. Operations on inline assembler ......................................--*/
+void LLVMSetModuleInlineAsm2(LLVMModuleRef M, const char *Asm, size_t Len) {
+ unwrap(M)->setModuleInlineAsm(StringRef(Asm, Len));
+}
+
+void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm) {
+ unwrap(M)->setModuleInlineAsm(StringRef(Asm));
+}
+
+void LLVMAppendModuleInlineAsm(LLVMModuleRef M, const char *Asm, size_t Len) {
+ unwrap(M)->appendModuleInlineAsm(StringRef(Asm, Len));
+}
+
+const char *LLVMGetModuleInlineAsm(LLVMModuleRef M, size_t *Len) {
+ auto &Str = unwrap(M)->getModuleInlineAsm();
+ *Len = Str.length();
+ return Str.c_str();
+}
+
+LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty, char *AsmString,
+ size_t AsmStringSize, char *Constraints,
+ size_t ConstraintsSize, LLVMBool HasSideEffects,
+ LLVMBool IsAlignStack,
+ LLVMInlineAsmDialect Dialect, LLVMBool CanThrow) {
+ InlineAsm::AsmDialect AD;
+ switch (Dialect) {
+ case LLVMInlineAsmDialectATT:
+ AD = InlineAsm::AD_ATT;
+ break;
+ case LLVMInlineAsmDialectIntel:
+ AD = InlineAsm::AD_Intel;
+ break;
+ }
+ return wrap(InlineAsm::get(unwrap<FunctionType>(Ty),
+ StringRef(AsmString, AsmStringSize),
+ StringRef(Constraints, ConstraintsSize),
+ HasSideEffects, IsAlignStack, AD, CanThrow));
+}
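+
+// A minimal sketch of building an inline-asm value (FnTy is a placeholder
+// 'void ()' function type; "nop" has length 3, the constraint string is
+// empty):
+//
+//   LLVMValueRef IA =
+//       LLVMGetInlineAsm(FnTy, "nop", 3, "", 0, /*HasSideEffects=*/1,
+//                        /*IsAlignStack=*/0, LLVMInlineAsmDialectATT,
+//                        /*CanThrow=*/0);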
+
+/*--.. Operations on module contexts ......................................--*/
+LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M) {
+ return wrap(&unwrap(M)->getContext());
+}
+
+
+/*===-- Operations on types -----------------------------------------------===*/
+
+/*--.. Operations on all types (mostly) ....................................--*/
+
+LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
+ switch (unwrap(Ty)->getTypeID()) {
+ case Type::VoidTyID:
+ return LLVMVoidTypeKind;
+ case Type::HalfTyID:
+ return LLVMHalfTypeKind;
+ case Type::BFloatTyID:
+ return LLVMBFloatTypeKind;
+ case Type::FloatTyID:
+ return LLVMFloatTypeKind;
+ case Type::DoubleTyID:
+ return LLVMDoubleTypeKind;
+ case Type::X86_FP80TyID:
+ return LLVMX86_FP80TypeKind;
+ case Type::FP128TyID:
+ return LLVMFP128TypeKind;
+ case Type::PPC_FP128TyID:
+ return LLVMPPC_FP128TypeKind;
+ case Type::LabelTyID:
+ return LLVMLabelTypeKind;
+ case Type::MetadataTyID:
+ return LLVMMetadataTypeKind;
+ case Type::IntegerTyID:
+ return LLVMIntegerTypeKind;
+ case Type::FunctionTyID:
+ return LLVMFunctionTypeKind;
+ case Type::StructTyID:
+ return LLVMStructTypeKind;
+ case Type::ArrayTyID:
+ return LLVMArrayTypeKind;
+ case Type::PointerTyID:
+ return LLVMPointerTypeKind;
+ case Type::FixedVectorTyID:
+ return LLVMVectorTypeKind;
+ case Type::X86_MMXTyID:
+ return LLVMX86_MMXTypeKind;
+ case Type::X86_AMXTyID:
+ return LLVMX86_AMXTypeKind;
+ case Type::TokenTyID:
+ return LLVMTokenTypeKind;
+ case Type::ScalableVectorTyID:
+ return LLVMScalableVectorTypeKind;
+ case Type::DXILPointerTyID:
+ llvm_unreachable("DXIL pointers are unsupported via the C API");
+ }
+ llvm_unreachable("Unhandled TypeID.");
+}
+
+LLVMBool LLVMTypeIsSized(LLVMTypeRef Ty) {
+ return unwrap(Ty)->isSized();
+}
+
+LLVMContextRef LLVMGetTypeContext(LLVMTypeRef Ty) {
+ return wrap(&unwrap(Ty)->getContext());
+}
+
+void LLVMDumpType(LLVMTypeRef Ty) {
+ return unwrap(Ty)->print(errs(), /*IsForDebug=*/true);
+}
+
+char *LLVMPrintTypeToString(LLVMTypeRef Ty) {
+ std::string buf;
+ raw_string_ostream os(buf);
+
+ if (unwrap(Ty))
+ unwrap(Ty)->print(os);
+ else
+ os << "Printing <null> Type";
+
+ os.flush();
+
+ return strdup(buf.c_str());
+}
+
+/*--.. Operations on integer types .........................................--*/
+
+LLVMTypeRef LLVMInt1TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt1Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt8TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt8Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt16TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt16Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt32TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt32Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt64TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt64Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMInt128TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getInt128Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMIntTypeInContext(LLVMContextRef C, unsigned NumBits) {
+ return wrap(IntegerType::get(*unwrap(C), NumBits));
+}
+
+LLVMTypeRef LLVMInt1Type(void) {
+ return LLVMInt1TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt8Type(void) {
+ return LLVMInt8TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt16Type(void) {
+ return LLVMInt16TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt32Type(void) {
+ return LLVMInt32TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt64Type(void) {
+ return LLVMInt64TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMInt128Type(void) {
+ return LLVMInt128TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMIntType(unsigned NumBits) {
+ return LLVMIntTypeInContext(LLVMGetGlobalContext(), NumBits);
+}
+
+unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy) {
+ return unwrap<IntegerType>(IntegerTy)->getBitWidth();
+}
+
+/*--.. Operations on real types ............................................--*/
+
+LLVMTypeRef LLVMHalfTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getHalfTy(*unwrap(C));
+}
+LLVMTypeRef LLVMBFloatTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getBFloatTy(*unwrap(C));
+}
+LLVMTypeRef LLVMFloatTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getFloatTy(*unwrap(C));
+}
+LLVMTypeRef LLVMDoubleTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getDoubleTy(*unwrap(C));
+}
+LLVMTypeRef LLVMX86FP80TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getX86_FP80Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMFP128TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getFP128Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getPPC_FP128Ty(*unwrap(C));
+}
+LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getX86_MMXTy(*unwrap(C));
+}
+LLVMTypeRef LLVMX86AMXTypeInContext(LLVMContextRef C) {
+ return (LLVMTypeRef) Type::getX86_AMXTy(*unwrap(C));
+}
+
+LLVMTypeRef LLVMHalfType(void) {
+ return LLVMHalfTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMBFloatType(void) {
+ return LLVMBFloatTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMFloatType(void) {
+ return LLVMFloatTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMDoubleType(void) {
+ return LLVMDoubleTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMX86FP80Type(void) {
+ return LLVMX86FP80TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMFP128Type(void) {
+ return LLVMFP128TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMPPCFP128Type(void) {
+ return LLVMPPCFP128TypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMX86MMXType(void) {
+ return LLVMX86MMXTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMX86AMXType(void) {
+ return LLVMX86AMXTypeInContext(LLVMGetGlobalContext());
+}
+
+/*--.. Operations on function types ........................................--*/
+
+LLVMTypeRef LLVMFunctionType(LLVMTypeRef ReturnType,
+ LLVMTypeRef *ParamTypes, unsigned ParamCount,
+ LLVMBool IsVarArg) {
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ return wrap(FunctionType::get(unwrap(ReturnType), Tys, IsVarArg != 0));
+}
+
+LLVMBool LLVMIsFunctionVarArg(LLVMTypeRef FunctionTy) {
+ return unwrap<FunctionType>(FunctionTy)->isVarArg();
+}
+
+LLVMTypeRef LLVMGetReturnType(LLVMTypeRef FunctionTy) {
+ return wrap(unwrap<FunctionType>(FunctionTy)->getReturnType());
+}
+
+unsigned LLVMCountParamTypes(LLVMTypeRef FunctionTy) {
+ return unwrap<FunctionType>(FunctionTy)->getNumParams();
+}
+
+void LLVMGetParamTypes(LLVMTypeRef FunctionTy, LLVMTypeRef *Dest) {
+ FunctionType *Ty = unwrap<FunctionType>(FunctionTy);
+ for (Type *T : Ty->params())
+ *Dest++ = wrap(T);
+}
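+
+// A minimal sketch of building 'i32 (i32, i32)' (Ctx is a placeholder
+// LLVMContextRef):
+//
+//   LLVMTypeRef I32 = LLVMInt32TypeInContext(Ctx);
+//   LLVMTypeRef Params[] = {I32, I32};
+//   LLVMTypeRef FnTy = LLVMFunctionType(I32, Params, 2, /*IsVarArg=*/0);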
+
+/*--.. Operations on struct types ..........................................--*/
+
+LLVMTypeRef LLVMStructTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed) {
+ ArrayRef<Type*> Tys(unwrap(ElementTypes), ElementCount);
+ return wrap(StructType::get(*unwrap(C), Tys, Packed != 0));
+}
+
+LLVMTypeRef LLVMStructType(LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed) {
+ return LLVMStructTypeInContext(LLVMGetGlobalContext(), ElementTypes,
+ ElementCount, Packed);
+}
+
+LLVMTypeRef LLVMStructCreateNamed(LLVMContextRef C, const char *Name) {
+ return wrap(StructType::create(*unwrap(C), Name));
+}
+
+const char *LLVMGetStructName(LLVMTypeRef Ty) {
+ StructType *Type = unwrap<StructType>(Ty);
+ if (!Type->hasName())
+ return nullptr;
+ return Type->getName().data();
+}
+
+void LLVMStructSetBody(LLVMTypeRef StructTy, LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed) {
+ ArrayRef<Type*> Tys(unwrap(ElementTypes), ElementCount);
+ unwrap<StructType>(StructTy)->setBody(Tys, Packed != 0);
+}
+
+unsigned LLVMCountStructElementTypes(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->getNumElements();
+}
+
+void LLVMGetStructElementTypes(LLVMTypeRef StructTy, LLVMTypeRef *Dest) {
+ StructType *Ty = unwrap<StructType>(StructTy);
+ for (Type *T : Ty->elements())
+ *Dest++ = wrap(T);
+}
+
+LLVMTypeRef LLVMStructGetTypeAtIndex(LLVMTypeRef StructTy, unsigned i) {
+ StructType *Ty = unwrap<StructType>(StructTy);
+ return wrap(Ty->getTypeAtIndex(i));
+}
+
+LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->isPacked();
+}
+
+LLVMBool LLVMIsOpaqueStruct(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->isOpaque();
+}
+
+LLVMBool LLVMIsLiteralStruct(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->isLiteral();
+}
+
+LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name) {
+ return wrap(StructType::getTypeByName(unwrap(M)->getContext(), Name));
+}
+
+LLVMTypeRef LLVMGetTypeByName2(LLVMContextRef C, const char *Name) {
+ return wrap(StructType::getTypeByName(*unwrap(C), Name));
+}
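+
+// A minimal sketch of a named struct created opaque and filled in later
+// (Ctx/I32 are placeholders):
+//
+//   LLVMTypeRef Pair = LLVMStructCreateNamed(Ctx, "pair");
+//   LLVMTypeRef Elts[] = {I32, I32};
+//   LLVMStructSetBody(Pair, Elts, 2, /*Packed=*/0);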
+
+/*--.. Operations on array, pointer, and vector types (sequence types) .....--*/
+
+void LLVMGetSubtypes(LLVMTypeRef Tp, LLVMTypeRef *Arr) {
+ int i = 0;
+ for (auto *T : unwrap(Tp)->subtypes()) {
+ Arr[i] = wrap(T);
+ i++;
+ }
+}
+
+LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount) {
+ return wrap(ArrayType::get(unwrap(ElementType), ElementCount));
+}
+
+LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace) {
+ return wrap(PointerType::get(unwrap(ElementType), AddressSpace));
+}
+
+LLVMBool LLVMPointerTypeIsOpaque(LLVMTypeRef Ty) {
+ return unwrap(Ty)->isOpaquePointerTy();
+}
+
+LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount) {
+ return wrap(FixedVectorType::get(unwrap(ElementType), ElementCount));
+}
+
+LLVMTypeRef LLVMScalableVectorType(LLVMTypeRef ElementType,
+ unsigned ElementCount) {
+ return wrap(ScalableVectorType::get(unwrap(ElementType), ElementCount));
+}
+
+LLVMTypeRef LLVMGetElementType(LLVMTypeRef WrappedTy) {
+ auto *Ty = unwrap<Type>(WrappedTy);
+ if (auto *PTy = dyn_cast<PointerType>(Ty))
+ return wrap(PTy->getNonOpaquePointerElementType());
+ if (auto *ATy = dyn_cast<ArrayType>(Ty))
+ return wrap(ATy->getElementType());
+ return wrap(cast<VectorType>(Ty)->getElementType());
+}
+
+unsigned LLVMGetNumContainedTypes(LLVMTypeRef Tp) {
+ return unwrap(Tp)->getNumContainedTypes();
+}
+
+unsigned LLVMGetArrayLength(LLVMTypeRef ArrayTy) {
+ return unwrap<ArrayType>(ArrayTy)->getNumElements();
+}
+
+unsigned LLVMGetPointerAddressSpace(LLVMTypeRef PointerTy) {
+ return unwrap<PointerType>(PointerTy)->getAddressSpace();
+}
+
+unsigned LLVMGetVectorSize(LLVMTypeRef VectorTy) {
+ return unwrap<VectorType>(VectorTy)->getElementCount().getKnownMinValue();
+}
+
+/*--.. Operations on other types ...........................................--*/
+
+LLVMTypeRef LLVMPointerTypeInContext(LLVMContextRef C, unsigned AddressSpace) {
+ return wrap(PointerType::get(*unwrap(C), AddressSpace));
+}
+
+LLVMTypeRef LLVMVoidTypeInContext(LLVMContextRef C) {
+ return wrap(Type::getVoidTy(*unwrap(C)));
+}
+LLVMTypeRef LLVMLabelTypeInContext(LLVMContextRef C) {
+ return wrap(Type::getLabelTy(*unwrap(C)));
+}
+LLVMTypeRef LLVMTokenTypeInContext(LLVMContextRef C) {
+ return wrap(Type::getTokenTy(*unwrap(C)));
+}
+LLVMTypeRef LLVMMetadataTypeInContext(LLVMContextRef C) {
+ return wrap(Type::getMetadataTy(*unwrap(C)));
+}
+
+LLVMTypeRef LLVMVoidType(void) {
+ return LLVMVoidTypeInContext(LLVMGetGlobalContext());
+}
+LLVMTypeRef LLVMLabelType(void) {
+ return LLVMLabelTypeInContext(LLVMGetGlobalContext());
+}
+
+/*===-- Operations on values ----------------------------------------------===*/
+
+/*--.. Operations on all values ............................................--*/
+
+LLVMTypeRef LLVMTypeOf(LLVMValueRef Val) {
+ return wrap(unwrap(Val)->getType());
+}
+
+LLVMValueKind LLVMGetValueKind(LLVMValueRef Val) {
+ switch (unwrap(Val)->getValueID()) {
+#define LLVM_C_API 1
+#define HANDLE_VALUE(Name) \
+ case Value::Name##Val: \
+ return LLVM##Name##ValueKind;
+#include "llvm/IR/Value.def"
+ default:
+ return LLVMInstructionValueKind;
+ }
+}
+
+const char *LLVMGetValueName2(LLVMValueRef Val, size_t *Length) {
+ auto *V = unwrap(Val);
+ *Length = V->getName().size();
+ return V->getName().data();
+}
+
+void LLVMSetValueName2(LLVMValueRef Val, const char *Name, size_t NameLen) {
+ unwrap(Val)->setName(StringRef(Name, NameLen));
+}
+
+const char *LLVMGetValueName(LLVMValueRef Val) {
+ return unwrap(Val)->getName().data();
+}
+
+void LLVMSetValueName(LLVMValueRef Val, const char *Name) {
+ unwrap(Val)->setName(Name);
+}
+
+void LLVMDumpValue(LLVMValueRef Val) {
+ unwrap(Val)->print(errs(), /*IsForDebug=*/true);
+}
+
+char* LLVMPrintValueToString(LLVMValueRef Val) {
+ std::string buf;
+ raw_string_ostream os(buf);
+
+ if (unwrap(Val))
+ unwrap(Val)->print(os);
+ else
+ os << "Printing <null> Value";
+
+ os.flush();
+
+ return strdup(buf.c_str());
+}
+
+void LLVMReplaceAllUsesWith(LLVMValueRef OldVal, LLVMValueRef NewVal) {
+ unwrap(OldVal)->replaceAllUsesWith(unwrap(NewVal));
+}
+
+int LLVMHasMetadata(LLVMValueRef Inst) {
+ return unwrap<Instruction>(Inst)->hasMetadata();
+}
+
+LLVMValueRef LLVMGetMetadata(LLVMValueRef Inst, unsigned KindID) {
+ auto *I = unwrap<Instruction>(Inst);
+ assert(I && "Expected instruction");
+ if (auto *MD = I->getMetadata(KindID))
+ return wrap(MetadataAsValue::get(I->getContext(), MD));
+ return nullptr;
+}
+
+// MetadataAsValue uses a canonical format which strips the actual MDNode
+// for an MDNode with just a single constant value, storing just a
+// ConstantAsMetadata. This undoes that canonicalization, reconstructing
+// the MDNode.
+static MDNode *extractMDNode(MetadataAsValue *MAV) {
+ Metadata *MD = MAV->getMetadata();
+ assert((isa<MDNode>(MD) || isa<ConstantAsMetadata>(MD)) &&
+ "Expected a metadata node or a canonicalized constant");
+
+ if (MDNode *N = dyn_cast<MDNode>(MD))
+ return N;
+
+ return MDNode::get(MAV->getContext(), MD);
+}
+
+void LLVMSetMetadata(LLVMValueRef Inst, unsigned KindID, LLVMValueRef Val) {
+ MDNode *N = Val ? extractMDNode(unwrap<MetadataAsValue>(Val)) : nullptr;
+
+ unwrap<Instruction>(Inst)->setMetadata(KindID, N);
+}
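+
+// A minimal sketch of attaching metadata to an instruction (Ctx/Inst/MDVal
+// are placeholders; MDVal is a metadata node wrapped as a value, and
+// "mymd" has length 4):
+//
+//   unsigned Kind = LLVMGetMDKindIDInContext(Ctx, "mymd", 4);
+//   LLVMSetMetadata(Inst, Kind, MDVal);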
+
+struct LLVMOpaqueValueMetadataEntry {
+ unsigned Kind;
+ LLVMMetadataRef Metadata;
+};
+
+using MetadataEntries = SmallVectorImpl<std::pair<unsigned, MDNode *>>;
+static LLVMValueMetadataEntry *
+llvm_getMetadata(size_t *NumEntries,
+ llvm::function_ref<void(MetadataEntries &)> AccessMD) {
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MVEs;
+ AccessMD(MVEs);
+
+ LLVMOpaqueValueMetadataEntry *Result =
+ static_cast<LLVMOpaqueValueMetadataEntry *>(
+ safe_malloc(MVEs.size() * sizeof(LLVMOpaqueValueMetadataEntry)));
+ for (unsigned i = 0; i < MVEs.size(); ++i) {
+ const auto &ModuleFlag = MVEs[i];
+ Result[i].Kind = ModuleFlag.first;
+ Result[i].Metadata = wrap(ModuleFlag.second);
+ }
+ *NumEntries = MVEs.size();
+ return Result;
+}
+
+LLVMValueMetadataEntry *
+LLVMInstructionGetAllMetadataOtherThanDebugLoc(LLVMValueRef Value,
+ size_t *NumEntries) {
+ return llvm_getMetadata(NumEntries, [&Value](MetadataEntries &Entries) {
+ Entries.clear();
+ unwrap<Instruction>(Value)->getAllMetadata(Entries);
+ });
+}
+
+/*--.. Conversion functions ................................................--*/
+
+#define LLVM_DEFINE_VALUE_CAST(name) \
+ LLVMValueRef LLVMIsA##name(LLVMValueRef Val) { \
+ return wrap(static_cast<Value*>(dyn_cast_or_null<name>(unwrap(Val)))); \
+ }
+
+LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DEFINE_VALUE_CAST)
+
+LLVMValueRef LLVMIsAMDNode(LLVMValueRef Val) {
+ if (auto *MD = dyn_cast_or_null<MetadataAsValue>(unwrap(Val)))
+ if (isa<MDNode>(MD->getMetadata()) ||
+ isa<ValueAsMetadata>(MD->getMetadata()))
+ return Val;
+ return nullptr;
+}
+
+LLVMValueRef LLVMIsAMDString(LLVMValueRef Val) {
+ if (auto *MD = dyn_cast_or_null<MetadataAsValue>(unwrap(Val)))
+ if (isa<MDString>(MD->getMetadata()))
+ return Val;
+ return nullptr;
+}
+
+/*--.. Operations on Uses ..................................................--*/
+LLVMUseRef LLVMGetFirstUse(LLVMValueRef Val) {
+ Value *V = unwrap(Val);
+ Value::use_iterator I = V->use_begin();
+ if (I == V->use_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMUseRef LLVMGetNextUse(LLVMUseRef U) {
+ Use *Next = unwrap(U)->getNext();
+ if (Next)
+ return wrap(Next);
+ return nullptr;
+}
+
+LLVMValueRef LLVMGetUser(LLVMUseRef U) {
+ return wrap(unwrap(U)->getUser());
+}
+
+LLVMValueRef LLVMGetUsedValue(LLVMUseRef U) {
+ return wrap(unwrap(U)->get());
+}
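+
+// A minimal sketch of iterating all uses of a value (V is a placeholder
+// LLVMValueRef):
+//
+//   for (LLVMUseRef U = LLVMGetFirstUse(V); U; U = LLVMGetNextUse(U)) {
+//     LLVMValueRef User = LLVMGetUser(U);
+//     /* ... */
+//   }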
+
+/*--.. Operations on Users .................................................--*/
+
+static LLVMValueRef getMDNodeOperandImpl(LLVMContext &Context, const MDNode *N,
+ unsigned Index) {
+ Metadata *Op = N->getOperand(Index);
+ if (!Op)
+ return nullptr;
+ if (auto *C = dyn_cast<ConstantAsMetadata>(Op))
+ return wrap(C->getValue());
+ return wrap(MetadataAsValue::get(Context, Op));
+}
+
+LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index) {
+ Value *V = unwrap(Val);
+ if (auto *MD = dyn_cast<MetadataAsValue>(V)) {
+ if (auto *L = dyn_cast<ValueAsMetadata>(MD->getMetadata())) {
+ assert(Index == 0 && "Function-local metadata can only have one operand");
+ return wrap(L->getValue());
+ }
+ return getMDNodeOperandImpl(V->getContext(),
+ cast<MDNode>(MD->getMetadata()), Index);
+ }
+
+ return wrap(cast<User>(V)->getOperand(Index));
+}
+
+LLVMUseRef LLVMGetOperandUse(LLVMValueRef Val, unsigned Index) {
+ Value *V = unwrap(Val);
+ return wrap(&cast<User>(V)->getOperandUse(Index));
+}
+
+void LLVMSetOperand(LLVMValueRef Val, unsigned Index, LLVMValueRef Op) {
+ unwrap<User>(Val)->setOperand(Index, unwrap(Op));
+}
+
+int LLVMGetNumOperands(LLVMValueRef Val) {
+ Value *V = unwrap(Val);
+ if (isa<MetadataAsValue>(V))
+ return LLVMGetMDNodeNumOperands(Val);
+
+ return cast<User>(V)->getNumOperands();
+}
+
+/*--.. Operations on constants of any type .................................--*/
+
+LLVMValueRef LLVMConstNull(LLVMTypeRef Ty) {
+ return wrap(Constant::getNullValue(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMConstAllOnes(LLVMTypeRef Ty) {
+ return wrap(Constant::getAllOnesValue(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMGetUndef(LLVMTypeRef Ty) {
+ return wrap(UndefValue::get(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMGetPoison(LLVMTypeRef Ty) {
+ return wrap(PoisonValue::get(unwrap(Ty)));
+}
+
+LLVMBool LLVMIsConstant(LLVMValueRef Ty) {
+ return isa<Constant>(unwrap(Ty));
+}
+
+LLVMBool LLVMIsNull(LLVMValueRef Val) {
+ if (Constant *C = dyn_cast<Constant>(unwrap(Val)))
+ return C->isNullValue();
+ return false;
+}
+
+LLVMBool LLVMIsUndef(LLVMValueRef Val) {
+ return isa<UndefValue>(unwrap(Val));
+}
+
+LLVMBool LLVMIsPoison(LLVMValueRef Val) {
+ return isa<PoisonValue>(unwrap(Val));
+}
+
+LLVMValueRef LLVMConstPointerNull(LLVMTypeRef Ty) {
+ return wrap(ConstantPointerNull::get(unwrap<PointerType>(Ty)));
+}
+
+/*--.. Operations on metadata nodes ........................................--*/
+
+LLVMMetadataRef LLVMMDStringInContext2(LLVMContextRef C, const char *Str,
+ size_t SLen) {
+ return wrap(MDString::get(*unwrap(C), StringRef(Str, SLen)));
+}
+
+LLVMMetadataRef LLVMMDNodeInContext2(LLVMContextRef C, LLVMMetadataRef *MDs,
+ size_t Count) {
+ return wrap(MDNode::get(*unwrap(C), ArrayRef<Metadata*>(unwrap(MDs), Count)));
+}
+
+LLVMValueRef LLVMMDStringInContext(LLVMContextRef C, const char *Str,
+ unsigned SLen) {
+ LLVMContext &Context = *unwrap(C);
+ return wrap(MetadataAsValue::get(
+ Context, MDString::get(Context, StringRef(Str, SLen))));
+}
+
+LLVMValueRef LLVMMDString(const char *Str, unsigned SLen) {
+ return LLVMMDStringInContext(LLVMGetGlobalContext(), Str, SLen);
+}
+
+LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals,
+ unsigned Count) {
+ LLVMContext &Context = *unwrap(C);
+ SmallVector<Metadata *, 8> MDs;
+ for (auto *OV : makeArrayRef(Vals, Count)) {
+ Value *V = unwrap(OV);
+ Metadata *MD;
+ if (!V)
+ MD = nullptr;
+ else if (auto *C = dyn_cast<Constant>(V))
+ MD = ConstantAsMetadata::get(C);
+ else if (auto *MDV = dyn_cast<MetadataAsValue>(V)) {
+ MD = MDV->getMetadata();
+ assert(!isa<LocalAsMetadata>(MD) && "Unexpected function-local metadata "
+ "outside of direct argument to call");
+ } else {
+ // This is function-local metadata. Pretend to make an MDNode.
+ assert(Count == 1 &&
+ "Expected only one operand to function-local metadata");
+ return wrap(MetadataAsValue::get(Context, LocalAsMetadata::get(V)));
+ }
+
+ MDs.push_back(MD);
+ }
+ return wrap(MetadataAsValue::get(Context, MDNode::get(Context, MDs)));
+}
+
+LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count) {
+ return LLVMMDNodeInContext(LLVMGetGlobalContext(), Vals, Count);
+}
+
+LLVMValueRef LLVMMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD) {
+ return wrap(MetadataAsValue::get(*unwrap(C), unwrap(MD)));
+}
+
+LLVMMetadataRef LLVMValueAsMetadata(LLVMValueRef Val) {
+ auto *V = unwrap(Val);
+ if (auto *C = dyn_cast<Constant>(V))
+ return wrap(ConstantAsMetadata::get(C));
+ if (auto *MAV = dyn_cast<MetadataAsValue>(V))
+ return wrap(MAV->getMetadata());
+ return wrap(ValueAsMetadata::get(V));
+}
+
+const char *LLVMGetMDString(LLVMValueRef V, unsigned *Length) {
+ if (const auto *MD = dyn_cast<MetadataAsValue>(unwrap(V)))
+ if (const MDString *S = dyn_cast<MDString>(MD->getMetadata())) {
+ *Length = S->getString().size();
+ return S->getString().data();
+ }
+ *Length = 0;
+ return nullptr;
+}
+
+unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V) {
+ auto *MD = cast<MetadataAsValue>(unwrap(V));
+ if (isa<ValueAsMetadata>(MD->getMetadata()))
+ return 1;
+ return cast<MDNode>(MD->getMetadata())->getNumOperands();
+}
+
+LLVMNamedMDNodeRef LLVMGetFirstNamedMetadata(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::named_metadata_iterator I = Mod->named_metadata_begin();
+ if (I == Mod->named_metadata_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMNamedMDNodeRef LLVMGetLastNamedMetadata(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::named_metadata_iterator I = Mod->named_metadata_end();
+ if (I == Mod->named_metadata_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMNamedMDNodeRef LLVMGetNextNamedMetadata(LLVMNamedMDNodeRef NMD) {
+ NamedMDNode *NamedNode = unwrap<NamedMDNode>(NMD);
+ Module::named_metadata_iterator I(NamedNode);
+ if (++I == NamedNode->getParent()->named_metadata_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMNamedMDNodeRef LLVMGetPreviousNamedMetadata(LLVMNamedMDNodeRef NMD) {
+ NamedMDNode *NamedNode = unwrap<NamedMDNode>(NMD);
+ Module::named_metadata_iterator I(NamedNode);
+ if (I == NamedNode->getParent()->named_metadata_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
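+
+// A minimal sketch of walking a module's named metadata (M is a
+// placeholder LLVMModuleRef):
+//
+//   for (LLVMNamedMDNodeRef N = LLVMGetFirstNamedMetadata(M); N;
+//        N = LLVMGetNextNamedMetadata(N)) {
+//     size_t NameLen;
+//     const char *Name = LLVMGetNamedMetadataName(N, &NameLen);
+//     /* ... */
+//   }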
+
+LLVMNamedMDNodeRef LLVMGetNamedMetadata(LLVMModuleRef M,
+ const char *Name, size_t NameLen) {
+ return wrap(unwrap(M)->getNamedMetadata(StringRef(Name, NameLen)));
+}
+
+LLVMNamedMDNodeRef LLVMGetOrInsertNamedMetadata(LLVMModuleRef M,
+ const char *Name, size_t NameLen) {
+ return wrap(unwrap(M)->getOrInsertNamedMetadata({Name, NameLen}));
+}
+
+const char *LLVMGetNamedMetadataName(LLVMNamedMDNodeRef NMD, size_t *NameLen) {
+ NamedMDNode *NamedNode = unwrap<NamedMDNode>(NMD);
+ *NameLen = NamedNode->getName().size();
+ return NamedNode->getName().data();
+}
+
+void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest) {
+ auto *MD = cast<MetadataAsValue>(unwrap(V));
+ if (auto *MDV = dyn_cast<ValueAsMetadata>(MD->getMetadata())) {
+ *Dest = wrap(MDV->getValue());
+ return;
+ }
+ const auto *N = cast<MDNode>(MD->getMetadata());
+ const unsigned numOperands = N->getNumOperands();
+ LLVMContext &Context = unwrap(V)->getContext();
+ for (unsigned i = 0; i < numOperands; i++)
+ Dest[i] = getMDNodeOperandImpl(Context, N, i);
+}
+
+unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char *Name) {
+ if (NamedMDNode *N = unwrap(M)->getNamedMetadata(Name)) {
+ return N->getNumOperands();
+ }
+ return 0;
+}
+
+void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char *Name,
+ LLVMValueRef *Dest) {
+ NamedMDNode *N = unwrap(M)->getNamedMetadata(Name);
+ if (!N)
+ return;
+ LLVMContext &Context = unwrap(M)->getContext();
+ for (unsigned i = 0; i < N->getNumOperands(); i++)
+ Dest[i] = wrap(MetadataAsValue::get(Context, N->getOperand(i)));
+}
+
+void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char *Name,
+ LLVMValueRef Val) {
+ NamedMDNode *N = unwrap(M)->getOrInsertNamedMetadata(Name);
+ if (!N)
+ return;
+ if (!Val)
+ return;
+ N->addOperand(extractMDNode(unwrap<MetadataAsValue>(Val)));
+}
+
+const char *LLVMGetDebugLocDirectory(LLVMValueRef Val, unsigned *Length) {
+ if (!Length) return nullptr;
+ StringRef S;
+ if (const auto *I = dyn_cast<Instruction>(unwrap(Val))) {
+ if (const auto &DL = I->getDebugLoc()) {
+ S = DL->getDirectory();
+ }
+ } else if (const auto *GV = dyn_cast<GlobalVariable>(unwrap(Val))) {
+ SmallVector<DIGlobalVariableExpression *, 1> GVEs;
+ GV->getDebugInfo(GVEs);
+ if (GVEs.size())
+ if (const DIGlobalVariable *DGV = GVEs[0]->getVariable())
+ S = DGV->getDirectory();
+ } else if (const auto *F = dyn_cast<Function>(unwrap(Val))) {
+ if (const DISubprogram *DSP = F->getSubprogram())
+ S = DSP->getDirectory();
+ } else {
+ assert(0 && "Expected Instruction, GlobalVariable or Function");
+ return nullptr;
+ }
+ *Length = S.size();
+ return S.data();
+}
+
+const char *LLVMGetDebugLocFilename(LLVMValueRef Val, unsigned *Length) {
+ if (!Length) return nullptr;
+ StringRef S;
+ if (const auto *I = dyn_cast<Instruction>(unwrap(Val))) {
+ if (const auto &DL = I->getDebugLoc()) {
+ S = DL->getFilename();
+ }
+ } else if (const auto *GV = dyn_cast<GlobalVariable>(unwrap(Val))) {
+ SmallVector<DIGlobalVariableExpression *, 1> GVEs;
+ GV->getDebugInfo(GVEs);
+ if (GVEs.size())
+ if (const DIGlobalVariable *DGV = GVEs[0]->getVariable())
+ S = DGV->getFilename();
+ } else if (const auto *F = dyn_cast<Function>(unwrap(Val))) {
+ if (const DISubprogram *DSP = F->getSubprogram())
+ S = DSP->getFilename();
+ } else {
+ assert(0 && "Expected Instruction, GlobalVariable or Function");
+ return nullptr;
+ }
+ *Length = S.size();
+ return S.data();
+}
+
+unsigned LLVMGetDebugLocLine(LLVMValueRef Val) {
+ unsigned L = 0;
+ if (const auto *I = dyn_cast<Instruction>(unwrap(Val))) {
+ if (const auto &DL = I->getDebugLoc()) {
+ L = DL->getLine();
+ }
+ } else if (const auto *GV = dyn_cast<GlobalVariable>(unwrap(Val))) {
+ SmallVector<DIGlobalVariableExpression *, 1> GVEs;
+ GV->getDebugInfo(GVEs);
+ if (GVEs.size())
+ if (const DIGlobalVariable *DGV = GVEs[0]->getVariable())
+ L = DGV->getLine();
+ } else if (const auto *F = dyn_cast<Function>(unwrap(Val))) {
+ if (const DISubprogram *DSP = F->getSubprogram())
+ L = DSP->getLine();
+ } else {
+ assert(0 && "Expected Instruction, GlobalVariable or Function");
+ return -1;
+ }
+ return L;
+}
+
+unsigned LLVMGetDebugLocColumn(LLVMValueRef Val) {
+ unsigned C = 0;
+ if (const auto *I = dyn_cast<Instruction>(unwrap(Val)))
+ if (const auto &DL = I->getDebugLoc())
+ C = DL->getColumn();
+ return C;
+}
+
+/*--.. Operations on scalar constants ......................................--*/
+
+LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
+ LLVMBool SignExtend) {
+ return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), N, SignExtend != 0));
+}
+
+LLVMValueRef LLVMConstIntOfArbitraryPrecision(LLVMTypeRef IntTy,
+ unsigned NumWords,
+ const uint64_t Words[]) {
+ IntegerType *Ty = unwrap<IntegerType>(IntTy);
+ return wrap(ConstantInt::get(Ty->getContext(),
+ APInt(Ty->getBitWidth(),
+ makeArrayRef(Words, NumWords))));
+}
+
+LLVMValueRef LLVMConstIntOfString(LLVMTypeRef IntTy, const char Str[],
+ uint8_t Radix) {
+ return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), StringRef(Str),
+ Radix));
+}
+
+LLVMValueRef LLVMConstIntOfStringAndSize(LLVMTypeRef IntTy, const char Str[],
+ unsigned SLen, uint8_t Radix) {
+ return wrap(ConstantInt::get(unwrap<IntegerType>(IntTy), StringRef(Str, SLen),
+ Radix));
+}
+
+LLVMValueRef LLVMConstReal(LLVMTypeRef RealTy, double N) {
+ return wrap(ConstantFP::get(unwrap(RealTy), N));
+}
+
+LLVMValueRef LLVMConstRealOfString(LLVMTypeRef RealTy, const char *Text) {
+ return wrap(ConstantFP::get(unwrap(RealTy), StringRef(Text)));
+}
+
+LLVMValueRef LLVMConstRealOfStringAndSize(LLVMTypeRef RealTy, const char Str[],
+ unsigned SLen) {
+ return wrap(ConstantFP::get(unwrap(RealTy), StringRef(Str, SLen)));
+}
+
+unsigned long long LLVMConstIntGetZExtValue(LLVMValueRef ConstantVal) {
+ return unwrap<ConstantInt>(ConstantVal)->getZExtValue();
+}
+
+long long LLVMConstIntGetSExtValue(LLVMValueRef ConstantVal) {
+ return unwrap<ConstantInt>(ConstantVal)->getSExtValue();
+}
+
+double LLVMConstRealGetDouble(LLVMValueRef ConstantVal, LLVMBool *LosesInfo) {
+ ConstantFP *cFP = unwrap<ConstantFP>(ConstantVal);
+ Type *Ty = cFP->getType();
+
+ if (Ty->isHalfTy() || Ty->isBFloatTy() || Ty->isFloatTy() ||
+ Ty->isDoubleTy()) {
+ *LosesInfo = false;
+ return cFP->getValueAPF().convertToDouble();
+ }
+
+ bool APFLosesInfo;
+ APFloat APF = cFP->getValueAPF();
+ APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &APFLosesInfo);
+ *LosesInfo = APFLosesInfo;
+ return APF.convertToDouble();
+}
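+
+// A minimal sketch of creating and reading back an integer constant (Ctx
+// is a placeholder LLVMContextRef):
+//
+//   LLVMValueRef C42 = LLVMConstInt(LLVMInt32TypeInContext(Ctx), 42, 0);
+//   unsigned long long V = LLVMConstIntGetZExtValue(C42); /* == 42 */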
+
+/*--.. Operations on composite constants ...................................--*/
+
+LLVMValueRef LLVMConstStringInContext(LLVMContextRef C, const char *Str,
+ unsigned Length,
+ LLVMBool DontNullTerminate) {
+ /* Inverted the sense of AddNull because ', 0)' is a
+ better mnemonic for null termination than ', 1)'. */
+ return wrap(ConstantDataArray::getString(*unwrap(C), StringRef(Str, Length),
+ DontNullTerminate == 0));
+}
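+
+// A minimal sketch: passing DontNullTerminate == 0 yields a
+// null-terminated constant, i.e. [3 x i8] c"hi\00" (Ctx is a placeholder):
+//
+//   LLVMValueRef S = LLVMConstStringInContext(Ctx, "hi", 2, 0);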
+
+LLVMValueRef LLVMConstString(const char *Str, unsigned Length,
+ LLVMBool DontNullTerminate) {
+ return LLVMConstStringInContext(LLVMGetGlobalContext(), Str, Length,
+ DontNullTerminate);
+}
+
+LLVMValueRef LLVMGetAggregateElement(LLVMValueRef C, unsigned Idx) {
+ return wrap(unwrap<Constant>(C)->getAggregateElement(Idx));
+}
+
+LLVMValueRef LLVMGetElementAsConstant(LLVMValueRef C, unsigned idx) {
+ return wrap(unwrap<ConstantDataSequential>(C)->getElementAsConstant(idx));
+}
+
+LLVMBool LLVMIsConstantString(LLVMValueRef C) {
+ return unwrap<ConstantDataSequential>(C)->isString();
+}
+
+const char *LLVMGetAsString(LLVMValueRef C, size_t *Length) {
+ StringRef Str = unwrap<ConstantDataSequential>(C)->getAsString();
+ *Length = Str.size();
+ return Str.data();
+}
+
+LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy,
+ LLVMValueRef *ConstantVals, unsigned Length) {
+ ArrayRef<Constant*> V(unwrap<Constant>(ConstantVals, Length), Length);
+ return wrap(ConstantArray::get(ArrayType::get(unwrap(ElementTy), Length), V));
+}
+
+LLVMValueRef LLVMConstStructInContext(LLVMContextRef C,
+ LLVMValueRef *ConstantVals,
+ unsigned Count, LLVMBool Packed) {
+ Constant **Elements = unwrap<Constant>(ConstantVals, Count);
+ return wrap(ConstantStruct::getAnon(*unwrap(C), makeArrayRef(Elements, Count),
+ Packed != 0));
+}
+
+LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count,
+ LLVMBool Packed) {
+ return LLVMConstStructInContext(LLVMGetGlobalContext(), ConstantVals, Count,
+ Packed);
+}
+
+LLVMValueRef LLVMConstNamedStruct(LLVMTypeRef StructTy,
+ LLVMValueRef *ConstantVals,
+ unsigned Count) {
+ Constant **Elements = unwrap<Constant>(ConstantVals, Count);
+ StructType *Ty = cast<StructType>(unwrap(StructTy));
+
+ return wrap(ConstantStruct::get(Ty, makeArrayRef(Elements, Count)));
+}
+
+LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size) {
+ return wrap(ConstantVector::get(makeArrayRef(
+ unwrap<Constant>(ScalarConstantVals, Size), Size)));
+}
+
+/*--.. Opcode mapping .....................................................--*/
+
+static LLVMOpcode map_to_llvmopcode(int opcode) {
+ switch (opcode) {
+ default: llvm_unreachable("Unhandled Opcode.");
+#define HANDLE_INST(num, opc, clas) case num: return LLVM##opc;
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_INST
+ }
+}
+
+static int map_from_llvmopcode(LLVMOpcode code) {
+ switch (code) {
+#define HANDLE_INST(num, opc, clas) case LLVM##opc: return num;
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_INST
+ }
+ llvm_unreachable("Unhandled Opcode.");
+}
+
+/*--.. Constant expressions ................................................--*/
+
+LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal) {
+ return map_to_llvmopcode(unwrap<ConstantExpr>(ConstantVal)->getOpcode());
+}
+
+LLVMValueRef LLVMAlignOf(LLVMTypeRef Ty) {
+ return wrap(ConstantExpr::getAlignOf(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty) {
+ return wrap(ConstantExpr::getSizeOf(unwrap(Ty)));
+}
+
+LLVMValueRef LLVMConstNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNeg(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstNSWNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNSWNeg(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNUWNeg(unwrap<Constant>(ConstantVal)));
+}
+
+
+LLVMValueRef LLVMConstFNeg(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getFNeg(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstNot(LLVMValueRef ConstantVal) {
+ return wrap(ConstantExpr::getNot(unwrap<Constant>(ConstantVal)));
+}
+
+LLVMValueRef LLVMConstAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getAdd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNSWAdd(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNSWAdd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNUWAdd(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNUWAdd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getSub(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNSWSub(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNSWSub(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNUWSub(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNUWSub(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getMul(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNSWMul(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNSWMul(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstNUWMul(LLVMValueRef LHSConstant,
+ LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getNUWMul(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstAnd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getAnd(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstOr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getOr(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstXor(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getXor(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstICmp(LLVMIntPredicate Predicate,
+ LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getICmp(Predicate,
+ unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstFCmp(LLVMRealPredicate Predicate,
+ LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getFCmp(Predicate,
+ unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstShl(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getShl(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getLShr(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant) {
+ return wrap(ConstantExpr::getAShr(unwrap<Constant>(LHSConstant),
+ unwrap<Constant>(RHSConstant)));
+}
+
+LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices, unsigned NumIndices) {
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ Constant *Val = unwrap<Constant>(ConstantVal);
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
+ return wrap(ConstantExpr::getGetElementPtr(Ty, Val, IdxList));
+}
+
+LLVMValueRef LLVMConstGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices, unsigned NumIndices) {
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ Constant *Val = unwrap<Constant>(ConstantVal);
+ return wrap(ConstantExpr::getGetElementPtr(unwrap(Ty), Val, IdxList));
+}
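+
+// A minimal sketch of a constant GEP with an explicit source type
+// (I64/StructTy/GlobalVal are placeholders for an i64 type, the pointee
+// struct type, and a constant pointer):
+//
+//   LLVMValueRef Idx[] = {LLVMConstInt(I64, 0, 0), LLVMConstInt(I64, 1, 0)};
+//   LLVMValueRef GEP = LLVMConstGEP2(StructTy, GlobalVal, Idx, 2);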
+
+LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices) {
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ Constant *Val = unwrap<Constant>(ConstantVal);
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
+ return wrap(ConstantExpr::getInBoundsGetElementPtr(Ty, Val, IdxList));
+}
+
+LLVMValueRef LLVMConstInBoundsGEP2(LLVMTypeRef Ty, LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices) {
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ Constant *Val = unwrap<Constant>(ConstantVal);
+ return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList));
+}
+
+LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getTrunc(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getSExt(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getZExt(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPTrunc(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPExtend(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstUIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getUIToFP(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getSIToFP(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPToUI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPToUI(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstFPToSI(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPToSI(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstPtrToInt(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getPtrToInt(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstIntToPtr(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getIntToPtr(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstBitCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstAddrSpaceCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getAddrSpaceCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstZExtOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getZExtOrBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSExtOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getSExtOrBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstTruncOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getTruncOrBitCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstPointerCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getPointerCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstIntCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType,
+ LLVMBool isSigned) {
+ return wrap(ConstantExpr::getIntegerCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType), isSigned));
+}
+
+LLVMValueRef LLVMConstFPCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
+ return wrap(ConstantExpr::getFPCast(unwrap<Constant>(ConstantVal),
+ unwrap(ToType)));
+}
+
+LLVMValueRef LLVMConstSelect(LLVMValueRef ConstantCondition,
+ LLVMValueRef ConstantIfTrue,
+ LLVMValueRef ConstantIfFalse) {
+ return wrap(ConstantExpr::getSelect(unwrap<Constant>(ConstantCondition),
+ unwrap<Constant>(ConstantIfTrue),
+ unwrap<Constant>(ConstantIfFalse)));
+}
+
+LLVMValueRef LLVMConstExtractElement(LLVMValueRef VectorConstant,
+ LLVMValueRef IndexConstant) {
+ return wrap(ConstantExpr::getExtractElement(unwrap<Constant>(VectorConstant),
+ unwrap<Constant>(IndexConstant)));
+}
+
+LLVMValueRef LLVMConstInsertElement(LLVMValueRef VectorConstant,
+ LLVMValueRef ElementValueConstant,
+ LLVMValueRef IndexConstant) {
+ return wrap(ConstantExpr::getInsertElement(unwrap<Constant>(VectorConstant),
+ unwrap<Constant>(ElementValueConstant),
+ unwrap<Constant>(IndexConstant)));
+}
+
+LLVMValueRef LLVMConstShuffleVector(LLVMValueRef VectorAConstant,
+ LLVMValueRef VectorBConstant,
+ LLVMValueRef MaskConstant) {
+ SmallVector<int, 16> IntMask;
+ ShuffleVectorInst::getShuffleMask(unwrap<Constant>(MaskConstant), IntMask);
+ return wrap(ConstantExpr::getShuffleVector(unwrap<Constant>(VectorAConstant),
+ unwrap<Constant>(VectorBConstant),
+ IntMask));
+}
+
+LLVMValueRef LLVMConstInlineAsm(LLVMTypeRef Ty, const char *AsmString,
+ const char *Constraints,
+ LLVMBool HasSideEffects,
+ LLVMBool IsAlignStack) {
+ return wrap(InlineAsm::get(dyn_cast<FunctionType>(unwrap(Ty)), AsmString,
+ Constraints, HasSideEffects, IsAlignStack));
+}
+
+LLVMValueRef LLVMBlockAddress(LLVMValueRef F, LLVMBasicBlockRef BB) {
+ return wrap(BlockAddress::get(unwrap<Function>(F), unwrap(BB)));
+}
+
+/*--.. Operations on global variables, functions, and aliases (globals) ....--*/
+
+LLVMModuleRef LLVMGetGlobalParent(LLVMValueRef Global) {
+ return wrap(unwrap<GlobalValue>(Global)->getParent());
+}
+
+LLVMBool LLVMIsDeclaration(LLVMValueRef Global) {
+ return unwrap<GlobalValue>(Global)->isDeclaration();
+}
+
+LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
+ switch (unwrap<GlobalValue>(Global)->getLinkage()) {
+ case GlobalValue::ExternalLinkage:
+ return LLVMExternalLinkage;
+ case GlobalValue::AvailableExternallyLinkage:
+ return LLVMAvailableExternallyLinkage;
+ case GlobalValue::LinkOnceAnyLinkage:
+ return LLVMLinkOnceAnyLinkage;
+ case GlobalValue::LinkOnceODRLinkage:
+ return LLVMLinkOnceODRLinkage;
+ case GlobalValue::WeakAnyLinkage:
+ return LLVMWeakAnyLinkage;
+ case GlobalValue::WeakODRLinkage:
+ return LLVMWeakODRLinkage;
+ case GlobalValue::AppendingLinkage:
+ return LLVMAppendingLinkage;
+ case GlobalValue::InternalLinkage:
+ return LLVMInternalLinkage;
+ case GlobalValue::PrivateLinkage:
+ return LLVMPrivateLinkage;
+ case GlobalValue::ExternalWeakLinkage:
+ return LLVMExternalWeakLinkage;
+ case GlobalValue::CommonLinkage:
+ return LLVMCommonLinkage;
+ }
+
+ llvm_unreachable("Invalid GlobalValue linkage!");
+}
+
+void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
+ GlobalValue *GV = unwrap<GlobalValue>(Global);
+
+ switch (Linkage) {
+ case LLVMExternalLinkage:
+ GV->setLinkage(GlobalValue::ExternalLinkage);
+ break;
+ case LLVMAvailableExternallyLinkage:
+ GV->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ break;
+ case LLVMLinkOnceAnyLinkage:
+ GV->setLinkage(GlobalValue::LinkOnceAnyLinkage);
+ break;
+ case LLVMLinkOnceODRLinkage:
+ GV->setLinkage(GlobalValue::LinkOnceODRLinkage);
+ break;
+ case LLVMLinkOnceODRAutoHideLinkage:
+ LLVM_DEBUG(
+ errs() << "LLVMSetLinkage(): LLVMLinkOnceODRAutoHideLinkage is no "
+ "longer supported.");
+ break;
+ case LLVMWeakAnyLinkage:
+ GV->setLinkage(GlobalValue::WeakAnyLinkage);
+ break;
+ case LLVMWeakODRLinkage:
+ GV->setLinkage(GlobalValue::WeakODRLinkage);
+ break;
+ case LLVMAppendingLinkage:
+ GV->setLinkage(GlobalValue::AppendingLinkage);
+ break;
+ case LLVMInternalLinkage:
+ GV->setLinkage(GlobalValue::InternalLinkage);
+ break;
+ case LLVMPrivateLinkage:
+ GV->setLinkage(GlobalValue::PrivateLinkage);
+ break;
+ case LLVMLinkerPrivateLinkage:
+ GV->setLinkage(GlobalValue::PrivateLinkage);
+ break;
+ case LLVMLinkerPrivateWeakLinkage:
+ GV->setLinkage(GlobalValue::PrivateLinkage);
+ break;
+ case LLVMDLLImportLinkage:
+ LLVM_DEBUG(
+ errs()
+ << "LLVMSetLinkage(): LLVMDLLImportLinkage is no longer supported.");
+ break;
+ case LLVMDLLExportLinkage:
+ LLVM_DEBUG(
+ errs()
+ << "LLVMSetLinkage(): LLVMDLLExportLinkage is no longer supported.");
+ break;
+ case LLVMExternalWeakLinkage:
+ GV->setLinkage(GlobalValue::ExternalWeakLinkage);
+ break;
+ case LLVMGhostLinkage:
+ LLVM_DEBUG(
+ errs() << "LLVMSetLinkage(): LLVMGhostLinkage is no longer supported.");
+ break;
+ case LLVMCommonLinkage:
+ GV->setLinkage(GlobalValue::CommonLinkage);
+ break;
+ }
+}
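+
+// Usage sketch (hedged; `G` is an assumed LLVMValueRef wrapping a global):
+// round-trip the linkage through the C enum, demoting plain external
+// definitions to private.
+//
+//   if (!LLVMIsDeclaration(G) && LLVMGetLinkage(G) == LLVMExternalLinkage)
+//     LLVMSetLinkage(G, LLVMPrivateLinkage);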
+
+const char *LLVMGetSection(LLVMValueRef Global) {
+ // Using .data() is safe because of how GlobalObject::setSection is
+ // implemented.
+ return unwrap<GlobalValue>(Global)->getSection().data();
+}
+
+void LLVMSetSection(LLVMValueRef Global, const char *Section) {
+ unwrap<GlobalObject>(Global)->setSection(Section);
+}
+
+LLVMVisibility LLVMGetVisibility(LLVMValueRef Global) {
+ return static_cast<LLVMVisibility>(
+ unwrap<GlobalValue>(Global)->getVisibility());
+}
+
+void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz) {
+ unwrap<GlobalValue>(Global)
+ ->setVisibility(static_cast<GlobalValue::VisibilityTypes>(Viz));
+}
+
+LLVMDLLStorageClass LLVMGetDLLStorageClass(LLVMValueRef Global) {
+ return static_cast<LLVMDLLStorageClass>(
+ unwrap<GlobalValue>(Global)->getDLLStorageClass());
+}
+
+void LLVMSetDLLStorageClass(LLVMValueRef Global, LLVMDLLStorageClass Class) {
+ unwrap<GlobalValue>(Global)->setDLLStorageClass(
+ static_cast<GlobalValue::DLLStorageClassTypes>(Class));
+}
+
+LLVMUnnamedAddr LLVMGetUnnamedAddress(LLVMValueRef Global) {
+ switch (unwrap<GlobalValue>(Global)->getUnnamedAddr()) {
+ case GlobalVariable::UnnamedAddr::None:
+ return LLVMNoUnnamedAddr;
+ case GlobalVariable::UnnamedAddr::Local:
+ return LLVMLocalUnnamedAddr;
+ case GlobalVariable::UnnamedAddr::Global:
+ return LLVMGlobalUnnamedAddr;
+ }
+ llvm_unreachable("Unknown UnnamedAddr kind!");
+}
+
+void LLVMSetUnnamedAddress(LLVMValueRef Global, LLVMUnnamedAddr UnnamedAddr) {
+ GlobalValue *GV = unwrap<GlobalValue>(Global);
+
+ switch (UnnamedAddr) {
+ case LLVMNoUnnamedAddr:
+ return GV->setUnnamedAddr(GlobalVariable::UnnamedAddr::None);
+ case LLVMLocalUnnamedAddr:
+ return GV->setUnnamedAddr(GlobalVariable::UnnamedAddr::Local);
+ case LLVMGlobalUnnamedAddr:
+ return GV->setUnnamedAddr(GlobalVariable::UnnamedAddr::Global);
+ }
+}
+
+LLVMBool LLVMHasUnnamedAddr(LLVMValueRef Global) {
+ return unwrap<GlobalValue>(Global)->hasGlobalUnnamedAddr();
+}
+
+void LLVMSetUnnamedAddr(LLVMValueRef Global, LLVMBool HasUnnamedAddr) {
+ unwrap<GlobalValue>(Global)->setUnnamedAddr(
+ HasUnnamedAddr ? GlobalValue::UnnamedAddr::Global
+ : GlobalValue::UnnamedAddr::None);
+}
+
+LLVMTypeRef LLVMGlobalGetValueType(LLVMValueRef Global) {
+ return wrap(unwrap<GlobalValue>(Global)->getValueType());
+}
+
+/*--.. Operations on global variables, load and store instructions .........--*/
+
+unsigned LLVMGetAlignment(LLVMValueRef V) {
+ Value *P = unwrap<Value>(V);
+ if (GlobalObject *GV = dyn_cast<GlobalObject>(P))
+ return GV->getAlign() ? GV->getAlign()->value() : 0;
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
+ return AI->getAlign().value();
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->getAlign().value();
+ if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ return SI->getAlign().value();
+ if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(P))
+ return RMWI->getAlign().value();
+ if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(P))
+ return CXI->getAlign().value();
+
+ llvm_unreachable(
+ "only GlobalValue, AllocaInst, LoadInst, StoreInst, AtomicRMWInst, "
+ "and AtomicCmpXchgInst have alignment");
+}
+
+void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) {
+ Value *P = unwrap<Value>(V);
+ if (GlobalObject *GV = dyn_cast<GlobalObject>(P))
+ GV->setAlignment(MaybeAlign(Bytes));
+ else if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
+ AI->setAlignment(Align(Bytes));
+ else if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ LI->setAlignment(Align(Bytes));
+ else if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ SI->setAlignment(Align(Bytes));
+ else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(P))
+ RMWI->setAlignment(Align(Bytes));
+ else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(P))
+ CXI->setAlignment(Align(Bytes));
+ else
+    llvm_unreachable(
+        "only GlobalValue, AllocaInst, LoadInst, StoreInst, AtomicRMWInst, "
+        "and AtomicCmpXchgInst have alignment");
+}
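+
+// Usage sketch: raise the alignment of a value that carries one (global,
+// alloca, load, store, atomicrmw, or cmpxchg). `V` is an assumed
+// LLVMValueRef of such a kind; anything else trips the unreachable above.
+//
+//   if (LLVMGetAlignment(V) < 16)
+//     LLVMSetAlignment(V, 16);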
+
+LLVMValueMetadataEntry *LLVMGlobalCopyAllMetadata(LLVMValueRef Value,
+ size_t *NumEntries) {
+ return llvm_getMetadata(NumEntries, [&Value](MetadataEntries &Entries) {
+ Entries.clear();
+ if (Instruction *Instr = dyn_cast<Instruction>(unwrap(Value))) {
+ Instr->getAllMetadata(Entries);
+ } else {
+ unwrap<GlobalObject>(Value)->getAllMetadata(Entries);
+ }
+ });
+}
+
+unsigned LLVMValueMetadataEntriesGetKind(LLVMValueMetadataEntry *Entries,
+ unsigned Index) {
+ LLVMOpaqueValueMetadataEntry MVE =
+ static_cast<LLVMOpaqueValueMetadataEntry>(Entries[Index]);
+ return MVE.Kind;
+}
+
+LLVMMetadataRef
+LLVMValueMetadataEntriesGetMetadata(LLVMValueMetadataEntry *Entries,
+ unsigned Index) {
+ LLVMOpaqueValueMetadataEntry MVE =
+ static_cast<LLVMOpaqueValueMetadataEntry>(Entries[Index]);
+ return MVE.Metadata;
+}
+
+void LLVMDisposeValueMetadataEntries(LLVMValueMetadataEntry *Entries) {
+ free(Entries);
+}
+
+void LLVMGlobalSetMetadata(LLVMValueRef Global, unsigned Kind,
+ LLVMMetadataRef MD) {
+ unwrap<GlobalObject>(Global)->setMetadata(Kind, unwrap<MDNode>(MD));
+}
+
+void LLVMGlobalEraseMetadata(LLVMValueRef Global, unsigned Kind) {
+ unwrap<GlobalObject>(Global)->eraseMetadata(Kind);
+}
+
+void LLVMGlobalClearMetadata(LLVMValueRef Global) {
+ unwrap<GlobalObject>(Global)->clearMetadata();
+}
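+
+// Usage sketch: the copy/inspect/dispose triple is the intended pattern for
+// reading attached metadata. `G` and `KindID` are assumed inputs.
+//
+//   size_t N;
+//   LLVMValueMetadataEntry *Es = LLVMGlobalCopyAllMetadata(G, &N);
+//   for (unsigned I = 0; I < N; ++I)
+//     if (LLVMValueMetadataEntriesGetKind(Es, I) == KindID)
+//       LLVMGlobalEraseMetadata(G, KindID);
+//   LLVMDisposeValueMetadataEntries(Es);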
+
+/*--.. Operations on global variables ......................................--*/
+
+LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name) {
+ return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
+ GlobalValue::ExternalLinkage, nullptr, Name));
+}
+
+LLVMValueRef LLVMAddGlobalInAddressSpace(LLVMModuleRef M, LLVMTypeRef Ty,
+ const char *Name,
+ unsigned AddressSpace) {
+ return wrap(new GlobalVariable(*unwrap(M), unwrap(Ty), false,
+ GlobalValue::ExternalLinkage, nullptr, Name,
+ nullptr, GlobalVariable::NotThreadLocal,
+ AddressSpace));
+}
+
+LLVMValueRef LLVMGetNamedGlobal(LLVMModuleRef M, const char *Name) {
+ return wrap(unwrap(M)->getNamedGlobal(Name));
+}
+
+LLVMValueRef LLVMGetFirstGlobal(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::global_iterator I = Mod->global_begin();
+ if (I == Mod->global_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastGlobal(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::global_iterator I = Mod->global_end();
+ if (I == Mod->global_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextGlobal(LLVMValueRef GlobalVar) {
+ GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+ Module::global_iterator I(GV);
+ if (++I == GV->getParent()->global_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousGlobal(LLVMValueRef GlobalVar) {
+ GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+ Module::global_iterator I(GV);
+ if (I == GV->getParent()->global_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
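+
+// Usage sketch: First/Next (or Last/Previous) form a forward (or backward)
+// traversal that yields NULL at the end. `M` is an assumed LLVMModuleRef.
+//
+//   for (LLVMValueRef GV = LLVMGetFirstGlobal(M); GV;
+//        GV = LLVMGetNextGlobal(GV))
+//     LLVMDumpValue(GV);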
+
+void LLVMDeleteGlobal(LLVMValueRef GlobalVar) {
+ unwrap<GlobalVariable>(GlobalVar)->eraseFromParent();
+}
+
+LLVMValueRef LLVMGetInitializer(LLVMValueRef GlobalVar) {
+  GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+  if (!GV->hasInitializer())
+ return nullptr;
+ return wrap(GV->getInitializer());
+}
+
+void LLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal) {
+ unwrap<GlobalVariable>(GlobalVar)
+ ->setInitializer(unwrap<Constant>(ConstantVal));
+}
+
+LLVMBool LLVMIsThreadLocal(LLVMValueRef GlobalVar) {
+ return unwrap<GlobalVariable>(GlobalVar)->isThreadLocal();
+}
+
+void LLVMSetThreadLocal(LLVMValueRef GlobalVar, LLVMBool IsThreadLocal) {
+ unwrap<GlobalVariable>(GlobalVar)->setThreadLocal(IsThreadLocal != 0);
+}
+
+LLVMBool LLVMIsGlobalConstant(LLVMValueRef GlobalVar) {
+ return unwrap<GlobalVariable>(GlobalVar)->isConstant();
+}
+
+void LLVMSetGlobalConstant(LLVMValueRef GlobalVar, LLVMBool IsConstant) {
+ unwrap<GlobalVariable>(GlobalVar)->setConstant(IsConstant != 0);
+}
+
+LLVMThreadLocalMode LLVMGetThreadLocalMode(LLVMValueRef GlobalVar) {
+ switch (unwrap<GlobalVariable>(GlobalVar)->getThreadLocalMode()) {
+ case GlobalVariable::NotThreadLocal:
+ return LLVMNotThreadLocal;
+ case GlobalVariable::GeneralDynamicTLSModel:
+ return LLVMGeneralDynamicTLSModel;
+ case GlobalVariable::LocalDynamicTLSModel:
+ return LLVMLocalDynamicTLSModel;
+ case GlobalVariable::InitialExecTLSModel:
+ return LLVMInitialExecTLSModel;
+ case GlobalVariable::LocalExecTLSModel:
+ return LLVMLocalExecTLSModel;
+ }
+
+ llvm_unreachable("Invalid GlobalVariable thread local mode");
+}
+
+void LLVMSetThreadLocalMode(LLVMValueRef GlobalVar, LLVMThreadLocalMode Mode) {
+ GlobalVariable *GV = unwrap<GlobalVariable>(GlobalVar);
+
+ switch (Mode) {
+ case LLVMNotThreadLocal:
+ GV->setThreadLocalMode(GlobalVariable::NotThreadLocal);
+ break;
+ case LLVMGeneralDynamicTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::GeneralDynamicTLSModel);
+ break;
+ case LLVMLocalDynamicTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::LocalDynamicTLSModel);
+ break;
+ case LLVMInitialExecTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
+ break;
+ case LLVMLocalExecTLSModel:
+ GV->setThreadLocalMode(GlobalVariable::LocalExecTLSModel);
+ break;
+ }
+}
+
+LLVMBool LLVMIsExternallyInitialized(LLVMValueRef GlobalVar) {
+ return unwrap<GlobalVariable>(GlobalVar)->isExternallyInitialized();
+}
+
+void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit) {
+ unwrap<GlobalVariable>(GlobalVar)->setExternallyInitialized(IsExtInit);
+}
+
+/*--.. Operations on aliases ...............................................--*/
+
+LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
+ const char *Name) {
+ auto *PTy = cast<PointerType>(unwrap(Ty));
+ return wrap(GlobalAlias::create(PTy->getNonOpaquePointerElementType(),
+ PTy->getAddressSpace(),
+ GlobalValue::ExternalLinkage, Name,
+ unwrap<Constant>(Aliasee), unwrap(M)));
+}
+
+LLVMValueRef LLVMAddAlias2(LLVMModuleRef M, LLVMTypeRef ValueTy,
+ unsigned AddrSpace, LLVMValueRef Aliasee,
+ const char *Name) {
+ return wrap(GlobalAlias::create(unwrap(ValueTy), AddrSpace,
+ GlobalValue::ExternalLinkage, Name,
+ unwrap<Constant>(Aliasee), unwrap(M)));
+}
+
+LLVMValueRef LLVMGetNamedGlobalAlias(LLVMModuleRef M,
+ const char *Name, size_t NameLen) {
+ return wrap(unwrap(M)->getNamedAlias(StringRef(Name, NameLen)));
+}
+
+LLVMValueRef LLVMGetFirstGlobalAlias(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::alias_iterator I = Mod->alias_begin();
+ if (I == Mod->alias_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastGlobalAlias(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::alias_iterator I = Mod->alias_end();
+ if (I == Mod->alias_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextGlobalAlias(LLVMValueRef GA) {
+ GlobalAlias *Alias = unwrap<GlobalAlias>(GA);
+ Module::alias_iterator I(Alias);
+ if (++I == Alias->getParent()->alias_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousGlobalAlias(LLVMValueRef GA) {
+ GlobalAlias *Alias = unwrap<GlobalAlias>(GA);
+ Module::alias_iterator I(Alias);
+ if (I == Alias->getParent()->alias_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMAliasGetAliasee(LLVMValueRef Alias) {
+ return wrap(unwrap<GlobalAlias>(Alias)->getAliasee());
+}
+
+void LLVMAliasSetAliasee(LLVMValueRef Alias, LLVMValueRef Aliasee) {
+ unwrap<GlobalAlias>(Alias)->setAliasee(unwrap<Constant>(Aliasee));
+}
+
+/*--.. Operations on functions .............................................--*/
+
+LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name,
+ LLVMTypeRef FunctionTy) {
+ return wrap(Function::Create(unwrap<FunctionType>(FunctionTy),
+ GlobalValue::ExternalLinkage, Name, unwrap(M)));
+}
+
+LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name) {
+ return wrap(unwrap(M)->getFunction(Name));
+}
+
+LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::iterator I = Mod->begin();
+ if (I == Mod->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::iterator I = Mod->end();
+ if (I == Mod->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Module::iterator I(Func);
+ if (++I == Func->getParent()->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Module::iterator I(Func);
+ if (I == Func->getParent()->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+void LLVMDeleteFunction(LLVMValueRef Fn) {
+ unwrap<Function>(Fn)->eraseFromParent();
+}
+
+LLVMBool LLVMHasPersonalityFn(LLVMValueRef Fn) {
+ return unwrap<Function>(Fn)->hasPersonalityFn();
+}
+
+LLVMValueRef LLVMGetPersonalityFn(LLVMValueRef Fn) {
+ return wrap(unwrap<Function>(Fn)->getPersonalityFn());
+}
+
+void LLVMSetPersonalityFn(LLVMValueRef Fn, LLVMValueRef PersonalityFn) {
+ unwrap<Function>(Fn)->setPersonalityFn(unwrap<Constant>(PersonalityFn));
+}
+
+unsigned LLVMGetIntrinsicID(LLVMValueRef Fn) {
+ if (Function *F = dyn_cast<Function>(unwrap(Fn)))
+ return F->getIntrinsicID();
+ return 0;
+}
+
+static Intrinsic::ID llvm_map_to_intrinsic_id(unsigned ID) {
+ assert(ID < llvm::Intrinsic::num_intrinsics && "Intrinsic ID out of range");
+ return llvm::Intrinsic::ID(ID);
+}
+
+LLVMValueRef LLVMGetIntrinsicDeclaration(LLVMModuleRef Mod,
+ unsigned ID,
+ LLVMTypeRef *ParamTypes,
+ size_t ParamCount) {
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ return wrap(llvm::Intrinsic::getDeclaration(unwrap(Mod), IID, Tys));
+}
+
+const char *LLVMIntrinsicGetName(unsigned ID, size_t *NameLength) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ auto Str = llvm::Intrinsic::getName(IID);
+ *NameLength = Str.size();
+ return Str.data();
+}
+
+LLVMTypeRef LLVMIntrinsicGetType(LLVMContextRef Ctx, unsigned ID,
+ LLVMTypeRef *ParamTypes, size_t ParamCount) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ return wrap(llvm::Intrinsic::getType(*unwrap(Ctx), IID, Tys));
+}
+
+const char *LLVMIntrinsicCopyOverloadedName(unsigned ID,
+ LLVMTypeRef *ParamTypes,
+ size_t ParamCount,
+ size_t *NameLength) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ auto Str = llvm::Intrinsic::getNameNoUnnamedTypes(IID, Tys);
+ *NameLength = Str.length();
+ return strdup(Str.c_str());
+}
+
+const char *LLVMIntrinsicCopyOverloadedName2(LLVMModuleRef Mod, unsigned ID,
+ LLVMTypeRef *ParamTypes,
+ size_t ParamCount,
+ size_t *NameLength) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ ArrayRef<Type *> Tys(unwrap(ParamTypes), ParamCount);
+ auto Str = llvm::Intrinsic::getName(IID, Tys, unwrap(Mod));
+ *NameLength = Str.length();
+ return strdup(Str.c_str());
+}
+
+unsigned LLVMLookupIntrinsicID(const char *Name, size_t NameLen) {
+ return Function::lookupIntrinsicID({Name, NameLen});
+}
+
+LLVMBool LLVMIntrinsicIsOverloaded(unsigned ID) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ return llvm::Intrinsic::isOverloaded(IID);
+}
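+
+// Usage sketch: resolve an overloaded intrinsic by name, then materialize a
+// declaration for a concrete overload. `M` and `I64` (an i64 LLVMTypeRef
+// from the module's context) are assumed.
+//
+//   unsigned ID = LLVMLookupIntrinsicID("llvm.ctpop", 10);
+//   if (ID && LLVMIntrinsicIsOverloaded(ID))
+//     LLVMGetIntrinsicDeclaration(M, ID, &I64, 1);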
+
+unsigned LLVMGetFunctionCallConv(LLVMValueRef Fn) {
+ return unwrap<Function>(Fn)->getCallingConv();
+}
+
+void LLVMSetFunctionCallConv(LLVMValueRef Fn, unsigned CC) {
+ return unwrap<Function>(Fn)->setCallingConv(
+ static_cast<CallingConv::ID>(CC));
+}
+
+const char *LLVMGetGC(LLVMValueRef Fn) {
+ Function *F = unwrap<Function>(Fn);
+  return F->hasGC() ? F->getGC().c_str() : nullptr;
+}
+
+void LLVMSetGC(LLVMValueRef Fn, const char *GC) {
+ Function *F = unwrap<Function>(Fn);
+ if (GC)
+ F->setGC(GC);
+ else
+ F->clearGC();
+}
+
+void LLVMAddAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
+ LLVMAttributeRef A) {
+ unwrap<Function>(F)->addAttributeAtIndex(Idx, unwrap(A));
+}
+
+unsigned LLVMGetAttributeCountAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx) {
+ auto AS = unwrap<Function>(F)->getAttributes().getAttributes(Idx);
+ return AS.getNumAttributes();
+}
+
+void LLVMGetAttributesAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
+ LLVMAttributeRef *Attrs) {
+ auto AS = unwrap<Function>(F)->getAttributes().getAttributes(Idx);
+ for (auto A : AS)
+ *Attrs++ = wrap(A);
+}
+
+LLVMAttributeRef LLVMGetEnumAttributeAtIndex(LLVMValueRef F,
+ LLVMAttributeIndex Idx,
+ unsigned KindID) {
+ return wrap(unwrap<Function>(F)->getAttributeAtIndex(
+ Idx, (Attribute::AttrKind)KindID));
+}
+
+LLVMAttributeRef LLVMGetStringAttributeAtIndex(LLVMValueRef F,
+ LLVMAttributeIndex Idx,
+ const char *K, unsigned KLen) {
+ return wrap(
+ unwrap<Function>(F)->getAttributeAtIndex(Idx, StringRef(K, KLen)));
+}
+
+void LLVMRemoveEnumAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
+ unsigned KindID) {
+ unwrap<Function>(F)->removeAttributeAtIndex(Idx, (Attribute::AttrKind)KindID);
+}
+
+void LLVMRemoveStringAttributeAtIndex(LLVMValueRef F, LLVMAttributeIndex Idx,
+ const char *K, unsigned KLen) {
+ unwrap<Function>(F)->removeAttributeAtIndex(Idx, StringRef(K, KLen));
+}
+
+void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A,
+ const char *V) {
+ Function *Func = unwrap<Function>(Fn);
+ Attribute Attr = Attribute::get(Func->getContext(), A, V);
+ Func->addFnAttr(Attr);
+}
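+
+// Usage sketch: target-dependent attributes are free-form key/value string
+// pairs on the function; "frame-pointer"="all" is one well-known example.
+// `Fn` is an assumed LLVMValueRef wrapping a Function.
+//
+//   LLVMAddTargetDependentFunctionAttr(Fn, "frame-pointer", "all");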
+
+/*--.. Operations on parameters ............................................--*/
+
+unsigned LLVMCountParams(LLVMValueRef FnRef) {
+ // This function is strictly redundant to
+ // LLVMCountParamTypes(LLVMGetElementType(LLVMTypeOf(FnRef)))
+ return unwrap<Function>(FnRef)->arg_size();
+}
+
+void LLVMGetParams(LLVMValueRef FnRef, LLVMValueRef *ParamRefs) {
+ Function *Fn = unwrap<Function>(FnRef);
+ for (Argument &A : Fn->args())
+ *ParamRefs++ = wrap(&A);
+}
+
+LLVMValueRef LLVMGetParam(LLVMValueRef FnRef, unsigned index) {
+ Function *Fn = unwrap<Function>(FnRef);
+ return wrap(&Fn->arg_begin()[index]);
+}
+
+LLVMValueRef LLVMGetParamParent(LLVMValueRef V) {
+ return wrap(unwrap<Argument>(V)->getParent());
+}
+
+LLVMValueRef LLVMGetFirstParam(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::arg_iterator I = Func->arg_begin();
+ if (I == Func->arg_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::arg_iterator I = Func->arg_end();
+ if (I == Func->arg_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg) {
+ Argument *A = unwrap<Argument>(Arg);
+ Function *Fn = A->getParent();
+ if (A->getArgNo() + 1 >= Fn->arg_size())
+ return nullptr;
+ return wrap(&Fn->arg_begin()[A->getArgNo() + 1]);
+}
+
+LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
+ Argument *A = unwrap<Argument>(Arg);
+ if (A->getArgNo() == 0)
+ return nullptr;
+ return wrap(&A->getParent()->arg_begin()[A->getArgNo() - 1]);
+}
+
+void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) {
+ Argument *A = unwrap<Argument>(Arg);
+ A->addAttr(Attribute::getWithAlignment(A->getContext(), Align(align)));
+}
+
+/*--.. Operations on ifuncs ................................................--*/
+
+LLVMValueRef LLVMAddGlobalIFunc(LLVMModuleRef M,
+ const char *Name, size_t NameLen,
+ LLVMTypeRef Ty, unsigned AddrSpace,
+ LLVMValueRef Resolver) {
+ return wrap(GlobalIFunc::create(unwrap(Ty), AddrSpace,
+ GlobalValue::ExternalLinkage,
+ StringRef(Name, NameLen),
+ unwrap<Constant>(Resolver), unwrap(M)));
+}
+
+LLVMValueRef LLVMGetNamedGlobalIFunc(LLVMModuleRef M,
+ const char *Name, size_t NameLen) {
+ return wrap(unwrap(M)->getNamedIFunc(StringRef(Name, NameLen)));
+}
+
+LLVMValueRef LLVMGetFirstGlobalIFunc(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::ifunc_iterator I = Mod->ifunc_begin();
+ if (I == Mod->ifunc_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastGlobalIFunc(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::ifunc_iterator I = Mod->ifunc_end();
+ if (I == Mod->ifunc_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextGlobalIFunc(LLVMValueRef IFunc) {
+ GlobalIFunc *GIF = unwrap<GlobalIFunc>(IFunc);
+ Module::ifunc_iterator I(GIF);
+ if (++I == GIF->getParent()->ifunc_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousGlobalIFunc(LLVMValueRef IFunc) {
+ GlobalIFunc *GIF = unwrap<GlobalIFunc>(IFunc);
+ Module::ifunc_iterator I(GIF);
+ if (I == GIF->getParent()->ifunc_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetGlobalIFuncResolver(LLVMValueRef IFunc) {
+ return wrap(unwrap<GlobalIFunc>(IFunc)->getResolver());
+}
+
+void LLVMSetGlobalIFuncResolver(LLVMValueRef IFunc, LLVMValueRef Resolver) {
+ unwrap<GlobalIFunc>(IFunc)->setResolver(unwrap<Constant>(Resolver));
+}
+
+void LLVMEraseGlobalIFunc(LLVMValueRef IFunc) {
+ unwrap<GlobalIFunc>(IFunc)->eraseFromParent();
+}
+
+void LLVMRemoveGlobalIFunc(LLVMValueRef IFunc) {
+ unwrap<GlobalIFunc>(IFunc)->removeFromParent();
+}
+
+/*--.. Operations on basic blocks ..........................................--*/
+
+LLVMValueRef LLVMBasicBlockAsValue(LLVMBasicBlockRef BB) {
+ return wrap(static_cast<Value*>(unwrap(BB)));
+}
+
+LLVMBool LLVMValueIsBasicBlock(LLVMValueRef Val) {
+ return isa<BasicBlock>(unwrap(Val));
+}
+
+LLVMBasicBlockRef LLVMValueAsBasicBlock(LLVMValueRef Val) {
+ return wrap(unwrap<BasicBlock>(Val));
+}
+
+const char *LLVMGetBasicBlockName(LLVMBasicBlockRef BB) {
+ return unwrap(BB)->getName().data();
+}
+
+LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB) {
+ return wrap(unwrap(BB)->getParent());
+}
+
+LLVMValueRef LLVMGetBasicBlockTerminator(LLVMBasicBlockRef BB) {
+ return wrap(unwrap(BB)->getTerminator());
+}
+
+unsigned LLVMCountBasicBlocks(LLVMValueRef FnRef) {
+ return unwrap<Function>(FnRef)->size();
+}
+
+void LLVMGetBasicBlocks(LLVMValueRef FnRef, LLVMBasicBlockRef *BasicBlocksRefs){
+ Function *Fn = unwrap<Function>(FnRef);
+ for (BasicBlock &BB : *Fn)
+ *BasicBlocksRefs++ = wrap(&BB);
+}
+
+LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn) {
+ return wrap(&unwrap<Function>(Fn)->getEntryBlock());
+}
+
+LLVMBasicBlockRef LLVMGetFirstBasicBlock(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::iterator I = Func->begin();
+ if (I == Func->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMBasicBlockRef LLVMGetLastBasicBlock(LLVMValueRef Fn) {
+ Function *Func = unwrap<Function>(Fn);
+ Function::iterator I = Func->end();
+ if (I == Func->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMBasicBlockRef LLVMGetNextBasicBlock(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ Function::iterator I(Block);
+ if (++I == Block->getParent()->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ Function::iterator I(Block);
+ if (I == Block->getParent()->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMBasicBlockRef LLVMCreateBasicBlockInContext(LLVMContextRef C,
+ const char *Name) {
+ return wrap(llvm::BasicBlock::Create(*unwrap(C), Name));
+}
+
+void LLVMInsertExistingBasicBlockAfterInsertBlock(LLVMBuilderRef Builder,
+ LLVMBasicBlockRef BB) {
+ BasicBlock *ToInsert = unwrap(BB);
+ BasicBlock *CurBB = unwrap(Builder)->GetInsertBlock();
+ assert(CurBB && "current insertion point is invalid!");
+ CurBB->getParent()->getBasicBlockList().insertAfter(CurBB->getIterator(),
+ ToInsert);
+}
+
+void LLVMAppendExistingBasicBlock(LLVMValueRef Fn,
+ LLVMBasicBlockRef BB) {
+ unwrap<Function>(Fn)->getBasicBlockList().push_back(unwrap(BB));
+}
+
+LLVMBasicBlockRef LLVMAppendBasicBlockInContext(LLVMContextRef C,
+ LLVMValueRef FnRef,
+ const char *Name) {
+ return wrap(BasicBlock::Create(*unwrap(C), Name, unwrap<Function>(FnRef)));
+}
+
+LLVMBasicBlockRef LLVMAppendBasicBlock(LLVMValueRef FnRef, const char *Name) {
+ return LLVMAppendBasicBlockInContext(LLVMGetGlobalContext(), FnRef, Name);
+}
+
+LLVMBasicBlockRef LLVMInsertBasicBlockInContext(LLVMContextRef C,
+ LLVMBasicBlockRef BBRef,
+ const char *Name) {
+ BasicBlock *BB = unwrap(BBRef);
+ return wrap(BasicBlock::Create(*unwrap(C), Name, BB->getParent(), BB));
+}
+
+LLVMBasicBlockRef LLVMInsertBasicBlock(LLVMBasicBlockRef BBRef,
+ const char *Name) {
+ return LLVMInsertBasicBlockInContext(LLVMGetGlobalContext(), BBRef, Name);
+}
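+
+// Usage sketch: Append places a fresh block at the end of the function,
+// while Insert splices one in front of an existing block. `C` and `Fn` are
+// assumed to share a context.
+//
+//   LLVMBasicBlockRef Entry = LLVMAppendBasicBlockInContext(C, Fn, "entry");
+//   LLVMBasicBlockRef Pre = LLVMInsertBasicBlockInContext(C, Entry, "pre");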
+
+void LLVMDeleteBasicBlock(LLVMBasicBlockRef BBRef) {
+ unwrap(BBRef)->eraseFromParent();
+}
+
+void LLVMRemoveBasicBlockFromParent(LLVMBasicBlockRef BBRef) {
+ unwrap(BBRef)->removeFromParent();
+}
+
+void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) {
+ unwrap(BB)->moveBefore(unwrap(MovePos));
+}
+
+void LLVMMoveBasicBlockAfter(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) {
+ unwrap(BB)->moveAfter(unwrap(MovePos));
+}
+
+/*--.. Operations on instructions ..........................................--*/
+
+LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst) {
+ return wrap(unwrap<Instruction>(Inst)->getParent());
+}
+
+LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ BasicBlock::iterator I = Block->begin();
+ if (I == Block->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetLastInstruction(LLVMBasicBlockRef BB) {
+ BasicBlock *Block = unwrap(BB);
+ BasicBlock::iterator I = Block->end();
+ if (I == Block->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst) {
+ Instruction *Instr = unwrap<Instruction>(Inst);
+ BasicBlock::iterator I(Instr);
+ if (++I == Instr->getParent()->end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst) {
+ Instruction *Instr = unwrap<Instruction>(Inst);
+ BasicBlock::iterator I(Instr);
+ if (I == Instr->getParent()->begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+void LLVMInstructionRemoveFromParent(LLVMValueRef Inst) {
+ unwrap<Instruction>(Inst)->removeFromParent();
+}
+
+void LLVMInstructionEraseFromParent(LLVMValueRef Inst) {
+ unwrap<Instruction>(Inst)->eraseFromParent();
+}
+
+void LLVMDeleteInstruction(LLVMValueRef Inst) {
+ unwrap<Instruction>(Inst)->deleteValue();
+}
+
+LLVMIntPredicate LLVMGetICmpPredicate(LLVMValueRef Inst) {
+ if (ICmpInst *I = dyn_cast<ICmpInst>(unwrap(Inst)))
+ return (LLVMIntPredicate)I->getPredicate();
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(unwrap(Inst)))
+ if (CE->getOpcode() == Instruction::ICmp)
+ return (LLVMIntPredicate)CE->getPredicate();
+ return (LLVMIntPredicate)0;
+}
+
+LLVMRealPredicate LLVMGetFCmpPredicate(LLVMValueRef Inst) {
+ if (FCmpInst *I = dyn_cast<FCmpInst>(unwrap(Inst)))
+ return (LLVMRealPredicate)I->getPredicate();
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(unwrap(Inst)))
+ if (CE->getOpcode() == Instruction::FCmp)
+ return (LLVMRealPredicate)CE->getPredicate();
+ return (LLVMRealPredicate)0;
+}
+
+LLVMOpcode LLVMGetInstructionOpcode(LLVMValueRef Inst) {
+ if (Instruction *C = dyn_cast<Instruction>(unwrap(Inst)))
+ return map_to_llvmopcode(C->getOpcode());
+ return (LLVMOpcode)0;
+}
+
+LLVMValueRef LLVMInstructionClone(LLVMValueRef Inst) {
+ if (Instruction *C = dyn_cast<Instruction>(unwrap(Inst)))
+ return wrap(C->clone());
+ return nullptr;
+}
+
+LLVMValueRef LLVMIsATerminatorInst(LLVMValueRef Inst) {
+ Instruction *I = dyn_cast<Instruction>(unwrap(Inst));
+ return (I && I->isTerminator()) ? wrap(I) : nullptr;
+}
+
+unsigned LLVMGetNumArgOperands(LLVMValueRef Instr) {
+ if (FuncletPadInst *FPI = dyn_cast<FuncletPadInst>(unwrap(Instr))) {
+ return FPI->getNumArgOperands();
+ }
+ return unwrap<CallBase>(Instr)->arg_size();
+}
+
+/*--.. Call and invoke instructions ........................................--*/
+
+unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr) {
+ return unwrap<CallBase>(Instr)->getCallingConv();
+}
+
+void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
+ return unwrap<CallBase>(Instr)->setCallingConv(
+ static_cast<CallingConv::ID>(CC));
+}
+
+void LLVMSetInstrParamAlignment(LLVMValueRef Instr, LLVMAttributeIndex Idx,
+ unsigned align) {
+ auto *Call = unwrap<CallBase>(Instr);
+ Attribute AlignAttr =
+ Attribute::getWithAlignment(Call->getContext(), Align(align));
+ Call->addAttributeAtIndex(Idx, AlignAttr);
+}
+
+void LLVMAddCallSiteAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
+ LLVMAttributeRef A) {
+ unwrap<CallBase>(C)->addAttributeAtIndex(Idx, unwrap(A));
+}
+
+unsigned LLVMGetCallSiteAttributeCount(LLVMValueRef C,
+ LLVMAttributeIndex Idx) {
+ auto *Call = unwrap<CallBase>(C);
+ auto AS = Call->getAttributes().getAttributes(Idx);
+ return AS.getNumAttributes();
+}
+
+void LLVMGetCallSiteAttributes(LLVMValueRef C, LLVMAttributeIndex Idx,
+ LLVMAttributeRef *Attrs) {
+ auto *Call = unwrap<CallBase>(C);
+ auto AS = Call->getAttributes().getAttributes(Idx);
+ for (auto A : AS)
+ *Attrs++ = wrap(A);
+}
+
+LLVMAttributeRef LLVMGetCallSiteEnumAttribute(LLVMValueRef C,
+ LLVMAttributeIndex Idx,
+ unsigned KindID) {
+ return wrap(unwrap<CallBase>(C)->getAttributeAtIndex(
+ Idx, (Attribute::AttrKind)KindID));
+}
+
+LLVMAttributeRef LLVMGetCallSiteStringAttribute(LLVMValueRef C,
+ LLVMAttributeIndex Idx,
+ const char *K, unsigned KLen) {
+ return wrap(
+ unwrap<CallBase>(C)->getAttributeAtIndex(Idx, StringRef(K, KLen)));
+}
+
+void LLVMRemoveCallSiteEnumAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
+ unsigned KindID) {
+ unwrap<CallBase>(C)->removeAttributeAtIndex(Idx, (Attribute::AttrKind)KindID);
+}
+
+void LLVMRemoveCallSiteStringAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
+ const char *K, unsigned KLen) {
+ unwrap<CallBase>(C)->removeAttributeAtIndex(Idx, StringRef(K, KLen));
+}
+
+LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr) {
+ return wrap(unwrap<CallBase>(Instr)->getCalledOperand());
+}
+
+LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef Instr) {
+ return wrap(unwrap<CallBase>(Instr)->getFunctionType());
+}
+
+/*--.. Operations on call instructions (only) ..............................--*/
+
+LLVMBool LLVMIsTailCall(LLVMValueRef Call) {
+ return unwrap<CallInst>(Call)->isTailCall();
+}
+
+void LLVMSetTailCall(LLVMValueRef Call, LLVMBool isTailCall) {
+ unwrap<CallInst>(Call)->setTailCall(isTailCall);
+}
+
+/*--.. Operations on invoke instructions (only) ............................--*/
+
+LLVMBasicBlockRef LLVMGetNormalDest(LLVMValueRef Invoke) {
+ return wrap(unwrap<InvokeInst>(Invoke)->getNormalDest());
+}
+
+LLVMBasicBlockRef LLVMGetUnwindDest(LLVMValueRef Invoke) {
+ if (CleanupReturnInst *CRI = dyn_cast<CleanupReturnInst>(unwrap(Invoke))) {
+ return wrap(CRI->getUnwindDest());
+ } else if (CatchSwitchInst *CSI = dyn_cast<CatchSwitchInst>(unwrap(Invoke))) {
+ return wrap(CSI->getUnwindDest());
+ }
+ return wrap(unwrap<InvokeInst>(Invoke)->getUnwindDest());
+}
+
+void LLVMSetNormalDest(LLVMValueRef Invoke, LLVMBasicBlockRef B) {
+ unwrap<InvokeInst>(Invoke)->setNormalDest(unwrap(B));
+}
+
+void LLVMSetUnwindDest(LLVMValueRef Invoke, LLVMBasicBlockRef B) {
+ if (CleanupReturnInst *CRI = dyn_cast<CleanupReturnInst>(unwrap(Invoke))) {
+ return CRI->setUnwindDest(unwrap(B));
+ } else if (CatchSwitchInst *CSI = dyn_cast<CatchSwitchInst>(unwrap(Invoke))) {
+ return CSI->setUnwindDest(unwrap(B));
+ }
+ unwrap<InvokeInst>(Invoke)->setUnwindDest(unwrap(B));
+}
+
+/*--.. Operations on terminators ...........................................--*/
+
+unsigned LLVMGetNumSuccessors(LLVMValueRef Term) {
+ return unwrap<Instruction>(Term)->getNumSuccessors();
+}
+
+LLVMBasicBlockRef LLVMGetSuccessor(LLVMValueRef Term, unsigned i) {
+ return wrap(unwrap<Instruction>(Term)->getSuccessor(i));
+}
+
+void LLVMSetSuccessor(LLVMValueRef Term, unsigned i, LLVMBasicBlockRef block) {
+ return unwrap<Instruction>(Term)->setSuccessor(i, unwrap(block));
+}
+
+/*--.. Operations on branch instructions (only) ............................--*/
+
+LLVMBool LLVMIsConditional(LLVMValueRef Branch) {
+ return unwrap<BranchInst>(Branch)->isConditional();
+}
+
+LLVMValueRef LLVMGetCondition(LLVMValueRef Branch) {
+ return wrap(unwrap<BranchInst>(Branch)->getCondition());
+}
+
+void LLVMSetCondition(LLVMValueRef Branch, LLVMValueRef Cond) {
+ return unwrap<BranchInst>(Branch)->setCondition(unwrap(Cond));
+}
+
+/*--.. Operations on switch instructions (only) ............................--*/
+
+LLVMBasicBlockRef LLVMGetSwitchDefaultDest(LLVMValueRef Switch) {
+ return wrap(unwrap<SwitchInst>(Switch)->getDefaultDest());
+}
+
+/*--.. Operations on alloca instructions (only) ............................--*/
+
+LLVMTypeRef LLVMGetAllocatedType(LLVMValueRef Alloca) {
+ return wrap(unwrap<AllocaInst>(Alloca)->getAllocatedType());
+}
+
+/*--.. Operations on gep instructions (only) ...............................--*/
+
+LLVMBool LLVMIsInBounds(LLVMValueRef GEP) {
+ return unwrap<GEPOperator>(GEP)->isInBounds();
+}
+
+void LLVMSetIsInBounds(LLVMValueRef GEP, LLVMBool InBounds) {
+ return unwrap<GetElementPtrInst>(GEP)->setIsInBounds(InBounds);
+}
+
+LLVMTypeRef LLVMGetGEPSourceElementType(LLVMValueRef GEP) {
+ return wrap(unwrap<GEPOperator>(GEP)->getSourceElementType());
+}
+
+/*--.. Operations on phi nodes .............................................--*/
+
+void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues,
+ LLVMBasicBlockRef *IncomingBlocks, unsigned Count) {
+ PHINode *PhiVal = unwrap<PHINode>(PhiNode);
+ for (unsigned I = 0; I != Count; ++I)
+ PhiVal->addIncoming(unwrap(IncomingValues[I]), unwrap(IncomingBlocks[I]));
+}
+
+unsigned LLVMCountIncoming(LLVMValueRef PhiNode) {
+ return unwrap<PHINode>(PhiNode)->getNumIncomingValues();
+}
+
+LLVMValueRef LLVMGetIncomingValue(LLVMValueRef PhiNode, unsigned Index) {
+ return wrap(unwrap<PHINode>(PhiNode)->getIncomingValue(Index));
+}
+
+LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index) {
+ return wrap(unwrap<PHINode>(PhiNode)->getIncomingBlock(Index));
+}
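+
+// Usage sketch: incoming (value, block) pairs are attached after the phi is
+// created, one per predecessor edge. `Phi`, `V1`/`BB1`, and `V2`/`BB2` are
+// assumed to come from an LLVMBuildPhi call and the two predecessor blocks.
+//
+//   LLVMValueRef Vals[] = {V1, V2};
+//   LLVMBasicBlockRef BBs[] = {BB1, BB2};
+//   LLVMAddIncoming(Phi, Vals, BBs, 2);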
+
+/*--.. Operations on extractvalue and insertvalue nodes ....................--*/
+
+unsigned LLVMGetNumIndices(LLVMValueRef Inst) {
+ auto *I = unwrap(Inst);
+ if (auto *GEP = dyn_cast<GEPOperator>(I))
+ return GEP->getNumIndices();
+ if (auto *EV = dyn_cast<ExtractValueInst>(I))
+ return EV->getNumIndices();
+ if (auto *IV = dyn_cast<InsertValueInst>(I))
+ return IV->getNumIndices();
+ llvm_unreachable(
+ "LLVMGetNumIndices applies only to extractvalue and insertvalue!");
+}
+
+const unsigned *LLVMGetIndices(LLVMValueRef Inst) {
+ auto *I = unwrap(Inst);
+ if (auto *EV = dyn_cast<ExtractValueInst>(I))
+ return EV->getIndices().data();
+ if (auto *IV = dyn_cast<InsertValueInst>(I))
+ return IV->getIndices().data();
+ llvm_unreachable(
+ "LLVMGetIndices applies only to extractvalue and insertvalue!");
+}
+
+/*===-- Instruction builders ----------------------------------------------===*/
+
+LLVMBuilderRef LLVMCreateBuilderInContext(LLVMContextRef C) {
+ return wrap(new IRBuilder<>(*unwrap(C)));
+}
+
+LLVMBuilderRef LLVMCreateBuilder(void) {
+ return LLVMCreateBuilderInContext(LLVMGetGlobalContext());
+}
+
+void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block,
+ LLVMValueRef Instr) {
+ BasicBlock *BB = unwrap(Block);
+ auto I = Instr ? unwrap<Instruction>(Instr)->getIterator() : BB->end();
+ unwrap(Builder)->SetInsertPoint(BB, I);
+}
+
+void LLVMPositionBuilderBefore(LLVMBuilderRef Builder, LLVMValueRef Instr) {
+ Instruction *I = unwrap<Instruction>(Instr);
+ unwrap(Builder)->SetInsertPoint(I->getParent(), I->getIterator());
+}
+
+void LLVMPositionBuilderAtEnd(LLVMBuilderRef Builder, LLVMBasicBlockRef Block) {
+ BasicBlock *BB = unwrap(Block);
+ unwrap(Builder)->SetInsertPoint(BB);
+}
+
+LLVMBasicBlockRef LLVMGetInsertBlock(LLVMBuilderRef Builder) {
+ return wrap(unwrap(Builder)->GetInsertBlock());
+}
+
+void LLVMClearInsertionPosition(LLVMBuilderRef Builder) {
+ unwrap(Builder)->ClearInsertionPoint();
+}
+
+void LLVMInsertIntoBuilder(LLVMBuilderRef Builder, LLVMValueRef Instr) {
+ unwrap(Builder)->Insert(unwrap<Instruction>(Instr));
+}
+
+void LLVMInsertIntoBuilderWithName(LLVMBuilderRef Builder, LLVMValueRef Instr,
+ const char *Name) {
+ unwrap(Builder)->Insert(unwrap<Instruction>(Instr), Name);
+}
+
+void LLVMDisposeBuilder(LLVMBuilderRef Builder) {
+ delete unwrap(Builder);
+}
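+
+// Usage sketch: the builder lifecycle is create, position, emit, dispose;
+// disposing the builder does not delete the instructions it built. `C` is
+// an assumed context and `BB` a block created in it.
+//
+//   LLVMBuilderRef B = LLVMCreateBuilderInContext(C);
+//   LLVMPositionBuilderAtEnd(B, BB);
+//   LLVMBuildRetVoid(B);
+//   LLVMDisposeBuilder(B);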
+
+/*--.. Metadata builders ...................................................--*/
+
+LLVMMetadataRef LLVMGetCurrentDebugLocation2(LLVMBuilderRef Builder) {
+ return wrap(unwrap(Builder)->getCurrentDebugLocation().getAsMDNode());
+}
+
+void LLVMSetCurrentDebugLocation2(LLVMBuilderRef Builder, LLVMMetadataRef Loc) {
+ if (Loc)
+ unwrap(Builder)->SetCurrentDebugLocation(DebugLoc(unwrap<MDNode>(Loc)));
+ else
+ unwrap(Builder)->SetCurrentDebugLocation(DebugLoc());
+}
+
+void LLVMSetCurrentDebugLocation(LLVMBuilderRef Builder, LLVMValueRef L) {
+ MDNode *Loc =
+ L ? cast<MDNode>(unwrap<MetadataAsValue>(L)->getMetadata()) : nullptr;
+ unwrap(Builder)->SetCurrentDebugLocation(DebugLoc(Loc));
+}
+
+LLVMValueRef LLVMGetCurrentDebugLocation(LLVMBuilderRef Builder) {
+ LLVMContext &Context = unwrap(Builder)->getContext();
+ return wrap(MetadataAsValue::get(
+ Context, unwrap(Builder)->getCurrentDebugLocation().getAsMDNode()));
+}
+
+void LLVMSetInstDebugLocation(LLVMBuilderRef Builder, LLVMValueRef Inst) {
+ unwrap(Builder)->SetInstDebugLocation(unwrap<Instruction>(Inst));
+}
+
+void LLVMAddMetadataToInst(LLVMBuilderRef Builder, LLVMValueRef Inst) {
+ unwrap(Builder)->AddMetadataToInst(unwrap<Instruction>(Inst));
+}
+
+void LLVMBuilderSetDefaultFPMathTag(LLVMBuilderRef Builder,
+ LLVMMetadataRef FPMathTag) {
+ unwrap(Builder)->setDefaultFPMathTag(FPMathTag
+ ? unwrap<MDNode>(FPMathTag)
+ : nullptr);
+}
+
+LLVMMetadataRef LLVMBuilderGetDefaultFPMathTag(LLVMBuilderRef Builder) {
+ return wrap(unwrap(Builder)->getDefaultFPMathTag());
+}
+
+/*--.. Instruction builders ................................................--*/
+
+LLVMValueRef LLVMBuildRetVoid(LLVMBuilderRef B) {
+ return wrap(unwrap(B)->CreateRetVoid());
+}
+
+LLVMValueRef LLVMBuildRet(LLVMBuilderRef B, LLVMValueRef V) {
+ return wrap(unwrap(B)->CreateRet(unwrap(V)));
+}
+
+LLVMValueRef LLVMBuildAggregateRet(LLVMBuilderRef B, LLVMValueRef *RetVals,
+ unsigned N) {
+ return wrap(unwrap(B)->CreateAggregateRet(unwrap(RetVals), N));
+}
+
+LLVMValueRef LLVMBuildBr(LLVMBuilderRef B, LLVMBasicBlockRef Dest) {
+ return wrap(unwrap(B)->CreateBr(unwrap(Dest)));
+}
+
+LLVMValueRef LLVMBuildCondBr(LLVMBuilderRef B, LLVMValueRef If,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Else) {
+ return wrap(unwrap(B)->CreateCondBr(unwrap(If), unwrap(Then), unwrap(Else)));
+}
+
+LLVMValueRef LLVMBuildSwitch(LLVMBuilderRef B, LLVMValueRef V,
+ LLVMBasicBlockRef Else, unsigned NumCases) {
+ return wrap(unwrap(B)->CreateSwitch(unwrap(V), unwrap(Else), NumCases));
+}
+
+LLVMValueRef LLVMBuildIndirectBr(LLVMBuilderRef B, LLVMValueRef Addr,
+ unsigned NumDests) {
+ return wrap(unwrap(B)->CreateIndirectBr(unwrap(Addr), NumDests));
+}
+
+LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ const char *Name) {
+ Value *V = unwrap(Fn);
+ FunctionType *FnT =
+ cast<FunctionType>(V->getType()->getNonOpaquePointerElementType());
+
+ return wrap(
+ unwrap(B)->CreateInvoke(FnT, unwrap(Fn), unwrap(Then), unwrap(Catch),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
+}
+
+LLVMValueRef LLVMBuildInvoke2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateInvoke(
+ unwrap<FunctionType>(Ty), unwrap(Fn), unwrap(Then), unwrap(Catch),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
+}
+
+LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef PersFn, unsigned NumClauses,
+ const char *Name) {
+ // The personality used to live on the landingpad instruction, but now it
+ // lives on the parent function. For compatibility, take the provided
+ // personality and put it on the parent function.
+ if (PersFn)
+ unwrap(B)->GetInsertBlock()->getParent()->setPersonalityFn(
+ cast<Function>(unwrap(PersFn)));
+ return wrap(unwrap(B)->CreateLandingPad(unwrap(Ty), NumClauses, Name));
+}
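+
+// Usage sketch: since the personality lives on the function, a minimal
+// cleanup-only landing pad needs no clauses. `B` is assumed positioned in
+// an unwind destination, `LPTy` is the usual { i8*, i32 } pair, and `Pers`
+// wraps the personality function.
+//
+//   LLVMValueRef LP = LLVMBuildLandingPad(B, LPTy, Pers, 0, "lp");
+//   LLVMSetCleanup(LP, 1);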
+
+LLVMValueRef LLVMBuildCatchPad(LLVMBuilderRef B, LLVMValueRef ParentPad,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateCatchPad(unwrap(ParentPad),
+ makeArrayRef(unwrap(Args), NumArgs),
+ Name));
+}
+
+LLVMValueRef LLVMBuildCleanupPad(LLVMBuilderRef B, LLVMValueRef ParentPad,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name) {
+ if (ParentPad == nullptr) {
+ Type *Ty = Type::getTokenTy(unwrap(B)->getContext());
+ ParentPad = wrap(Constant::getNullValue(Ty));
+ }
+ return wrap(unwrap(B)->CreateCleanupPad(unwrap(ParentPad),
+ makeArrayRef(unwrap(Args), NumArgs),
+ Name));
+}
+
+LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn) {
+ return wrap(unwrap(B)->CreateResume(unwrap(Exn)));
+}
+
+LLVMValueRef LLVMBuildCatchSwitch(LLVMBuilderRef B, LLVMValueRef ParentPad,
+ LLVMBasicBlockRef UnwindBB,
+ unsigned NumHandlers, const char *Name) {
+ if (ParentPad == nullptr) {
+ Type *Ty = Type::getTokenTy(unwrap(B)->getContext());
+ ParentPad = wrap(Constant::getNullValue(Ty));
+ }
+ return wrap(unwrap(B)->CreateCatchSwitch(unwrap(ParentPad), unwrap(UnwindBB),
+ NumHandlers, Name));
+}
+
+LLVMValueRef LLVMBuildCatchRet(LLVMBuilderRef B, LLVMValueRef CatchPad,
+ LLVMBasicBlockRef BB) {
+ return wrap(unwrap(B)->CreateCatchRet(unwrap<CatchPadInst>(CatchPad),
+ unwrap(BB)));
+}
+
+LLVMValueRef LLVMBuildCleanupRet(LLVMBuilderRef B, LLVMValueRef CatchPad,
+ LLVMBasicBlockRef BB) {
+ return wrap(unwrap(B)->CreateCleanupRet(unwrap<CleanupPadInst>(CatchPad),
+ unwrap(BB)));
+}
+
+LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef B) {
+ return wrap(unwrap(B)->CreateUnreachable());
+}
+
+void LLVMAddCase(LLVMValueRef Switch, LLVMValueRef OnVal,
+ LLVMBasicBlockRef Dest) {
+ unwrap<SwitchInst>(Switch)->addCase(unwrap<ConstantInt>(OnVal), unwrap(Dest));
+}
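+
+// Usage sketch: a switch is created with a default destination and a case
+// count hint, then cases are attached one by one. `B`, `V` (an i32 in the
+// global context), `DefaultBB`, and `CaseBB` are assumed.
+//
+//   LLVMValueRef SI = LLVMBuildSwitch(B, V, DefaultBB, 1);
+//   LLVMAddCase(SI, LLVMConstInt(LLVMInt32Type(), 42, 0), CaseBB);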
+
+void LLVMAddDestination(LLVMValueRef IndirectBr, LLVMBasicBlockRef Dest) {
+ unwrap<IndirectBrInst>(IndirectBr)->addDestination(unwrap(Dest));
+}
+
+unsigned LLVMGetNumClauses(LLVMValueRef LandingPad) {
+ return unwrap<LandingPadInst>(LandingPad)->getNumClauses();
+}
+
+LLVMValueRef LLVMGetClause(LLVMValueRef LandingPad, unsigned Idx) {
+ return wrap(unwrap<LandingPadInst>(LandingPad)->getClause(Idx));
+}
+
+void LLVMAddClause(LLVMValueRef LandingPad, LLVMValueRef ClauseVal) {
+  unwrap<LandingPadInst>(LandingPad)->addClause(
+      cast<Constant>(unwrap(ClauseVal)));
+}
+
+LLVMBool LLVMIsCleanup(LLVMValueRef LandingPad) {
+ return unwrap<LandingPadInst>(LandingPad)->isCleanup();
+}
+
+void LLVMSetCleanup(LLVMValueRef LandingPad, LLVMBool Val) {
+ unwrap<LandingPadInst>(LandingPad)->setCleanup(Val);
+}
+
+void LLVMAddHandler(LLVMValueRef CatchSwitch, LLVMBasicBlockRef Dest) {
+ unwrap<CatchSwitchInst>(CatchSwitch)->addHandler(unwrap(Dest));
+}
+
+unsigned LLVMGetNumHandlers(LLVMValueRef CatchSwitch) {
+ return unwrap<CatchSwitchInst>(CatchSwitch)->getNumHandlers();
+}
+
+void LLVMGetHandlers(LLVMValueRef CatchSwitch, LLVMBasicBlockRef *Handlers) {
+ CatchSwitchInst *CSI = unwrap<CatchSwitchInst>(CatchSwitch);
+ for (const BasicBlock *H : CSI->handlers())
+ *Handlers++ = wrap(H);
+}
+
+LLVMValueRef LLVMGetParentCatchSwitch(LLVMValueRef CatchPad) {
+ return wrap(unwrap<CatchPadInst>(CatchPad)->getCatchSwitch());
+}
+
+void LLVMSetParentCatchSwitch(LLVMValueRef CatchPad, LLVMValueRef CatchSwitch) {
+ unwrap<CatchPadInst>(CatchPad)
+ ->setCatchSwitch(unwrap<CatchSwitchInst>(CatchSwitch));
+}
+
+/*--.. Funclets ...........................................................--*/
+
+LLVMValueRef LLVMGetArgOperand(LLVMValueRef Funclet, unsigned i) {
+ return wrap(unwrap<FuncletPadInst>(Funclet)->getArgOperand(i));
+}
+
+void LLVMSetArgOperand(LLVMValueRef Funclet, unsigned i, LLVMValueRef value) {
+ unwrap<FuncletPadInst>(Funclet)->setArgOperand(i, unwrap(value));
+}
+
+/*--.. Arithmetic ..........................................................--*/
+
+LLVMValueRef LLVMBuildAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNSWAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNUWAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFAdd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFAdd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNSWSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNUWSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFSub(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFSub(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNSWMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNUWMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFMul(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFMul(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildUDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateUDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildExactUDiv(LLVMBuilderRef B, LLVMValueRef LHS,
+ LLVMValueRef RHS, const char *Name) {
+ return wrap(unwrap(B)->CreateExactUDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildSDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildExactSDiv(LLVMBuilderRef B, LLVMValueRef LHS,
+ LLVMValueRef RHS, const char *Name) {
+ return wrap(unwrap(B)->CreateExactSDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFDiv(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFDiv(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildURem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateURem(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildSRem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSRem(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFRem(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFRem(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildShl(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateShl(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildLShr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateLShr(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildAShr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAShr(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildAnd(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAnd(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildOr(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateOr(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildXor(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateXor(unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildBinOp(LLVMBuilderRef B, LLVMOpcode Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+  return wrap(unwrap(B)->CreateBinOp(
+      Instruction::BinaryOps(map_from_llvmopcode(Op)), unwrap(LHS),
+      unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildNeg(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
+ return wrap(unwrap(B)->CreateNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildNSWNeg(LLVMBuilderRef B, LLVMValueRef V,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNSWNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildNUWNeg(LLVMBuilderRef B, LLVMValueRef V,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateNUWNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildFNeg(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
+ return wrap(unwrap(B)->CreateFNeg(unwrap(V), Name));
+}
+
+LLVMValueRef LLVMBuildNot(LLVMBuilderRef B, LLVMValueRef V, const char *Name) {
+ return wrap(unwrap(B)->CreateNot(unwrap(V), Name));
+}
+
+/*--.. Memory ..............................................................--*/
+
+LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
+ const char *Name) {
+  Type *ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
+  Constant *AllocSize = ConstantExpr::getSizeOf(unwrap(Ty));
+  AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
+  Instruction *Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
+ ITy, unwrap(Ty), AllocSize,
+ nullptr, nullptr, "");
+ return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
+}
+
+LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Val, const char *Name) {
+  Type *ITy = Type::getInt32Ty(unwrap(B)->GetInsertBlock()->getContext());
+  Constant *AllocSize = ConstantExpr::getSizeOf(unwrap(Ty));
+  AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, ITy);
+  Instruction *Malloc = CallInst::CreateMalloc(unwrap(B)->GetInsertBlock(),
+ ITy, unwrap(Ty), AllocSize,
+ unwrap(Val), nullptr, "");
+ return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
+}
+
+LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
+ LLVMValueRef Val, LLVMValueRef Len,
+ unsigned Align) {
+ return wrap(unwrap(B)->CreateMemSet(unwrap(Ptr), unwrap(Val), unwrap(Len),
+ MaybeAlign(Align)));
+}
+
+LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size) {
+ return wrap(unwrap(B)->CreateMemCpy(unwrap(Dst), MaybeAlign(DstAlign),
+ unwrap(Src), MaybeAlign(SrcAlign),
+ unwrap(Size)));
+}
+
+LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size) {
+ return wrap(unwrap(B)->CreateMemMove(unwrap(Dst), MaybeAlign(DstAlign),
+ unwrap(Src), MaybeAlign(SrcAlign),
+ unwrap(Size)));
+}
+
+LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef B, LLVMTypeRef Ty,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), nullptr, Name));
+}
+
+LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Val, const char *Name) {
+ return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), unwrap(Val), Name));
+}
+
+LLVMValueRef LLVMBuildFree(LLVMBuilderRef B, LLVMValueRef PointerVal) {
+ return wrap(unwrap(B)->Insert(
+ CallInst::CreateFree(unwrap(PointerVal), unwrap(B)->GetInsertBlock())));
+}
+
+LLVMValueRef LLVMBuildLoad(LLVMBuilderRef B, LLVMValueRef PointerVal,
+ const char *Name) {
+ Value *V = unwrap(PointerVal);
+ PointerType *Ty = cast<PointerType>(V->getType());
+
+ return wrap(
+ unwrap(B)->CreateLoad(Ty->getNonOpaquePointerElementType(), V, Name));
+}
+
+LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef PointerVal, const char *Name) {
+ return wrap(unwrap(B)->CreateLoad(unwrap(Ty), unwrap(PointerVal), Name));
+}
+
+LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMValueRef PointerVal) {
+ return wrap(unwrap(B)->CreateStore(unwrap(Val), unwrap(PointerVal)));
+}
+
+static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
+ switch (Ordering) {
+ case LLVMAtomicOrderingNotAtomic: return AtomicOrdering::NotAtomic;
+ case LLVMAtomicOrderingUnordered: return AtomicOrdering::Unordered;
+ case LLVMAtomicOrderingMonotonic: return AtomicOrdering::Monotonic;
+ case LLVMAtomicOrderingAcquire: return AtomicOrdering::Acquire;
+ case LLVMAtomicOrderingRelease: return AtomicOrdering::Release;
+ case LLVMAtomicOrderingAcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case LLVMAtomicOrderingSequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
+ }
+
+ llvm_unreachable("Invalid LLVMAtomicOrdering value!");
+}
+
+static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ case AtomicOrdering::NotAtomic: return LLVMAtomicOrderingNotAtomic;
+ case AtomicOrdering::Unordered: return LLVMAtomicOrderingUnordered;
+ case AtomicOrdering::Monotonic: return LLVMAtomicOrderingMonotonic;
+ case AtomicOrdering::Acquire: return LLVMAtomicOrderingAcquire;
+ case AtomicOrdering::Release: return LLVMAtomicOrderingRelease;
+ case AtomicOrdering::AcquireRelease:
+ return LLVMAtomicOrderingAcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return LLVMAtomicOrderingSequentiallyConsistent;
+ }
+
+ llvm_unreachable("Invalid AtomicOrdering value!");
+}
+
+static AtomicRMWInst::BinOp mapFromLLVMRMWBinOp(LLVMAtomicRMWBinOp BinOp) {
+ switch (BinOp) {
+ case LLVMAtomicRMWBinOpXchg: return AtomicRMWInst::Xchg;
+ case LLVMAtomicRMWBinOpAdd: return AtomicRMWInst::Add;
+ case LLVMAtomicRMWBinOpSub: return AtomicRMWInst::Sub;
+ case LLVMAtomicRMWBinOpAnd: return AtomicRMWInst::And;
+ case LLVMAtomicRMWBinOpNand: return AtomicRMWInst::Nand;
+ case LLVMAtomicRMWBinOpOr: return AtomicRMWInst::Or;
+ case LLVMAtomicRMWBinOpXor: return AtomicRMWInst::Xor;
+ case LLVMAtomicRMWBinOpMax: return AtomicRMWInst::Max;
+ case LLVMAtomicRMWBinOpMin: return AtomicRMWInst::Min;
+ case LLVMAtomicRMWBinOpUMax: return AtomicRMWInst::UMax;
+ case LLVMAtomicRMWBinOpUMin: return AtomicRMWInst::UMin;
+ case LLVMAtomicRMWBinOpFAdd: return AtomicRMWInst::FAdd;
+ case LLVMAtomicRMWBinOpFSub: return AtomicRMWInst::FSub;
+ case LLVMAtomicRMWBinOpFMax: return AtomicRMWInst::FMax;
+ case LLVMAtomicRMWBinOpFMin: return AtomicRMWInst::FMin;
+ }
+
+ llvm_unreachable("Invalid LLVMAtomicRMWBinOp value!");
+}
+
+static LLVMAtomicRMWBinOp mapToLLVMRMWBinOp(AtomicRMWInst::BinOp BinOp) {
+ switch (BinOp) {
+ case AtomicRMWInst::Xchg: return LLVMAtomicRMWBinOpXchg;
+ case AtomicRMWInst::Add: return LLVMAtomicRMWBinOpAdd;
+ case AtomicRMWInst::Sub: return LLVMAtomicRMWBinOpSub;
+ case AtomicRMWInst::And: return LLVMAtomicRMWBinOpAnd;
+ case AtomicRMWInst::Nand: return LLVMAtomicRMWBinOpNand;
+ case AtomicRMWInst::Or: return LLVMAtomicRMWBinOpOr;
+ case AtomicRMWInst::Xor: return LLVMAtomicRMWBinOpXor;
+ case AtomicRMWInst::Max: return LLVMAtomicRMWBinOpMax;
+ case AtomicRMWInst::Min: return LLVMAtomicRMWBinOpMin;
+ case AtomicRMWInst::UMax: return LLVMAtomicRMWBinOpUMax;
+ case AtomicRMWInst::UMin: return LLVMAtomicRMWBinOpUMin;
+ case AtomicRMWInst::FAdd: return LLVMAtomicRMWBinOpFAdd;
+ case AtomicRMWInst::FSub: return LLVMAtomicRMWBinOpFSub;
+ case AtomicRMWInst::FMax: return LLVMAtomicRMWBinOpFMax;
+ case AtomicRMWInst::FMin: return LLVMAtomicRMWBinOpFMin;
+ default: break;
+ }
+
+ llvm_unreachable("Invalid AtomicRMWBinOp value!");
+}
+
+// TODO: Should this and other atomic instructions support building with
+// "syncscope"?
+LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
+ LLVMBool isSingleThread, const char *Name) {
+ return wrap(
+ unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering),
+ isSingleThread ? SyncScope::SingleThread
+ : SyncScope::System,
+ Name));
+}
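+
+// Usage sketch (hypothetical builder `B`): a release fence restricted to the
+// current thread; the name string is accepted but fences produce no value.
+//
+//   LLVMValueRef F = LLVMBuildFence(B, LLVMAtomicOrderingRelease,
+//                                   /*isSingleThread=*/1, "");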
+
+LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices, unsigned NumIndices,
+ const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ Value *Val = unwrap(Pointer);
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
+ return wrap(unwrap(B)->CreateGEP(Ty, Val, IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, LLVMValueRef *Indices,
+ unsigned NumIndices, const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ return wrap(unwrap(B)->CreateGEP(unwrap(Ty), unwrap(Pointer), IdxList, Name));
+}
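+
+// Usage sketch (hypothetical `Ctx`, `B`, `StructTy`, and `Ptr`): indexing
+// field 2 of a struct through the explicitly typed variant.
+//
+//   LLVMTypeRef I32 = LLVMInt32TypeInContext(Ctx);
+//   LLVMValueRef Idx[] = {LLVMConstInt(I32, 0, 0), LLVMConstInt(I32, 2, 0)};
+//   LLVMValueRef Elt = LLVMBuildGEP2(B, StructTy, Ptr, Idx, 2, "elt");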
+
+LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices, unsigned NumIndices,
+ const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ Value *Val = unwrap(Pointer);
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
+ return wrap(unwrap(B)->CreateInBoundsGEP(Ty, Val, IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildInBoundsGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, LLVMValueRef *Indices,
+ unsigned NumIndices, const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ return wrap(
+ unwrap(B)->CreateInBoundsGEP(unwrap(Ty), unwrap(Pointer), IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ unsigned Idx, const char *Name) {
+ Value *Val = unwrap(Pointer);
+ Type *Ty = Val->getType()->getScalarType()->getNonOpaquePointerElementType();
+ return wrap(unwrap(B)->CreateStructGEP(Ty, Val, Idx, Name));
+}
+
+LLVMValueRef LLVMBuildStructGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, unsigned Idx,
+ const char *Name) {
+ return wrap(
+ unwrap(B)->CreateStructGEP(unwrap(Ty), unwrap(Pointer), Idx, Name));
+}
+
+LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateGlobalString(Str, Name));
+}
+
+LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateGlobalStringPtr(Str, Name));
+}
+
+LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->isVolatile();
+ if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ return SI->isVolatile();
+ if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(P))
+ return AI->isVolatile();
+ return cast<AtomicCmpXchgInst>(P)->isVolatile();
+}
+
+void LLVMSetVolatile(LLVMValueRef MemAccessInst, LLVMBool isVolatile) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->setVolatile(isVolatile);
+ if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ return SI->setVolatile(isVolatile);
+ if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(P))
+ return AI->setVolatile(isVolatile);
+ return cast<AtomicCmpXchgInst>(P)->setVolatile(isVolatile);
+}
+
+LLVMBool LLVMGetWeak(LLVMValueRef CmpXchgInst) {
+ return unwrap<AtomicCmpXchgInst>(CmpXchgInst)->isWeak();
+}
+
+void LLVMSetWeak(LLVMValueRef CmpXchgInst, LLVMBool isWeak) {
+ return unwrap<AtomicCmpXchgInst>(CmpXchgInst)->setWeak(isWeak);
+}
+
+LLVMAtomicOrdering LLVMGetOrdering(LLVMValueRef MemAccessInst) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ AtomicOrdering O;
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ O = LI->getOrdering();
+ else if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ O = SI->getOrdering();
+ else
+ O = cast<AtomicRMWInst>(P)->getOrdering();
+ return mapToLLVMOrdering(O);
+}
+
+void LLVMSetOrdering(LLVMValueRef MemAccessInst, LLVMAtomicOrdering Ordering) {
+ Value *P = unwrap<Value>(MemAccessInst);
+ AtomicOrdering O = mapFromLLVMOrdering(Ordering);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(P))
+ return LI->setOrdering(O);
+ return cast<StoreInst>(P)->setOrdering(O);
+}
+
+LLVMAtomicRMWBinOp LLVMGetAtomicRMWBinOp(LLVMValueRef Inst) {
+ return mapToLLVMRMWBinOp(unwrap<AtomicRMWInst>(Inst)->getOperation());
+}
+
+void LLVMSetAtomicRMWBinOp(LLVMValueRef Inst, LLVMAtomicRMWBinOp BinOp) {
+ unwrap<AtomicRMWInst>(Inst)->setOperation(mapFromLLVMRMWBinOp(BinOp));
+}
+
+/*--.. Casts ...............................................................--*/
+
+LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateTrunc(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildZExt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateZExt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildSExt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateSExt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPToUI(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPToUI(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPToSI(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPToSI(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildUIToFP(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateUIToFP(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildSIToFP(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateSIToFP(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPTrunc(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPTrunc(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildFPExt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPExt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildPtrToInt(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreatePtrToInt(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildIntToPtr(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateIntToPtr(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateBitCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildAddrSpaceCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateAddrSpaceCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildZExtOrBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateZExtOrBitCast(unwrap(Val), unwrap(DestTy),
+ Name));
+}
+
+LLVMValueRef LLVMBuildSExtOrBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateSExtOrBitCast(unwrap(Val), unwrap(DestTy),
+ Name));
+}
+
+LLVMValueRef LLVMBuildTruncOrBitCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateTruncOrBitCast(unwrap(Val), unwrap(DestTy),
+ Name));
+}
+
+LLVMValueRef LLVMBuildCast(LLVMBuilderRef B, LLVMOpcode Op, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+  return wrap(unwrap(B)->CreateCast(
+      Instruction::CastOps(map_from_llvmopcode(Op)), unwrap(Val),
+      unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildPointerCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreatePointerCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMValueRef LLVMBuildIntCast2(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, LLVMBool IsSigned,
+ const char *Name) {
+ return wrap(
+ unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy), IsSigned, Name));
+}
+
+LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy),
+ /*isSigned*/true, Name));
+}
+
+LLVMValueRef LLVMBuildFPCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name) {
+ return wrap(unwrap(B)->CreateFPCast(unwrap(Val), unwrap(DestTy), Name));
+}
+
+LLVMOpcode LLVMGetCastOpcode(LLVMValueRef Src, LLVMBool SrcIsSigned,
+ LLVMTypeRef DestTy, LLVMBool DestIsSigned) {
+ return map_to_llvmopcode(CastInst::getCastOpcode(
+ unwrap(Src), SrcIsSigned, unwrap(DestTy), DestIsSigned));
+}
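+
+// Usage sketch (hypothetical `B`, `Src`, and `DestTy`): let LLVM pick the
+// appropriate cast opcode for a pair of types, then build that cast.
+//
+//   LLVMOpcode Op = LLVMGetCastOpcode(Src, /*SrcIsSigned=*/1, DestTy,
+//                                     /*DestIsSigned=*/1);
+//   LLVMValueRef C = LLVMBuildCast(B, Op, Src, DestTy, "conv");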
+
+/*--.. Comparisons .........................................................--*/
+
+LLVMValueRef LLVMBuildICmp(LLVMBuilderRef B, LLVMIntPredicate Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateICmp(static_cast<ICmpInst::Predicate>(Op),
+ unwrap(LHS), unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildFCmp(LLVMBuilderRef B, LLVMRealPredicate Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFCmp(static_cast<FCmpInst::Predicate>(Op),
+ unwrap(LHS), unwrap(RHS), Name));
+}
+
+/*--.. Miscellaneous instructions ..........................................--*/
+
+LLVMValueRef LLVMBuildPhi(LLVMBuilderRef B, LLVMTypeRef Ty, const char *Name) {
+ return wrap(unwrap(B)->CreatePHI(unwrap(Ty), 0, Name));
+}
+
+LLVMValueRef LLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name) {
+ Value *V = unwrap(Fn);
+ FunctionType *FnT =
+ cast<FunctionType>(V->getType()->getNonOpaquePointerElementType());
+
+ return wrap(unwrap(B)->CreateCall(FnT, unwrap(Fn),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
+}
+
+LLVMValueRef LLVMBuildCall2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name) {
+ FunctionType *FTy = unwrap<FunctionType>(Ty);
+ return wrap(unwrap(B)->CreateCall(FTy, unwrap(Fn),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
+}
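+
+// Usage sketch (hypothetical `Ctx`, `B`, `Fn`, and argument `X`): the
+// explicitly typed call, which does not need to look through the callee's
+// pointer type.
+//
+//   LLVMTypeRef I32 = LLVMInt32TypeInContext(Ctx);
+//   LLVMTypeRef Params[] = {I32};
+//   LLVMTypeRef FnTy = LLVMFunctionType(I32, Params, 1, /*IsVarArg=*/0);
+//   LLVMValueRef Args[] = {X};
+//   LLVMValueRef R = LLVMBuildCall2(B, FnTy, Fn, Args, 1, "r");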
+
+LLVMValueRef LLVMBuildSelect(LLVMBuilderRef B, LLVMValueRef If,
+ LLVMValueRef Then, LLVMValueRef Else,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateSelect(unwrap(If), unwrap(Then), unwrap(Else),
+ Name));
+}
+
+LLVMValueRef LLVMBuildVAArg(LLVMBuilderRef B, LLVMValueRef List,
+ LLVMTypeRef Ty, const char *Name) {
+ return wrap(unwrap(B)->CreateVAArg(unwrap(List), unwrap(Ty), Name));
+}
+
+LLVMValueRef LLVMBuildExtractElement(LLVMBuilderRef B, LLVMValueRef VecVal,
+ LLVMValueRef Index, const char *Name) {
+ return wrap(unwrap(B)->CreateExtractElement(unwrap(VecVal), unwrap(Index),
+ Name));
+}
+
+LLVMValueRef LLVMBuildInsertElement(LLVMBuilderRef B, LLVMValueRef VecVal,
+ LLVMValueRef EltVal, LLVMValueRef Index,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateInsertElement(unwrap(VecVal), unwrap(EltVal),
+ unwrap(Index), Name));
+}
+
+LLVMValueRef LLVMBuildShuffleVector(LLVMBuilderRef B, LLVMValueRef V1,
+ LLVMValueRef V2, LLVMValueRef Mask,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateShuffleVector(unwrap(V1), unwrap(V2),
+ unwrap(Mask), Name));
+}
+
+LLVMValueRef LLVMBuildExtractValue(LLVMBuilderRef B, LLVMValueRef AggVal,
+ unsigned Index, const char *Name) {
+ return wrap(unwrap(B)->CreateExtractValue(unwrap(AggVal), Index, Name));
+}
+
+LLVMValueRef LLVMBuildInsertValue(LLVMBuilderRef B, LLVMValueRef AggVal,
+ LLVMValueRef EltVal, unsigned Index,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateInsertValue(unwrap(AggVal), unwrap(EltVal),
+ Index, Name));
+}
+
+LLVMValueRef LLVMBuildFreeze(LLVMBuilderRef B, LLVMValueRef Val,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateFreeze(unwrap(Val), Name));
+}
+
+LLVMValueRef LLVMBuildIsNull(LLVMBuilderRef B, LLVMValueRef Val,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateIsNull(unwrap(Val), Name));
+}
+
+LLVMValueRef LLVMBuildIsNotNull(LLVMBuilderRef B, LLVMValueRef Val,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateIsNotNull(unwrap(Val), Name));
+}
+
+LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef B, LLVMValueRef LHS,
+ LLVMValueRef RHS, const char *Name) {
+ Value *L = unwrap(LHS);
+ Type *ElemTy = L->getType()->getNonOpaquePointerElementType();
+ return wrap(unwrap(B)->CreatePtrDiff(ElemTy, L, unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildPtrDiff2(LLVMBuilderRef B, LLVMTypeRef ElemTy,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name) {
+ return wrap(unwrap(B)->CreatePtrDiff(unwrap(ElemTy), unwrap(LHS),
+ unwrap(RHS), Name));
+}
+
+LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B, LLVMAtomicRMWBinOp op,
+ LLVMValueRef PTR, LLVMValueRef Val,
+ LLVMAtomicOrdering ordering,
+ LLVMBool singleThread) {
+ AtomicRMWInst::BinOp intop = mapFromLLVMRMWBinOp(op);
+ return wrap(unwrap(B)->CreateAtomicRMW(
+ intop, unwrap(PTR), unwrap(Val), MaybeAlign(),
+ mapFromLLVMOrdering(ordering),
+ singleThread ? SyncScope::SingleThread : SyncScope::System));
+}
+
+LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
+ LLVMValueRef Cmp, LLVMValueRef New,
+ LLVMAtomicOrdering SuccessOrdering,
+ LLVMAtomicOrdering FailureOrdering,
+ LLVMBool singleThread) {
+
+ return wrap(unwrap(B)->CreateAtomicCmpXchg(
+ unwrap(Ptr), unwrap(Cmp), unwrap(New), MaybeAlign(),
+ mapFromLLVMOrdering(SuccessOrdering),
+ mapFromLLVMOrdering(FailureOrdering),
+ singleThread ? SyncScope::SingleThread : SyncScope::System));
+}
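+
+// Usage sketch (hypothetical `B`, `Ptr`, `Expected`, `Desired`): the result
+// is a {value, i1 success} pair, split with LLVMBuildExtractValue. Note the
+// failure ordering may not be Release or AcquireRelease.
+//
+//   LLVMValueRef Pair = LLVMBuildAtomicCmpXchg(
+//       B, Ptr, Expected, Desired, LLVMAtomicOrderingSequentiallyConsistent,
+//       LLVMAtomicOrderingMonotonic, /*singleThread=*/0);
+//   LLVMValueRef Old = LLVMBuildExtractValue(B, Pair, 0, "old");
+//   LLVMValueRef Ok = LLVMBuildExtractValue(B, Pair, 1, "ok");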
+
+unsigned LLVMGetNumMaskElements(LLVMValueRef SVInst) {
+ Value *P = unwrap<Value>(SVInst);
+ ShuffleVectorInst *I = cast<ShuffleVectorInst>(P);
+ return I->getShuffleMask().size();
+}
+
+int LLVMGetMaskValue(LLVMValueRef SVInst, unsigned Elt) {
+ Value *P = unwrap<Value>(SVInst);
+ ShuffleVectorInst *I = cast<ShuffleVectorInst>(P);
+ return I->getMaskValue(Elt);
+}
+
+int LLVMGetUndefMaskElem(void) { return UndefMaskElem; }
+
+LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst) {
+ Value *P = unwrap<Value>(AtomicInst);
+
+ if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
+ return I->getSyncScopeID() == SyncScope::SingleThread;
+ return cast<AtomicCmpXchgInst>(P)->getSyncScopeID() ==
+ SyncScope::SingleThread;
+}
+
+void LLVMSetAtomicSingleThread(LLVMValueRef AtomicInst, LLVMBool NewValue) {
+ Value *P = unwrap<Value>(AtomicInst);
+ SyncScope::ID SSID = NewValue ? SyncScope::SingleThread : SyncScope::System;
+
+ if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
+ return I->setSyncScopeID(SSID);
+ return cast<AtomicCmpXchgInst>(P)->setSyncScopeID(SSID);
+}
+
+LLVMAtomicOrdering LLVMGetCmpXchgSuccessOrdering(LLVMValueRef CmpXchgInst) {
+ Value *P = unwrap<Value>(CmpXchgInst);
+ return mapToLLVMOrdering(cast<AtomicCmpXchgInst>(P)->getSuccessOrdering());
+}
+
+void LLVMSetCmpXchgSuccessOrdering(LLVMValueRef CmpXchgInst,
+ LLVMAtomicOrdering Ordering) {
+ Value *P = unwrap<Value>(CmpXchgInst);
+ AtomicOrdering O = mapFromLLVMOrdering(Ordering);
+
+ return cast<AtomicCmpXchgInst>(P)->setSuccessOrdering(O);
+}
+
+LLVMAtomicOrdering LLVMGetCmpXchgFailureOrdering(LLVMValueRef CmpXchgInst) {
+ Value *P = unwrap<Value>(CmpXchgInst);
+ return mapToLLVMOrdering(cast<AtomicCmpXchgInst>(P)->getFailureOrdering());
+}
+
+void LLVMSetCmpXchgFailureOrdering(LLVMValueRef CmpXchgInst,
+ LLVMAtomicOrdering Ordering) {
+ Value *P = unwrap<Value>(CmpXchgInst);
+ AtomicOrdering O = mapFromLLVMOrdering(Ordering);
+
+ return cast<AtomicCmpXchgInst>(P)->setFailureOrdering(O);
+}
+
+/*===-- Module providers --------------------------------------------------===*/
+
+LLVMModuleProviderRef
+LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M) {
+ return reinterpret_cast<LLVMModuleProviderRef>(M);
+}
+
+void LLVMDisposeModuleProvider(LLVMModuleProviderRef MP) {
+ delete unwrap(MP);
+}
+
+/*===-- Memory buffers ----------------------------------------------------===*/
+
+LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(
+ const char *Path,
+ LLVMMemoryBufferRef *OutMemBuf,
+ char **OutMessage) {
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getFile(Path);
+ if (std::error_code EC = MBOrErr.getError()) {
+ *OutMessage = strdup(EC.message().c_str());
+ return 1;
+ }
+ *OutMemBuf = wrap(MBOrErr.get().release());
+ return 0;
+}
+
+LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
+ char **OutMessage) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getSTDIN();
+ if (std::error_code EC = MBOrErr.getError()) {
+ *OutMessage = strdup(EC.message().c_str());
+ return 1;
+ }
+ *OutMemBuf = wrap(MBOrErr.get().release());
+ return 0;
+}
+
+LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRange(
+ const char *InputData,
+ size_t InputDataLength,
+ const char *BufferName,
+ LLVMBool RequiresNullTerminator) {
+
+ return wrap(MemoryBuffer::getMemBuffer(StringRef(InputData, InputDataLength),
+ StringRef(BufferName),
+ RequiresNullTerminator).release());
+}
+
+LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRangeCopy(
+ const char *InputData,
+ size_t InputDataLength,
+ const char *BufferName) {
+
+ return wrap(
+ MemoryBuffer::getMemBufferCopy(StringRef(InputData, InputDataLength),
+ StringRef(BufferName)).release());
+}
+
+const char *LLVMGetBufferStart(LLVMMemoryBufferRef MemBuf) {
+ return unwrap(MemBuf)->getBufferStart();
+}
+
+size_t LLVMGetBufferSize(LLVMMemoryBufferRef MemBuf) {
+ return unwrap(MemBuf)->getBufferSize();
+}
+
+void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) {
+ delete unwrap(MemBuf);
+}
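+
+// Usage sketch: on failure these functions hand ownership of a malloc'd
+// message to the caller, so release it with LLVMDisposeMessage.
+//
+//   LLVMMemoryBufferRef MB;
+//   char *Err = NULL;
+//   if (LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &MB, &Err)) {
+//     fprintf(stderr, "%s\n", Err);
+//     LLVMDisposeMessage(Err);
+//   } else {
+//     /* ... LLVMGetBufferStart(MB), LLVMGetBufferSize(MB) ... */
+//     LLVMDisposeMemoryBuffer(MB);
+//   }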
+
+/*===-- Pass Registry -----------------------------------------------------===*/
+
+LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void) {
+ return wrap(PassRegistry::getPassRegistry());
+}
+
+/*===-- Pass Manager ------------------------------------------------------===*/
+
+LLVMPassManagerRef LLVMCreatePassManager() {
+ return wrap(new legacy::PassManager());
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M) {
+ return wrap(new legacy::FunctionPassManager(unwrap(M)));
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef P) {
+ return LLVMCreateFunctionPassManagerForModule(
+ reinterpret_cast<LLVMModuleRef>(P));
+}
+
+LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) {
+ return unwrap<legacy::PassManager>(PM)->run(*unwrap(M));
+}
+
+LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM) {
+ return unwrap<legacy::FunctionPassManager>(FPM)->doInitialization();
+}
+
+LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F) {
+ return unwrap<legacy::FunctionPassManager>(FPM)->run(*unwrap<Function>(F));
+}
+
+LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM) {
+ return unwrap<legacy::FunctionPassManager>(FPM)->doFinalization();
+}
+
+void LLVMDisposePassManager(LLVMPassManagerRef PM) {
+ delete unwrap(PM);
+}
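+
+// Usage sketch (hypothetical module `M`): the function pass manager follows
+// an initialize/run-per-function/finalize lifecycle.
+//
+//   LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
+//   LLVMInitializeFunctionPassManager(FPM);
+//   for (LLVMValueRef F = LLVMGetFirstFunction(M); F;
+//        F = LLVMGetNextFunction(F))
+//     LLVMRunFunctionPassManager(FPM, F);
+//   LLVMFinalizeFunctionPassManager(FPM);
+//   LLVMDisposePassManager(FPM);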
+
+/*===-- Threading ---------------------------------------------------------===*/
+
+LLVMBool LLVMStartMultithreaded() {
+ return LLVMIsMultithreaded();
+}
+
+void LLVMStopMultithreaded() {
+}
+
+LLVMBool LLVMIsMultithreaded() {
+ return llvm_is_multithreaded();
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp b/contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp
new file mode 100644
index 000000000000..34ffc9425281
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DIBuilder.cpp
@@ -0,0 +1,1157 @@
+//===--- DIBuilder.cpp - Debug Information Builder ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DIBuilder.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DIBuilder.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+using namespace llvm::dwarf;
+
+static cl::opt<bool>
+ UseDbgAddr("use-dbg-addr",
+ llvm::cl::desc("Use llvm.dbg.addr for all local variables"),
+ cl::init(false), cl::Hidden);
+
+DIBuilder::DIBuilder(Module &m, bool AllowUnresolvedNodes, DICompileUnit *CU)
+ : M(m), VMContext(M.getContext()), CUNode(CU), DeclareFn(nullptr),
+ ValueFn(nullptr), LabelFn(nullptr), AddrFn(nullptr),
+ AllowUnresolvedNodes(AllowUnresolvedNodes) {
+ if (CUNode) {
+ if (const auto &ETs = CUNode->getEnumTypes())
+ AllEnumTypes.assign(ETs.begin(), ETs.end());
+ if (const auto &RTs = CUNode->getRetainedTypes())
+ AllRetainTypes.assign(RTs.begin(), RTs.end());
+ if (const auto &GVs = CUNode->getGlobalVariables())
+ AllGVs.assign(GVs.begin(), GVs.end());
+ if (const auto &IMs = CUNode->getImportedEntities())
+ AllImportedModules.assign(IMs.begin(), IMs.end());
+ if (const auto &MNs = CUNode->getMacros())
+ AllMacrosPerParent.insert({nullptr, {MNs.begin(), MNs.end()}});
+ }
+}
+
+void DIBuilder::trackIfUnresolved(MDNode *N) {
+ if (!N)
+ return;
+ if (N->isResolved())
+ return;
+
+ assert(AllowUnresolvedNodes && "Cannot handle unresolved nodes");
+ UnresolvedNodes.emplace_back(N);
+}
+
+void DIBuilder::finalizeSubprogram(DISubprogram *SP) {
+ MDTuple *Temp = SP->getRetainedNodes().get();
+ if (!Temp || !Temp->isTemporary())
+ return;
+
+ SmallVector<Metadata *, 16> RetainedNodes;
+
+ auto PV = PreservedVariables.find(SP);
+ if (PV != PreservedVariables.end())
+ RetainedNodes.append(PV->second.begin(), PV->second.end());
+
+ auto PL = PreservedLabels.find(SP);
+ if (PL != PreservedLabels.end())
+ RetainedNodes.append(PL->second.begin(), PL->second.end());
+
+ DINodeArray Node = getOrCreateArray(RetainedNodes);
+
+ TempMDTuple(Temp)->replaceAllUsesWith(Node.get());
+}
+
+void DIBuilder::finalize() {
+ if (!CUNode) {
+ assert(!AllowUnresolvedNodes &&
+ "creating type nodes without a CU is not supported");
+ return;
+ }
+
+ if (!AllEnumTypes.empty())
+ CUNode->replaceEnumTypes(MDTuple::get(VMContext, AllEnumTypes));
+
+ SmallVector<Metadata *, 16> RetainValues;
+ // Declarations and definitions of the same type may be retained. Some
+ // clients RAUW these pairs, leaving duplicates in the retained types
+ // list. Use a set to remove the duplicates while we transform the
+ // TrackingVHs back into Values.
+ SmallPtrSet<Metadata *, 16> RetainSet;
+ for (unsigned I = 0, E = AllRetainTypes.size(); I < E; I++)
+ if (RetainSet.insert(AllRetainTypes[I]).second)
+ RetainValues.push_back(AllRetainTypes[I]);
+
+ if (!RetainValues.empty())
+ CUNode->replaceRetainedTypes(MDTuple::get(VMContext, RetainValues));
+
+ DISubprogramArray SPs = MDTuple::get(VMContext, AllSubprograms);
+ for (auto *SP : SPs)
+ finalizeSubprogram(SP);
+ for (auto *N : RetainValues)
+ if (auto *SP = dyn_cast<DISubprogram>(N))
+ finalizeSubprogram(SP);
+
+ if (!AllGVs.empty())
+ CUNode->replaceGlobalVariables(MDTuple::get(VMContext, AllGVs));
+
+ if (!AllImportedModules.empty())
+ CUNode->replaceImportedEntities(MDTuple::get(
+ VMContext, SmallVector<Metadata *, 16>(AllImportedModules.begin(),
+ AllImportedModules.end())));
+
+ for (const auto &I : AllMacrosPerParent) {
+    // DIMacroNodes with a null parent are direct children of the compile unit.
+ if (!I.first) {
+ CUNode->replaceMacros(MDTuple::get(VMContext, I.second.getArrayRef()));
+ continue;
+ }
+    // Otherwise, it must be a temporary DIMacroFile that needs to be resolved.
+ auto *TMF = cast<DIMacroFile>(I.first);
+ auto *MF = DIMacroFile::get(VMContext, dwarf::DW_MACINFO_start_file,
+ TMF->getLine(), TMF->getFile(),
+ getOrCreateMacroArray(I.second.getArrayRef()));
+ replaceTemporary(llvm::TempDIMacroNode(TMF), MF);
+ }
+
+ // Now that all temp nodes have been replaced or deleted, resolve remaining
+ // cycles.
+ for (const auto &N : UnresolvedNodes)
+ if (N && !N->isResolved())
+ N->resolveCycles();
+ UnresolvedNodes.clear();
+
+ // Can't handle unresolved nodes anymore.
+ AllowUnresolvedNodes = false;
+}
+
+/// If N is a compile unit, return nullptr; otherwise return N.
+static DIScope *getNonCompileUnitScope(DIScope *N) {
+ if (!N || isa<DICompileUnit>(N))
+ return nullptr;
+ return cast<DIScope>(N);
+}
+
+DICompileUnit *DIBuilder::createCompileUnit(
+ unsigned Lang, DIFile *File, StringRef Producer, bool isOptimized,
+ StringRef Flags, unsigned RunTimeVer, StringRef SplitName,
+ DICompileUnit::DebugEmissionKind Kind, uint64_t DWOId,
+ bool SplitDebugInlining, bool DebugInfoForProfiling,
+ DICompileUnit::DebugNameTableKind NameTableKind, bool RangesBaseAddress,
+ StringRef SysRoot, StringRef SDK) {
+
+ assert(((Lang <= dwarf::DW_LANG_Fortran08 && Lang >= dwarf::DW_LANG_C89) ||
+ (Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) &&
+ "Invalid Language tag");
+
+ assert(!CUNode && "Can only make one compile unit per DIBuilder instance");
+ CUNode = DICompileUnit::getDistinct(
+ VMContext, Lang, File, Producer, isOptimized, Flags, RunTimeVer,
+ SplitName, Kind, nullptr, nullptr, nullptr, nullptr, nullptr, DWOId,
+ SplitDebugInlining, DebugInfoForProfiling, NameTableKind,
+ RangesBaseAddress, SysRoot, SDK);
+
+  // Create a named metadata node so that it is easier to find the CU in a
+  // module.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu");
+ NMD->addOperand(CUNode);
+ trackIfUnresolved(CUNode);
+ return CUNode;
+}
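+
+// Usage sketch (hypothetical module `M`): a DIBuilder emits exactly one
+// compile unit, which anchors all other debug metadata.
+//
+//   DIBuilder DIB(M);
+//   DIFile *F = DIB.createFile("a.c", "/tmp");
+//   DICompileUnit *CU = DIB.createCompileUnit(
+//       dwarf::DW_LANG_C99, F, "my-compiler", /*isOptimized=*/false,
+//       /*Flags=*/"", /*RunTimeVer=*/0);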
+
+static DIImportedEntity *
+createImportedModule(LLVMContext &C, dwarf::Tag Tag, DIScope *Context,
+ Metadata *NS, DIFile *File, unsigned Line, StringRef Name,
+ DINodeArray Elements,
+ SmallVectorImpl<TrackingMDNodeRef> &AllImportedModules) {
+ if (Line)
+ assert(File && "Source location has line number but no file");
+ unsigned EntitiesCount = C.pImpl->DIImportedEntitys.size();
+ auto *M = DIImportedEntity::get(C, Tag, Context, cast_or_null<DINode>(NS),
+ File, Line, Name, Elements);
+ if (EntitiesCount < C.pImpl->DIImportedEntitys.size())
+ // A new Imported Entity was just added to the context.
+ // Add it to the Imported Modules list.
+ AllImportedModules.emplace_back(M);
+ return M;
+}
+
+DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context,
+ DINamespace *NS, DIFile *File,
+ unsigned Line,
+ DINodeArray Elements) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, NS, File, Line, StringRef(), Elements,
+ AllImportedModules);
+}
+
+DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context,
+ DIImportedEntity *NS,
+ DIFile *File, unsigned Line,
+ DINodeArray Elements) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, NS, File, Line, StringRef(), Elements,
+ AllImportedModules);
+}
+
+DIImportedEntity *DIBuilder::createImportedModule(DIScope *Context, DIModule *M,
+ DIFile *File, unsigned Line,
+ DINodeArray Elements) {
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_module,
+ Context, M, File, Line, StringRef(), Elements,
+ AllImportedModules);
+}
+
+DIImportedEntity *
+DIBuilder::createImportedDeclaration(DIScope *Context, DINode *Decl,
+ DIFile *File, unsigned Line,
+ StringRef Name, DINodeArray Elements) {
+  // Make sure to use the unique-identifier-based metadata reference for
+  // types that have one.
+ return ::createImportedModule(VMContext, dwarf::DW_TAG_imported_declaration,
+ Context, Decl, File, Line, Name, Elements,
+ AllImportedModules);
+}
+
+DIFile *DIBuilder::createFile(StringRef Filename, StringRef Directory,
+ Optional<DIFile::ChecksumInfo<StringRef>> CS,
+ Optional<StringRef> Source) {
+ return DIFile::get(VMContext, Filename, Directory, CS, Source);
+}
+
+DIMacro *DIBuilder::createMacro(DIMacroFile *Parent, unsigned LineNumber,
+ unsigned MacroType, StringRef Name,
+ StringRef Value) {
+ assert(!Name.empty() && "Unable to create macro without name");
+ assert((MacroType == dwarf::DW_MACINFO_undef ||
+ MacroType == dwarf::DW_MACINFO_define) &&
+ "Unexpected macro type");
+ auto *M = DIMacro::get(VMContext, MacroType, LineNumber, Name, Value);
+ AllMacrosPerParent[Parent].insert(M);
+ return M;
+}
+
+DIMacroFile *DIBuilder::createTempMacroFile(DIMacroFile *Parent,
+ unsigned LineNumber, DIFile *File) {
+ auto *MF = DIMacroFile::getTemporary(VMContext, dwarf::DW_MACINFO_start_file,
+ LineNumber, File, DIMacroNodeArray())
+ .release();
+ AllMacrosPerParent[Parent].insert(MF);
+  // Add the new temporary DIMacroFile to the per-parent macro map as a parent
+  // itself. This ensures that a DIMacroFile with no children still has an
+  // entry in the map; otherwise it would not be resolved in
+  // DIBuilder::finalize().
+ AllMacrosPerParent.insert({MF, {}});
+ return MF;
+}
+
+DIEnumerator *DIBuilder::createEnumerator(StringRef Name, uint64_t Val,
+ bool IsUnsigned) {
+ assert(!Name.empty() && "Unable to create enumerator without name");
+ return DIEnumerator::get(VMContext, APInt(64, Val, !IsUnsigned), IsUnsigned,
+ Name);
+}
+
+DIEnumerator *DIBuilder::createEnumerator(StringRef Name, const APSInt &Value) {
+ assert(!Name.empty() && "Unable to create enumerator without name");
+ return DIEnumerator::get(VMContext, APInt(Value), Value.isUnsigned(), Name);
+}
+
+DIBasicType *DIBuilder::createUnspecifiedType(StringRef Name) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIBasicType::get(VMContext, dwarf::DW_TAG_unspecified_type, Name);
+}
+
+DIBasicType *DIBuilder::createNullPtrType() {
+ return createUnspecifiedType("decltype(nullptr)");
+}
+
+DIBasicType *DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits,
+ unsigned Encoding,
+ DINode::DIFlags Flags) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIBasicType::get(VMContext, dwarf::DW_TAG_base_type, Name, SizeInBits,
+ 0, Encoding, Flags);
+}
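+
+// Usage sketch (hypothetical builder `DIB`): a signed 32-bit base type.
+//
+//   DIBasicType *Int32Ty =
+//       DIB.createBasicType("int", 32, dwarf::DW_ATE_signed);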
+
+DIStringType *DIBuilder::createStringType(StringRef Name, uint64_t SizeInBits) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIStringType::get(VMContext, dwarf::DW_TAG_string_type, Name,
+ SizeInBits, 0);
+}
+
+DIStringType *DIBuilder::createStringType(StringRef Name,
+ DIVariable *StringLength,
+ DIExpression *StrLocationExp) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIStringType::get(VMContext, dwarf::DW_TAG_string_type, Name,
+ StringLength, nullptr, StrLocationExp, 0, 0, 0);
+}
+
+DIStringType *DIBuilder::createStringType(StringRef Name,
+ DIExpression *StringLengthExp,
+ DIExpression *StrLocationExp) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIStringType::get(VMContext, dwarf::DW_TAG_string_type, Name, nullptr,
+ StringLengthExp, StrLocationExp, 0, 0, 0);
+}
+
+DIDerivedType *DIBuilder::createQualifiedType(unsigned Tag, DIType *FromTy) {
+ return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, FromTy, 0,
+ 0, 0, None, DINode::FlagZero);
+}
+
+DIDerivedType *
+DIBuilder::createPointerType(DIType *PointeeTy, uint64_t SizeInBits,
+ uint32_t AlignInBits,
+ Optional<unsigned> DWARFAddressSpace,
+ StringRef Name, DINodeArray Annotations) {
+ // FIXME: Why is there a name here?
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_pointer_type, Name,
+ nullptr, 0, nullptr, PointeeTy, SizeInBits,
+ AlignInBits, 0, DWARFAddressSpace, DINode::FlagZero,
+ nullptr, Annotations);
+}
+
+DIDerivedType *DIBuilder::createMemberPointerType(DIType *PointeeTy,
+ DIType *Base,
+ uint64_t SizeInBits,
+ uint32_t AlignInBits,
+ DINode::DIFlags Flags) {
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_ptr_to_member_type, "",
+ nullptr, 0, nullptr, PointeeTy, SizeInBits,
+ AlignInBits, 0, None, Flags, Base);
+}
+
+DIDerivedType *
+DIBuilder::createReferenceType(unsigned Tag, DIType *RTy, uint64_t SizeInBits,
+ uint32_t AlignInBits,
+ Optional<unsigned> DWARFAddressSpace) {
+ assert(RTy && "Unable to create reference type");
+ return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, RTy,
+ SizeInBits, AlignInBits, 0, DWARFAddressSpace,
+ DINode::FlagZero);
+}
+
+DIDerivedType *DIBuilder::createTypedef(DIType *Ty, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ DIScope *Context, uint32_t AlignInBits,
+ DINodeArray Annotations) {
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_typedef, Name, File,
+ LineNo, getNonCompileUnitScope(Context), Ty, 0,
+ AlignInBits, 0, None, DINode::FlagZero, nullptr,
+ Annotations);
+}
+
+DIDerivedType *DIBuilder::createFriend(DIType *Ty, DIType *FriendTy) {
+ assert(Ty && "Invalid type!");
+ assert(FriendTy && "Invalid friend type!");
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_friend, "", nullptr, 0, Ty,
+ FriendTy, 0, 0, 0, None, DINode::FlagZero);
+}
+
+DIDerivedType *DIBuilder::createInheritance(DIType *Ty, DIType *BaseTy,
+ uint64_t BaseOffset,
+ uint32_t VBPtrOffset,
+ DINode::DIFlags Flags) {
+ assert(Ty && "Unable to create inheritance");
+ Metadata *ExtraData = ConstantAsMetadata::get(
+ ConstantInt::get(IntegerType::get(VMContext, 32), VBPtrOffset));
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_inheritance, "", nullptr,
+ 0, Ty, BaseTy, 0, 0, BaseOffset, None, Flags,
+ ExtraData);
+}
+
+DIDerivedType *DIBuilder::createMemberType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations) {
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
+ LineNumber, getNonCompileUnitScope(Scope), Ty,
+ SizeInBits, AlignInBits, OffsetInBits, None, Flags,
+ nullptr, Annotations);
+}
+
+static ConstantAsMetadata *getConstantOrNull(Constant *C) {
+ if (C)
+ return ConstantAsMetadata::get(C);
+ return nullptr;
+}
+
+DIDerivedType *DIBuilder::createVariantMemberType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ Constant *Discriminant, DINode::DIFlags Flags, DIType *Ty) {
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
+ LineNumber, getNonCompileUnitScope(Scope), Ty,
+ SizeInBits, AlignInBits, OffsetInBits, None, Flags,
+ getConstantOrNull(Discriminant));
+}
+
+DIDerivedType *DIBuilder::createBitFieldMemberType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t OffsetInBits, uint64_t StorageOffsetInBits,
+ DINode::DIFlags Flags, DIType *Ty, DINodeArray Annotations) {
+ Flags |= DINode::FlagBitField;
+ return DIDerivedType::get(
+ VMContext, dwarf::DW_TAG_member, Name, File, LineNumber,
+ getNonCompileUnitScope(Scope), Ty, SizeInBits, /*AlignInBits=*/0,
+ OffsetInBits, None, Flags,
+ ConstantAsMetadata::get(ConstantInt::get(IntegerType::get(VMContext, 64),
+ StorageOffsetInBits)),
+ Annotations);
+}
+
+DIDerivedType *
+DIBuilder::createStaticMemberType(DIScope *Scope, StringRef Name, DIFile *File,
+ unsigned LineNumber, DIType *Ty,
+ DINode::DIFlags Flags, llvm::Constant *Val,
+ uint32_t AlignInBits) {
+ Flags |= DINode::FlagStaticMember;
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
+ LineNumber, getNonCompileUnitScope(Scope), Ty, 0,
+ AlignInBits, 0, None, Flags,
+ getConstantOrNull(Val));
+}
+
+DIDerivedType *
+DIBuilder::createObjCIVar(StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, DINode::DIFlags Flags,
+ DIType *Ty, MDNode *PropertyNode) {
+ return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
+ LineNumber, getNonCompileUnitScope(File), Ty,
+ SizeInBits, AlignInBits, OffsetInBits, None, Flags,
+ PropertyNode);
+}
+
+DIObjCProperty *
+DIBuilder::createObjCProperty(StringRef Name, DIFile *File, unsigned LineNumber,
+ StringRef GetterName, StringRef SetterName,
+ unsigned PropertyAttributes, DIType *Ty) {
+ return DIObjCProperty::get(VMContext, Name, File, LineNumber, GetterName,
+ SetterName, PropertyAttributes, Ty);
+}
+
+DITemplateTypeParameter *
+DIBuilder::createTemplateTypeParameter(DIScope *Context, StringRef Name,
+ DIType *Ty, bool isDefault) {
+ assert((!Context || isa<DICompileUnit>(Context)) && "Expected compile unit");
+ return DITemplateTypeParameter::get(VMContext, Name, Ty, isDefault);
+}
+
+static DITemplateValueParameter *
+createTemplateValueParameterHelper(LLVMContext &VMContext, unsigned Tag,
+ DIScope *Context, StringRef Name, DIType *Ty,
+ bool IsDefault, Metadata *MD) {
+ assert((!Context || isa<DICompileUnit>(Context)) && "Expected compile unit");
+ return DITemplateValueParameter::get(VMContext, Tag, Name, Ty, IsDefault, MD);
+}
+
+DITemplateValueParameter *
+DIBuilder::createTemplateValueParameter(DIScope *Context, StringRef Name,
+ DIType *Ty, bool isDefault,
+ Constant *Val) {
+ return createTemplateValueParameterHelper(
+ VMContext, dwarf::DW_TAG_template_value_parameter, Context, Name, Ty,
+ isDefault, getConstantOrNull(Val));
+}
+
+DITemplateValueParameter *
+DIBuilder::createTemplateTemplateParameter(DIScope *Context, StringRef Name,
+ DIType *Ty, StringRef Val) {
+ return createTemplateValueParameterHelper(
+ VMContext, dwarf::DW_TAG_GNU_template_template_param, Context, Name, Ty,
+ false, MDString::get(VMContext, Val));
+}
+
+DITemplateValueParameter *
+DIBuilder::createTemplateParameterPack(DIScope *Context, StringRef Name,
+ DIType *Ty, DINodeArray Val) {
+ return createTemplateValueParameterHelper(
+ VMContext, dwarf::DW_TAG_GNU_template_parameter_pack, Context, Name, Ty,
+ false, Val.get());
+}
+
+DICompositeType *DIBuilder::createClassType(
+ DIScope *Context, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements,
+ DIType *VTableHolder, MDNode *TemplateParams, StringRef UniqueIdentifier) {
+ assert((!Context || isa<DIScope>(Context)) &&
+ "createClassType should be called with a valid Context");
+
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_structure_type, Name, File, LineNumber,
+ getNonCompileUnitScope(Context), DerivedFrom, SizeInBits, AlignInBits,
+ OffsetInBits, Flags, Elements, 0, VTableHolder,
+ cast_or_null<MDTuple>(TemplateParams), UniqueIdentifier);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *DIBuilder::createStructType(
+ DIScope *Context, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags,
+ DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang,
+ DIType *VTableHolder, StringRef UniqueIdentifier) {
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_structure_type, Name, File, LineNumber,
+ getNonCompileUnitScope(Context), DerivedFrom, SizeInBits, AlignInBits, 0,
+ Flags, Elements, RunTimeLang, VTableHolder, nullptr, UniqueIdentifier);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *DIBuilder::createUnionType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags,
+ DINodeArray Elements, unsigned RunTimeLang, StringRef UniqueIdentifier) {
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_union_type, Name, File, LineNumber,
+ getNonCompileUnitScope(Scope), nullptr, SizeInBits, AlignInBits, 0, Flags,
+ Elements, RunTimeLang, nullptr, nullptr, UniqueIdentifier);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *
+DIBuilder::createVariantPart(DIScope *Scope, StringRef Name, DIFile *File,
+ unsigned LineNumber, uint64_t SizeInBits,
+ uint32_t AlignInBits, DINode::DIFlags Flags,
+ DIDerivedType *Discriminator, DINodeArray Elements,
+ StringRef UniqueIdentifier) {
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_variant_part, Name, File, LineNumber,
+ getNonCompileUnitScope(Scope), nullptr, SizeInBits, AlignInBits, 0, Flags,
+ Elements, 0, nullptr, nullptr, UniqueIdentifier, Discriminator);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DISubroutineType *DIBuilder::createSubroutineType(DITypeRefArray ParameterTypes,
+ DINode::DIFlags Flags,
+ unsigned CC) {
+ return DISubroutineType::get(VMContext, Flags, CC, ParameterTypes);
+}
+
+DICompositeType *DIBuilder::createEnumerationType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
+ DIType *UnderlyingType, StringRef UniqueIdentifier, bool IsScoped) {
+ auto *CTy = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_enumeration_type, Name, File, LineNumber,
+ getNonCompileUnitScope(Scope), UnderlyingType, SizeInBits, AlignInBits, 0,
+ IsScoped ? DINode::FlagEnumClass : DINode::FlagZero, Elements, 0, nullptr,
+ nullptr, UniqueIdentifier);
+ AllEnumTypes.push_back(CTy);
+ trackIfUnresolved(CTy);
+ return CTy;
+}
+
+DIDerivedType *DIBuilder::createSetType(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ uint64_t SizeInBits,
+ uint32_t AlignInBits, DIType *Ty) {
+ auto *R =
+ DIDerivedType::get(VMContext, dwarf::DW_TAG_set_type, Name, File, LineNo,
+ getNonCompileUnitScope(Scope), Ty, SizeInBits,
+ AlignInBits, 0, None, DINode::FlagZero);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *
+DIBuilder::createArrayType(uint64_t Size, uint32_t AlignInBits, DIType *Ty,
+ DINodeArray Subscripts,
+ PointerUnion<DIExpression *, DIVariable *> DL,
+ PointerUnion<DIExpression *, DIVariable *> AS,
+ PointerUnion<DIExpression *, DIVariable *> AL,
+ PointerUnion<DIExpression *, DIVariable *> RK) {
+ auto *R = DICompositeType::get(
+ VMContext, dwarf::DW_TAG_array_type, "", nullptr, 0, nullptr, Ty, Size,
+ AlignInBits, 0, DINode::FlagZero, Subscripts, 0, nullptr, nullptr, "",
+ nullptr,
+ DL.is<DIExpression *>() ? (Metadata *)DL.get<DIExpression *>()
+ : (Metadata *)DL.get<DIVariable *>(),
+ AS.is<DIExpression *>() ? (Metadata *)AS.get<DIExpression *>()
+ : (Metadata *)AS.get<DIVariable *>(),
+ AL.is<DIExpression *>() ? (Metadata *)AL.get<DIExpression *>()
+ : (Metadata *)AL.get<DIVariable *>(),
+ RK.is<DIExpression *>() ? (Metadata *)RK.get<DIExpression *>()
+ : (Metadata *)RK.get<DIVariable *>());
+ trackIfUnresolved(R);
+ return R;
+}
+
+DICompositeType *DIBuilder::createVectorType(uint64_t Size,
+ uint32_t AlignInBits, DIType *Ty,
+ DINodeArray Subscripts) {
+ auto *R = DICompositeType::get(VMContext, dwarf::DW_TAG_array_type, "",
+ nullptr, 0, nullptr, Ty, Size, AlignInBits, 0,
+ DINode::FlagVector, Subscripts, 0, nullptr);
+ trackIfUnresolved(R);
+ return R;
+}
+
+DISubprogram *DIBuilder::createArtificialSubprogram(DISubprogram *SP) {
+ auto NewSP = SP->cloneWithFlags(SP->getFlags() | DINode::FlagArtificial);
+ return MDNode::replaceWithDistinct(std::move(NewSP));
+}
+
+static DIType *createTypeWithFlags(const DIType *Ty,
+ DINode::DIFlags FlagsToSet) {
+ auto NewTy = Ty->cloneWithFlags(Ty->getFlags() | FlagsToSet);
+ return MDNode::replaceWithUniqued(std::move(NewTy));
+}
+
+DIType *DIBuilder::createArtificialType(DIType *Ty) {
+ // FIXME: Restrict this to the nodes where it's valid.
+ if (Ty->isArtificial())
+ return Ty;
+ return createTypeWithFlags(Ty, DINode::FlagArtificial);
+}
+
+DIType *DIBuilder::createObjectPointerType(DIType *Ty) {
+ // FIXME: Restrict this to the nodes where it's valid.
+ if (Ty->isObjectPointer())
+ return Ty;
+ DINode::DIFlags Flags = DINode::FlagObjectPointer | DINode::FlagArtificial;
+ return createTypeWithFlags(Ty, Flags);
+}
+
+void DIBuilder::retainType(DIScope *T) {
+ assert(T && "Expected non-null type");
+ assert((isa<DIType>(T) || (isa<DISubprogram>(T) &&
+ cast<DISubprogram>(T)->isDefinition() == false)) &&
+ "Expected type or subprogram declaration");
+ AllRetainTypes.emplace_back(T);
+}
+
+DIBasicType *DIBuilder::createUnspecifiedParameter() { return nullptr; }
+
+DICompositeType *
+DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIScope *Scope,
+ DIFile *F, unsigned Line, unsigned RuntimeLang,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ StringRef UniqueIdentifier) {
+ // FIXME: Define in terms of createReplaceableForwardDecl() by calling
+ // replaceWithUniqued().
+ auto *RetTy = DICompositeType::get(
+ VMContext, Tag, Name, F, Line, getNonCompileUnitScope(Scope), nullptr,
+ SizeInBits, AlignInBits, 0, DINode::FlagFwdDecl, nullptr, RuntimeLang,
+ nullptr, nullptr, UniqueIdentifier);
+ trackIfUnresolved(RetTy);
+ return RetTy;
+}
+
+DICompositeType *DIBuilder::createReplaceableCompositeType(
+ unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line,
+ unsigned RuntimeLang, uint64_t SizeInBits, uint32_t AlignInBits,
+ DINode::DIFlags Flags, StringRef UniqueIdentifier,
+ DINodeArray Annotations) {
+ auto *RetTy =
+ DICompositeType::getTemporary(
+ VMContext, Tag, Name, F, Line, getNonCompileUnitScope(Scope), nullptr,
+ SizeInBits, AlignInBits, 0, Flags, nullptr, RuntimeLang, nullptr,
+ nullptr, UniqueIdentifier, nullptr, nullptr, nullptr, nullptr,
+ nullptr, Annotations)
+ .release();
+ trackIfUnresolved(RetTy);
+ return RetTy;
+}
+
+DINodeArray DIBuilder::getOrCreateArray(ArrayRef<Metadata *> Elements) {
+ return MDTuple::get(VMContext, Elements);
+}
+
+DIMacroNodeArray
+DIBuilder::getOrCreateMacroArray(ArrayRef<Metadata *> Elements) {
+ return MDTuple::get(VMContext, Elements);
+}
+
+DITypeRefArray DIBuilder::getOrCreateTypeArray(ArrayRef<Metadata *> Elements) {
+ SmallVector<llvm::Metadata *, 16> Elts;
+ for (Metadata *E : Elements) {
+ if (isa_and_nonnull<MDNode>(E))
+ Elts.push_back(cast<DIType>(E));
+ else
+ Elts.push_back(E);
+ }
+ return DITypeRefArray(MDNode::get(VMContext, Elts));
+}
+
+DISubrange *DIBuilder::getOrCreateSubrange(int64_t Lo, int64_t Count) {
+ auto *LB = ConstantAsMetadata::get(
+ ConstantInt::getSigned(Type::getInt64Ty(VMContext), Lo));
+ auto *CountNode = ConstantAsMetadata::get(
+ ConstantInt::getSigned(Type::getInt64Ty(VMContext), Count));
+ return DISubrange::get(VMContext, CountNode, LB, nullptr, nullptr);
+}
+
+DISubrange *DIBuilder::getOrCreateSubrange(int64_t Lo, Metadata *CountNode) {
+ auto *LB = ConstantAsMetadata::get(
+ ConstantInt::getSigned(Type::getInt64Ty(VMContext), Lo));
+ return DISubrange::get(VMContext, CountNode, LB, nullptr, nullptr);
+}
+
+DISubrange *DIBuilder::getOrCreateSubrange(Metadata *CountNode, Metadata *LB,
+ Metadata *UB, Metadata *Stride) {
+ return DISubrange::get(VMContext, CountNode, LB, UB, Stride);
+}
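+
+// Usage sketch (hypothetical `DIB` and `Int32Ty`): describing `int a[10]` as
+// an array type with a single 0..9 subrange.
+//
+//   DISubrange *Range = DIB.getOrCreateSubrange(/*Lo=*/0, /*Count=*/10);
+//   DICompositeType *ArrTy = DIB.createArrayType(
+//       /*Size=*/320, /*AlignInBits=*/32, Int32Ty,
+//       DIB.getOrCreateArray({Range}));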
+
+DIGenericSubrange *DIBuilder::getOrCreateGenericSubrange(
+ DIGenericSubrange::BoundType CountNode, DIGenericSubrange::BoundType LB,
+ DIGenericSubrange::BoundType UB, DIGenericSubrange::BoundType Stride) {
+ auto ConvToMetadata = [&](DIGenericSubrange::BoundType Bound) -> Metadata * {
+ return Bound.is<DIExpression *>() ? (Metadata *)Bound.get<DIExpression *>()
+ : (Metadata *)Bound.get<DIVariable *>();
+ };
+ return DIGenericSubrange::get(VMContext, ConvToMetadata(CountNode),
+ ConvToMetadata(LB), ConvToMetadata(UB),
+ ConvToMetadata(Stride));
+}
+
+static void checkGlobalVariableScope(DIScope *Context) {
+#ifndef NDEBUG
+ if (auto *CT =
+ dyn_cast_or_null<DICompositeType>(getNonCompileUnitScope(Context)))
+ assert(CT->getIdentifier().empty() &&
+ "Context of a global variable should not be a type with identifier");
+#endif
+}
+
+DIGlobalVariableExpression *DIBuilder::createGlobalVariableExpression(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
+ unsigned LineNumber, DIType *Ty, bool IsLocalToUnit, bool isDefined,
+ DIExpression *Expr, MDNode *Decl, MDTuple *TemplateParams,
+ uint32_t AlignInBits, DINodeArray Annotations) {
+ checkGlobalVariableScope(Context);
+
+ auto *GV = DIGlobalVariable::getDistinct(
+ VMContext, cast_or_null<DIScope>(Context), Name, LinkageName, F,
+ LineNumber, Ty, IsLocalToUnit, isDefined,
+ cast_or_null<DIDerivedType>(Decl), TemplateParams, AlignInBits,
+ Annotations);
+ if (!Expr)
+ Expr = createExpression();
+ auto *N = DIGlobalVariableExpression::get(VMContext, GV, Expr);
+ AllGVs.push_back(N);
+ return N;
+}
+
+DIGlobalVariable *DIBuilder::createTempGlobalVariableFwdDecl(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
+ unsigned LineNumber, DIType *Ty, bool IsLocalToUnit, MDNode *Decl,
+ MDTuple *TemplateParams, uint32_t AlignInBits) {
+ checkGlobalVariableScope(Context);
+
+ return DIGlobalVariable::getTemporary(
+ VMContext, cast_or_null<DIScope>(Context), Name, LinkageName, F,
+ LineNumber, Ty, IsLocalToUnit, false,
+ cast_or_null<DIDerivedType>(Decl), TemplateParams, AlignInBits,
+ nullptr)
+ .release();
+}
+
+static DILocalVariable *createLocalVariable(
+ LLVMContext &VMContext,
+ DenseMap<MDNode *, SmallVector<TrackingMDNodeRef, 1>> &PreservedVariables,
+ DIScope *Scope, StringRef Name, unsigned ArgNo, DIFile *File,
+ unsigned LineNo, DIType *Ty, bool AlwaysPreserve, DINode::DIFlags Flags,
+ uint32_t AlignInBits, DINodeArray Annotations = nullptr) {
+ // FIXME: Why getNonCompileUnitScope()?
+ // FIXME: Why is "!Context" okay here?
+ // FIXME: Why doesn't this check for a subprogram or lexical block (AFAICT
+ // the only valid scopes)?
+ DIScope *Context = getNonCompileUnitScope(Scope);
+
+ auto *Node = DILocalVariable::get(
+ VMContext, cast_or_null<DILocalScope>(Context), Name, File, LineNo, Ty,
+ ArgNo, Flags, AlignInBits, Annotations);
+ if (AlwaysPreserve) {
+    // The optimizer may remove local variables. If there is an interest in
+    // preserving variable info in such a situation, stash it in a named
+    // mdnode.
+ DISubprogram *Fn = getDISubprogram(Scope);
+ assert(Fn && "Missing subprogram for local variable");
+ PreservedVariables[Fn].emplace_back(Node);
+ }
+ return Node;
+}
+
+DILocalVariable *DIBuilder::createAutoVariable(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ DIType *Ty, bool AlwaysPreserve,
+ DINode::DIFlags Flags,
+ uint32_t AlignInBits) {
+ return createLocalVariable(VMContext, PreservedVariables, Scope, Name,
+ /* ArgNo */ 0, File, LineNo, Ty, AlwaysPreserve,
+ Flags, AlignInBits);
+}
+
+DILocalVariable *DIBuilder::createParameterVariable(
+ DIScope *Scope, StringRef Name, unsigned ArgNo, DIFile *File,
+ unsigned LineNo, DIType *Ty, bool AlwaysPreserve, DINode::DIFlags Flags,
+ DINodeArray Annotations) {
+ assert(ArgNo && "Expected non-zero argument number for parameter");
+ return createLocalVariable(VMContext, PreservedVariables, Scope, Name, ArgNo,
+ File, LineNo, Ty, AlwaysPreserve, Flags,
+ /*AlignInBits=*/0, Annotations);
+}
+
+DILabel *DIBuilder::createLabel(DIScope *Scope, StringRef Name, DIFile *File,
+ unsigned LineNo, bool AlwaysPreserve) {
+ DIScope *Context = getNonCompileUnitScope(Scope);
+
+ auto *Node = DILabel::get(VMContext, cast_or_null<DILocalScope>(Context),
+ Name, File, LineNo);
+
+ if (AlwaysPreserve) {
+    // The optimizer may remove labels. If there is an interest in preserving
+    // label info in such a situation, append it to the list of retained
+    // nodes of the DISubprogram.
+ DISubprogram *Fn = getDISubprogram(Scope);
+ assert(Fn && "Missing subprogram for label");
+ PreservedLabels[Fn].emplace_back(Node);
+ }
+ return Node;
+}
+
+DIExpression *DIBuilder::createExpression(ArrayRef<uint64_t> Addr) {
+ return DIExpression::get(VMContext, Addr);
+}
+
+template <class... Ts>
+static DISubprogram *getSubprogram(bool IsDistinct, Ts &&...Args) {
+ if (IsDistinct)
+ return DISubprogram::getDistinct(std::forward<Ts>(Args)...);
+ return DISubprogram::get(std::forward<Ts>(Args)...);
+}
+
+DISubprogram *DIBuilder::createFunction(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine,
+ DINode::DIFlags Flags, DISubprogram::DISPFlags SPFlags,
+ DITemplateParameterArray TParams, DISubprogram *Decl,
+ DITypeArray ThrownTypes, DINodeArray Annotations,
+ StringRef TargetFuncName) {
+ bool IsDefinition = SPFlags & DISubprogram::SPFlagDefinition;
+ auto *Node = getSubprogram(
+ /*IsDistinct=*/IsDefinition, VMContext, getNonCompileUnitScope(Context),
+ Name, LinkageName, File, LineNo, Ty, ScopeLine, nullptr, 0, 0, Flags,
+ SPFlags, IsDefinition ? CUNode : nullptr, TParams, Decl,
+ MDTuple::getTemporary(VMContext, None).release(), ThrownTypes,
+ Annotations, TargetFuncName);
+
+ if (IsDefinition)
+ AllSubprograms.push_back(Node);
+ trackIfUnresolved(Node);
+ return Node;
+}
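+
+// Usage sketch (hypothetical `DIB`, `CU`, `F`, `Int32Ty`, and IR function
+// `Fn`): a definition gets SPFlagDefinition and is attached to the function.
+//
+//   DISubroutineType *FnTy = DIB.createSubroutineType(
+//       DIB.getOrCreateTypeArray({Int32Ty, Int32Ty}));
+//   DISubprogram *SP = DIB.createFunction(
+//       CU, "foo", "foo", F, /*LineNo=*/7, FnTy, /*ScopeLine=*/7,
+//       DINode::FlagPrototyped, DISubprogram::SPFlagDefinition);
+//   Fn->setSubprogram(SP);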
+
+DISubprogram *DIBuilder::createTempFunctionFwdDecl(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine,
+ DINode::DIFlags Flags, DISubprogram::DISPFlags SPFlags,
+ DITemplateParameterArray TParams, DISubprogram *Decl,
+ DITypeArray ThrownTypes) {
+ bool IsDefinition = SPFlags & DISubprogram::SPFlagDefinition;
+ return DISubprogram::getTemporary(VMContext, getNonCompileUnitScope(Context),
+ Name, LinkageName, File, LineNo, Ty,
+ ScopeLine, nullptr, 0, 0, Flags, SPFlags,
+ IsDefinition ? CUNode : nullptr, TParams,
+ Decl, nullptr, ThrownTypes)
+ .release();
+}
+
+DISubprogram *DIBuilder::createMethod(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
+ unsigned LineNo, DISubroutineType *Ty, unsigned VIndex, int ThisAdjustment,
+ DIType *VTableHolder, DINode::DIFlags Flags,
+ DISubprogram::DISPFlags SPFlags, DITemplateParameterArray TParams,
+ DITypeArray ThrownTypes) {
+ assert(getNonCompileUnitScope(Context) &&
+ "Methods should have both a Context and a context that isn't "
+ "the compile unit.");
+ // FIXME: Do we want to use different scope/lines?
+ bool IsDefinition = SPFlags & DISubprogram::SPFlagDefinition;
+ auto *SP = getSubprogram(
+ /*IsDistinct=*/IsDefinition, VMContext, cast<DIScope>(Context), Name,
+ LinkageName, F, LineNo, Ty, LineNo, VTableHolder, VIndex, ThisAdjustment,
+ Flags, SPFlags, IsDefinition ? CUNode : nullptr, TParams, nullptr,
+ nullptr, ThrownTypes);
+
+ if (IsDefinition)
+ AllSubprograms.push_back(SP);
+ trackIfUnresolved(SP);
+ return SP;
+}
+
+DICommonBlock *DIBuilder::createCommonBlock(DIScope *Scope,
+ DIGlobalVariable *Decl,
+ StringRef Name, DIFile *File,
+ unsigned LineNo) {
+ return DICommonBlock::get(VMContext, Scope, Decl, Name, File, LineNo);
+}
+
+DINamespace *DIBuilder::createNameSpace(DIScope *Scope, StringRef Name,
+ bool ExportSymbols) {
+
+ // It is okay to *not* make anonymous top-level namespaces distinct, because
+ // all nodes that have an anonymous namespace as their parent scope are
+ // guaranteed to be unique and/or are linked to their containing
+ // DICompileUnit. This decision is an explicit tradeoff of link time versus
+ // memory usage versus code simplicity and may get revisited in the future.
+ return DINamespace::get(VMContext, getNonCompileUnitScope(Scope), Name,
+ ExportSymbols);
+}
+
+DIModule *DIBuilder::createModule(DIScope *Scope, StringRef Name,
+ StringRef ConfigurationMacros,
+ StringRef IncludePath, StringRef APINotesFile,
+ DIFile *File, unsigned LineNo, bool IsDecl) {
+ return DIModule::get(VMContext, File, getNonCompileUnitScope(Scope), Name,
+ ConfigurationMacros, IncludePath, APINotesFile, LineNo,
+ IsDecl);
+}
+
+DILexicalBlockFile *DIBuilder::createLexicalBlockFile(DIScope *Scope,
+ DIFile *File,
+ unsigned Discriminator) {
+ return DILexicalBlockFile::get(VMContext, Scope, File, Discriminator);
+}
+
+DILexicalBlock *DIBuilder::createLexicalBlock(DIScope *Scope, DIFile *File,
+ unsigned Line, unsigned Col) {
+ // Make these distinct, to avoid merging two lexical blocks on the same
+ // file/line/column.
+ return DILexicalBlock::getDistinct(VMContext, getNonCompileUnitScope(Scope),
+ File, Line, Col);
+}
+
+Instruction *DIBuilder::insertDeclare(Value *Storage, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ Instruction *InsertBefore) {
+ return insertDeclare(Storage, VarInfo, Expr, DL, InsertBefore->getParent(),
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertDeclare(Value *Storage, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ BasicBlock *InsertAtEnd) {
+ // If this block already has a terminator then insert this intrinsic before
+ // the terminator. Otherwise, put it at the end of the block.
+ Instruction *InsertBefore = InsertAtEnd->getTerminator();
+ return insertDeclare(Storage, VarInfo, Expr, DL, InsertAtEnd, InsertBefore);
+}
+
+Instruction *DIBuilder::insertLabel(DILabel *LabelInfo, const DILocation *DL,
+ Instruction *InsertBefore) {
+ return insertLabel(LabelInfo, DL,
+ InsertBefore ? InsertBefore->getParent() : nullptr,
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertLabel(DILabel *LabelInfo, const DILocation *DL,
+ BasicBlock *InsertAtEnd) {
+ return insertLabel(LabelInfo, DL, InsertAtEnd, nullptr);
+}
+
+Instruction *DIBuilder::insertDbgValueIntrinsic(Value *V,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ Instruction *InsertBefore) {
+ return insertDbgValueIntrinsic(
+ V, VarInfo, Expr, DL, InsertBefore ? InsertBefore->getParent() : nullptr,
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertDbgValueIntrinsic(Value *V,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertAtEnd) {
+ return insertDbgValueIntrinsic(V, VarInfo, Expr, DL, InsertAtEnd, nullptr);
+}
+
+Instruction *DIBuilder::insertDbgAddrIntrinsic(Value *V,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ Instruction *InsertBefore) {
+ return insertDbgAddrIntrinsic(
+ V, VarInfo, Expr, DL, InsertBefore ? InsertBefore->getParent() : nullptr,
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertDbgAddrIntrinsic(Value *V,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertAtEnd) {
+ return insertDbgAddrIntrinsic(V, VarInfo, Expr, DL, InsertAtEnd, nullptr);
+}
+
+/// Initialize IRBuilder for inserting dbg.declare and dbg.value intrinsics.
+/// This abstracts over the various ways to specify an insert position.
+static void initIRBuilder(IRBuilder<> &Builder, const DILocation *DL,
+ BasicBlock *InsertBB, Instruction *InsertBefore) {
+ if (InsertBefore)
+ Builder.SetInsertPoint(InsertBefore);
+ else if (InsertBB)
+ Builder.SetInsertPoint(InsertBB);
+ Builder.SetCurrentDebugLocation(DL);
+}
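+
+// Illustrative sketch (editorial, not part of the original change): every
+// insertion overload above funnels into this position logic. Assuming DIB is
+// a DIBuilder and Var, Expr, and Loc are previously built debug metadata,
+// the two equivalent call shapes are:
+//
+//   DIB.insertDeclare(Alloca, Var, Expr, Loc, SomeInstruction); // before it
+//   DIB.insertDeclare(Alloca, Var, Expr, Loc, SomeBasicBlock);  // block end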
+
+static Value *getDbgIntrinsicValueImpl(LLVMContext &VMContext, Value *V) {
+ assert(V && "no value passed to dbg intrinsic");
+ return MetadataAsValue::get(VMContext, ValueAsMetadata::get(V));
+}
+
+static Function *getDeclareIntrin(Module &M) {
+ return Intrinsic::getDeclaration(&M, UseDbgAddr ? Intrinsic::dbg_addr
+ : Intrinsic::dbg_declare);
+}
+
+Instruction *DIBuilder::insertDbgValueIntrinsic(
+ llvm::Value *Val, DILocalVariable *VarInfo, DIExpression *Expr,
+ const DILocation *DL, BasicBlock *InsertBB, Instruction *InsertBefore) {
+ if (!ValueFn)
+ ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
+ return insertDbgIntrinsic(ValueFn, Val, VarInfo, Expr, DL, InsertBB,
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertDbgAddrIntrinsic(
+ llvm::Value *Val, DILocalVariable *VarInfo, DIExpression *Expr,
+ const DILocation *DL, BasicBlock *InsertBB, Instruction *InsertBefore) {
+ if (!AddrFn)
+ AddrFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_addr);
+ return insertDbgIntrinsic(AddrFn, Val, VarInfo, Expr, DL, InsertBB,
+ InsertBefore);
+}
+
+Instruction *DIBuilder::insertDeclare(Value *Storage, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ BasicBlock *InsertBB,
+ Instruction *InsertBefore) {
+ assert(VarInfo && "empty or invalid DILocalVariable* passed to dbg.declare");
+ assert(DL && "Expected debug loc");
+ assert(DL->getScope()->getSubprogram() ==
+ VarInfo->getScope()->getSubprogram() &&
+ "Expected matching subprograms");
+ if (!DeclareFn)
+ DeclareFn = getDeclareIntrin(M);
+
+ trackIfUnresolved(VarInfo);
+ trackIfUnresolved(Expr);
+ Value *Args[] = {getDbgIntrinsicValueImpl(VMContext, Storage),
+ MetadataAsValue::get(VMContext, VarInfo),
+ MetadataAsValue::get(VMContext, Expr)};
+
+ IRBuilder<> B(DL->getContext());
+ initIRBuilder(B, DL, InsertBB, InsertBefore);
+ return B.CreateCall(DeclareFn, Args);
+}
+
+Instruction *DIBuilder::insertDbgIntrinsic(llvm::Function *IntrinsicFn,
+ Value *V, DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertBB,
+ Instruction *InsertBefore) {
+ assert(IntrinsicFn && "must pass a non-null intrinsic function");
+ assert(V && "must pass a value to a dbg intrinsic");
+ assert(VarInfo &&
+ "empty or invalid DILocalVariable* passed to debug intrinsic");
+ assert(DL && "Expected debug loc");
+ assert(DL->getScope()->getSubprogram() ==
+ VarInfo->getScope()->getSubprogram() &&
+ "Expected matching subprograms");
+
+ trackIfUnresolved(VarInfo);
+ trackIfUnresolved(Expr);
+ Value *Args[] = {getDbgIntrinsicValueImpl(VMContext, V),
+ MetadataAsValue::get(VMContext, VarInfo),
+ MetadataAsValue::get(VMContext, Expr)};
+
+ IRBuilder<> B(DL->getContext());
+ initIRBuilder(B, DL, InsertBB, InsertBefore);
+ return B.CreateCall(IntrinsicFn, Args);
+}
+
+Instruction *DIBuilder::insertLabel(DILabel *LabelInfo, const DILocation *DL,
+ BasicBlock *InsertBB,
+ Instruction *InsertBefore) {
+ assert(LabelInfo && "empty or invalid DILabel* passed to dbg.label");
+ assert(DL && "Expected debug loc");
+ assert(DL->getScope()->getSubprogram() ==
+ LabelInfo->getScope()->getSubprogram() &&
+ "Expected matching subprograms");
+ if (!LabelFn)
+ LabelFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_label);
+
+ trackIfUnresolved(LabelInfo);
+ Value *Args[] = {MetadataAsValue::get(VMContext, LabelInfo)};
+
+ IRBuilder<> B(DL->getContext());
+ initIRBuilder(B, DL, InsertBB, InsertBefore);
+ return B.CreateCall(LabelFn, Args);
+}
+
+void DIBuilder::replaceVTableHolder(DICompositeType *&T, DIType *VTableHolder) {
+ {
+ TypedTrackingMDRef<DICompositeType> N(T);
+ N->replaceVTableHolder(VTableHolder);
+ T = N.get();
+ }
+
+ // If this didn't create a self-reference, just return.
+ if (T != VTableHolder)
+ return;
+
+ // Look for unresolved operands. T will drop RAUW support, orphaning any
+ // cycles underneath it.
+ if (T->isResolved())
+ for (const MDOperand &O : T->operands())
+ if (auto *N = dyn_cast_or_null<MDNode>(O))
+ trackIfUnresolved(N);
+}
+
+void DIBuilder::replaceArrays(DICompositeType *&T, DINodeArray Elements,
+ DINodeArray TParams) {
+ {
+ TypedTrackingMDRef<DICompositeType> N(T);
+ if (Elements)
+ N->replaceElements(Elements);
+ if (TParams)
+ N->replaceTemplateParams(DITemplateParameterArray(TParams));
+ T = N.get();
+ }
+
+ // If T isn't resolved, there's no problem.
+ if (!T->isResolved())
+ return;
+
+ // If T is resolved, it may be due to a self-reference cycle. Track the
+ // arrays explicitly if they're unresolved, or else the cycles will be
+ // orphaned.
+ if (Elements)
+ trackIfUnresolved(Elements.get());
+ if (TParams)
+ trackIfUnresolved(TParams.get());
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/DataLayout.cpp b/contrib/llvm-project/llvm/lib/IR/DataLayout.cpp
new file mode 100644
index 000000000000..96f55cf14de8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DataLayout.cpp
@@ -0,0 +1,1012 @@
+//===- DataLayout.cpp - Data size & alignment routines ---------------------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines layout properties related to datatype size/offset/alignment
+// information.
+//
+// This structure should be created once, filled in if the defaults are not
+// correct and then passed around by const&. None of the member functions
+// require modification to the object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DataLayout.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemAlloc.h"
+#include "llvm/Support/TypeSize.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <new>
+#include <utility>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Support for StructLayout
+//===----------------------------------------------------------------------===//
+
+StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
+ assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
+ StructSize = 0;
+ IsPadded = false;
+ NumElements = ST->getNumElements();
+
+ // Loop over each of the elements, placing them in memory.
+ for (unsigned i = 0, e = NumElements; i != e; ++i) {
+ Type *Ty = ST->getElementType(i);
+ const Align TyAlign = ST->isPacked() ? Align(1) : DL.getABITypeAlign(Ty);
+
+ // Add padding if necessary to align the data element properly.
+ if (!isAligned(TyAlign, StructSize)) {
+ IsPadded = true;
+ StructSize = alignTo(StructSize, TyAlign);
+ }
+
+ // Keep track of maximum alignment constraint.
+ StructAlignment = std::max(TyAlign, StructAlignment);
+
+ getMemberOffsets()[i] = StructSize;
+ // Consume space for this data item
+ StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
+ }
+
+ // Add padding to the end of the struct so that it could be put in an array
+ // and all array elements would be aligned correctly.
+ if (!isAligned(StructAlignment, StructSize)) {
+ IsPadded = true;
+ StructSize = alignTo(StructSize, StructAlignment);
+ }
+}
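+
+// Worked example (illustrative, assuming the default i32 ABI alignment of 4):
+// for { i8, i32 } the loop above places the i8 at offset 0, pads to offset 4
+// for the i32, and the trailing alignTo leaves StructSize at 8 with IsPadded
+// set, so every element of an array of this struct stays correctly aligned.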
+
+/// getElementContainingOffset - Given a valid offset into the structure,
+/// return the structure index that contains it.
+unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
+ ArrayRef<uint64_t> MemberOffsets = getMemberOffsets();
+ auto SI = llvm::upper_bound(MemberOffsets, Offset);
+ assert(SI != MemberOffsets.begin() && "Offset not in structure type!");
+ --SI;
+ assert(*SI <= Offset && "upper_bound didn't work");
+ assert((SI == MemberOffsets.begin() || *(SI - 1) <= Offset) &&
+ (SI + 1 == MemberOffsets.end() || *(SI + 1) > Offset) &&
+ "Upper bound didn't work!");
+
+ // Multiple fields can have the same offset if any of them are zero sized.
+ // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop
+ // at the i32 element, because it is the last element at that offset. This is
+ // the right one to return, because anything after it will have a higher
+ // offset, implying that this element is non-empty.
+ return SI - MemberOffsets.begin();
+}
+
+//===----------------------------------------------------------------------===//
+// LayoutAlignElem, LayoutAlign support
+//===----------------------------------------------------------------------===//
+
+LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
+ assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+ LayoutAlignElem retval;
+ retval.AlignType = align_type;
+ retval.ABIAlign = abi_align;
+ retval.PrefAlign = pref_align;
+ retval.TypeBitWidth = bit_width;
+ return retval;
+}
+
+bool
+LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
+ return (AlignType == rhs.AlignType
+ && ABIAlign == rhs.ABIAlign
+ && PrefAlign == rhs.PrefAlign
+ && TypeBitWidth == rhs.TypeBitWidth);
+}
+
+//===----------------------------------------------------------------------===//
+// PointerAlignElem, PointerAlign support
+//===----------------------------------------------------------------------===//
+
+PointerAlignElem PointerAlignElem::getInBits(uint32_t AddressSpace,
+ Align ABIAlign, Align PrefAlign,
+ uint32_t TypeBitWidth,
+ uint32_t IndexBitWidth) {
+ assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
+ PointerAlignElem retval;
+ retval.AddressSpace = AddressSpace;
+ retval.ABIAlign = ABIAlign;
+ retval.PrefAlign = PrefAlign;
+ retval.TypeBitWidth = TypeBitWidth;
+ retval.IndexBitWidth = IndexBitWidth;
+ return retval;
+}
+
+bool
+PointerAlignElem::operator==(const PointerAlignElem &rhs) const {
+ return (ABIAlign == rhs.ABIAlign && AddressSpace == rhs.AddressSpace &&
+ PrefAlign == rhs.PrefAlign && TypeBitWidth == rhs.TypeBitWidth &&
+ IndexBitWidth == rhs.IndexBitWidth);
+}
+
+//===----------------------------------------------------------------------===//
+// DataLayout Class Implementation
+//===----------------------------------------------------------------------===//
+
+const char *DataLayout::getManglingComponent(const Triple &T) {
+ if (T.isOSBinFormatGOFF())
+ return "-m:l";
+ if (T.isOSBinFormatMachO())
+ return "-m:o";
+ if (T.isOSWindows() && T.isOSBinFormatCOFF())
+ return T.getArch() == Triple::x86 ? "-m:x" : "-m:w";
+ if (T.isOSBinFormatXCOFF())
+ return "-m:a";
+ return "-m:e";
+}
+
+static const LayoutAlignElem DefaultAlignments[] = {
+ {INTEGER_ALIGN, 1, Align(1), Align(1)}, // i1
+ {INTEGER_ALIGN, 8, Align(1), Align(1)}, // i8
+ {INTEGER_ALIGN, 16, Align(2), Align(2)}, // i16
+ {INTEGER_ALIGN, 32, Align(4), Align(4)}, // i32
+ {INTEGER_ALIGN, 64, Align(4), Align(8)}, // i64
+ {FLOAT_ALIGN, 16, Align(2), Align(2)}, // half, bfloat
+ {FLOAT_ALIGN, 32, Align(4), Align(4)}, // float
+ {FLOAT_ALIGN, 64, Align(8), Align(8)}, // double
+ {FLOAT_ALIGN, 128, Align(16), Align(16)}, // ppcf128, quad, ...
+ {VECTOR_ALIGN, 64, Align(8), Align(8)}, // v2i32, v1i64, ...
+ {VECTOR_ALIGN, 128, Align(16), Align(16)}, // v16i8, v8i16, v4i32, ...
+ {AGGREGATE_ALIGN, 0, Align(1), Align(8)} // struct
+};
+
+void DataLayout::reset(StringRef Desc) {
+ clear();
+
+ LayoutMap = nullptr;
+ BigEndian = false;
+ AllocaAddrSpace = 0;
+ StackNaturalAlign.reset();
+ ProgramAddrSpace = 0;
+ DefaultGlobalsAddrSpace = 0;
+ FunctionPtrAlign.reset();
+ TheFunctionPtrAlignType = FunctionPtrAlignType::Independent;
+ ManglingMode = MM_None;
+ NonIntegralAddressSpaces.clear();
+
+ // Default alignments
+ for (const LayoutAlignElem &E : DefaultAlignments) {
+ if (Error Err = setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign,
+ E.PrefAlign, E.TypeBitWidth))
+ return report_fatal_error(std::move(Err));
+ }
+ if (Error Err = setPointerAlignmentInBits(0, Align(8), Align(8), 64, 64))
+ return report_fatal_error(std::move(Err));
+
+ if (Error Err = parseSpecifier(Desc))
+ return report_fatal_error(std::move(Err));
+}
+
+Expected<DataLayout> DataLayout::parse(StringRef LayoutDescription) {
+ DataLayout Layout("");
+ if (Error Err = Layout.parseSpecifier(LayoutDescription))
+ return std::move(Err);
+ return Layout;
+}
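+
+// Usage sketch (illustrative; the layout string is only an example):
+//
+//   Expected<DataLayout> DLOrErr =
+//       DataLayout::parse("e-m:e-i64:64-n8:16:32:64-S128");
+//   if (!DLOrErr)
+//     report_fatal_error(DLOrErr.takeError());
+//   const DataLayout &DL = *DLOrErr;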
+
+static Error reportError(const Twine &Message) {
+ return createStringError(inconvertibleErrorCode(), Message);
+}
+
+/// Checked version of split, to ensure mandatory subparts.
+static Error split(StringRef Str, char Separator,
+ std::pair<StringRef, StringRef> &Split) {
+ assert(!Str.empty() && "parse error, string can't be empty here");
+ Split = Str.split(Separator);
+ if (Split.second.empty() && Split.first != Str)
+ return reportError("Trailing separator in datalayout string");
+ if (!Split.second.empty() && Split.first.empty())
+ return reportError("Expected token before separator in datalayout string");
+ return Error::success();
+}
+
+/// Get an unsigned integer, including error checks.
+template <typename IntTy> static Error getInt(StringRef R, IntTy &Result) {
+ bool error = R.getAsInteger(10, Result);
+ if (error)
+ return reportError("not a number, or does not fit in an unsigned int");
+ return Error::success();
+}
+
+/// Get an unsigned integer representing the number of bits and convert it into
+/// bytes. Error out if not a byte width multiple.
+template <typename IntTy>
+static Error getIntInBytes(StringRef R, IntTy &Result) {
+ if (Error Err = getInt<IntTy>(R, Result))
+ return Err;
+ if (Result % 8)
+ return reportError("number of bits must be a byte width multiple");
+ Result /= 8;
+ return Error::success();
+}
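+
+// For example (illustrative): an alignment token "64" parses as 64 bits and
+// is stored as 8 bytes, while "12" is rejected because 12 % 8 != 0.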
+
+static Error getAddrSpace(StringRef R, unsigned &AddrSpace) {
+ if (Error Err = getInt(R, AddrSpace))
+ return Err;
+ if (!isUInt<24>(AddrSpace))
+ return reportError("Invalid address space, must be a 24-bit integer");
+ return Error::success();
+}
+
+Error DataLayout::parseSpecifier(StringRef Desc) {
+ StringRepresentation = std::string(Desc);
+ while (!Desc.empty()) {
+ // Split at '-'.
+ std::pair<StringRef, StringRef> Split;
+ if (Error Err = ::split(Desc, '-', Split))
+ return Err;
+ Desc = Split.second;
+
+ // Split at ':'.
+ if (Error Err = ::split(Split.first, ':', Split))
+ return Err;
+
+ // Aliases used below.
+ StringRef &Tok = Split.first; // Current token.
+ StringRef &Rest = Split.second; // The rest of the string.
+
+ if (Tok == "ni") {
+ do {
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ Rest = Split.second;
+ unsigned AS;
+ if (Error Err = getInt(Split.first, AS))
+ return Err;
+ if (AS == 0)
+ return reportError("Address space 0 can never be non-integral");
+ NonIntegralAddressSpaces.push_back(AS);
+ } while (!Rest.empty());
+
+ continue;
+ }
+
+ char Specifier = Tok.front();
+ Tok = Tok.substr(1);
+
+ switch (Specifier) {
+ case 's':
+ // Deprecated, but ignored here to keep loading older textual LLVM
+ // assembly files working.
+ break;
+ case 'E':
+ BigEndian = true;
+ break;
+ case 'e':
+ BigEndian = false;
+ break;
+ case 'p': {
+ // Address space.
+ unsigned AddrSpace = 0;
+ if (!Tok.empty())
+ if (Error Err = getInt(Tok, AddrSpace))
+ return Err;
+ if (!isUInt<24>(AddrSpace))
+ return reportError("Invalid address space, must be a 24bit integer");
+
+ // Size.
+ if (Rest.empty())
+ return reportError(
+ "Missing size specification for pointer in datalayout string");
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ unsigned PointerMemSize;
+ if (Error Err = getInt(Tok, PointerMemSize))
+ return Err;
+ if (!PointerMemSize)
+ return reportError("Invalid pointer size of 0 bytes");
+
+ // ABI alignment.
+ if (Rest.empty())
+ return reportError(
+ "Missing alignment specification for pointer in datalayout string");
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ unsigned PointerABIAlign;
+ if (Error Err = getIntInBytes(Tok, PointerABIAlign))
+ return Err;
+ if (!isPowerOf2_64(PointerABIAlign))
+ return reportError("Pointer ABI alignment must be a power of 2");
+
+ // Size of index used in GEP for address calculation.
+ // The parameter is optional. By default it is equal to the pointer size.
+ unsigned IndexSize = PointerMemSize;
+
+ // Preferred alignment.
+ unsigned PointerPrefAlign = PointerABIAlign;
+ if (!Rest.empty()) {
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ if (Error Err = getIntInBytes(Tok, PointerPrefAlign))
+ return Err;
+ if (!isPowerOf2_64(PointerPrefAlign))
+ return reportError(
+ "Pointer preferred alignment must be a power of 2");
+
+ // Now read the index. It is the second optional parameter here.
+ if (!Rest.empty()) {
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ if (Error Err = getInt(Tok, IndexSize))
+ return Err;
+ if (!IndexSize)
+ return reportError("Invalid index size of 0 bytes");
+ }
+ }
+ if (Error Err = setPointerAlignmentInBits(
+ AddrSpace, assumeAligned(PointerABIAlign),
+ assumeAligned(PointerPrefAlign), PointerMemSize, IndexSize))
+ return Err;
+ break;
+ }
+ case 'i':
+ case 'v':
+ case 'f':
+ case 'a': {
+ AlignTypeEnum AlignType;
+ switch (Specifier) {
+ default: llvm_unreachable("Unexpected specifier!");
+ case 'i': AlignType = INTEGER_ALIGN; break;
+ case 'v': AlignType = VECTOR_ALIGN; break;
+ case 'f': AlignType = FLOAT_ALIGN; break;
+ case 'a': AlignType = AGGREGATE_ALIGN; break;
+ }
+
+ // Bit size.
+ unsigned Size = 0;
+ if (!Tok.empty())
+ if (Error Err = getInt(Tok, Size))
+ return Err;
+
+ if (AlignType == AGGREGATE_ALIGN && Size != 0)
+ return reportError(
+ "Sized aggregate specification in datalayout string");
+
+ // ABI alignment.
+ if (Rest.empty())
+ return reportError(
+ "Missing alignment specification in datalayout string");
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ unsigned ABIAlign;
+ if (Error Err = getIntInBytes(Tok, ABIAlign))
+ return Err;
+ if (AlignType != AGGREGATE_ALIGN && !ABIAlign)
+ return reportError(
+ "ABI alignment specification must be >0 for non-aggregate types");
+
+ if (!isUInt<16>(ABIAlign))
+ return reportError("Invalid ABI alignment, must be a 16bit integer");
+ if (ABIAlign != 0 && !isPowerOf2_64(ABIAlign))
+ return reportError("Invalid ABI alignment, must be a power of 2");
+
+ // Preferred alignment.
+ unsigned PrefAlign = ABIAlign;
+ if (!Rest.empty()) {
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ if (Error Err = getIntInBytes(Tok, PrefAlign))
+ return Err;
+ }
+
+ if (!isUInt<16>(PrefAlign))
+ return reportError(
+ "Invalid preferred alignment, must be a 16bit integer");
+ if (PrefAlign != 0 && !isPowerOf2_64(PrefAlign))
+ return reportError("Invalid preferred alignment, must be a power of 2");
+
+ if (Error Err = setAlignment(AlignType, assumeAligned(ABIAlign),
+ assumeAligned(PrefAlign), Size))
+ return Err;
+
+ break;
+ }
+ case 'n': // Native integer types.
+ while (true) {
+ unsigned Width;
+ if (Error Err = getInt(Tok, Width))
+ return Err;
+ if (Width == 0)
+ return reportError(
+ "Zero width native integer type in datalayout string");
+ LegalIntWidths.push_back(Width);
+ if (Rest.empty())
+ break;
+ if (Error Err = ::split(Rest, ':', Split))
+ return Err;
+ }
+ break;
+ case 'S': { // Stack natural alignment.
+ uint64_t Alignment;
+ if (Error Err = getIntInBytes(Tok, Alignment))
+ return Err;
+ if (Alignment != 0 && !llvm::isPowerOf2_64(Alignment))
+ return reportError("Alignment is neither 0 nor a power of 2");
+ StackNaturalAlign = MaybeAlign(Alignment);
+ break;
+ }
+ case 'F': {
+ switch (Tok.front()) {
+ case 'i':
+ TheFunctionPtrAlignType = FunctionPtrAlignType::Independent;
+ break;
+ case 'n':
+ TheFunctionPtrAlignType = FunctionPtrAlignType::MultipleOfFunctionAlign;
+ break;
+ default:
+ return reportError("Unknown function pointer alignment type in "
+ "datalayout string");
+ }
+ Tok = Tok.substr(1);
+ uint64_t Alignment;
+ if (Error Err = getIntInBytes(Tok, Alignment))
+ return Err;
+ if (Alignment != 0 && !llvm::isPowerOf2_64(Alignment))
+ return reportError("Alignment is neither 0 nor a power of 2");
+ FunctionPtrAlign = MaybeAlign(Alignment);
+ break;
+ }
+ case 'P': { // Function address space.
+ if (Error Err = getAddrSpace(Tok, ProgramAddrSpace))
+ return Err;
+ break;
+ }
+ case 'A': { // Default stack/alloca address space.
+ if (Error Err = getAddrSpace(Tok, AllocaAddrSpace))
+ return Err;
+ break;
+ }
+ case 'G': { // Default address space for global variables.
+ if (Error Err = getAddrSpace(Tok, DefaultGlobalsAddrSpace))
+ return Err;
+ break;
+ }
+ case 'm':
+ if (!Tok.empty())
+ return reportError("Unexpected trailing characters after mangling "
+ "specifier in datalayout string");
+ if (Rest.empty())
+ return reportError("Expected mangling specifier in datalayout string");
+ if (Rest.size() > 1)
+ return reportError("Unknown mangling specifier in datalayout string");
+ switch(Rest[0]) {
+ default:
+ return reportError("Unknown mangling in datalayout string");
+ case 'e':
+ ManglingMode = MM_ELF;
+ break;
+ case 'l':
+ ManglingMode = MM_GOFF;
+ break;
+ case 'o':
+ ManglingMode = MM_MachO;
+ break;
+ case 'm':
+ ManglingMode = MM_Mips;
+ break;
+ case 'w':
+ ManglingMode = MM_WinCOFF;
+ break;
+ case 'x':
+ ManglingMode = MM_WinCOFFX86;
+ break;
+ case 'a':
+ ManglingMode = MM_XCOFF;
+ break;
+ }
+ break;
+ default:
+ return reportError("Unknown specifier in datalayout string");
+ break;
+ }
+ }
+
+ return Error::success();
+}
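+
+// Example (illustrative): a typical x86-64 ELF layout string
+//
+//   "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+//
+// walks through the loop above as little-endian ('e'), ELF mangling ("m:e"),
+// 8-byte ABI alignment for i64 ("i64:64"), 16-byte alignment for x86_fp80
+// ("f80:128"), native integer widths 8/16/32/64 ('n'), and a 16-byte natural
+// stack alignment ("S128").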
+
+DataLayout::DataLayout(const Module *M) {
+ init(M);
+}
+
+void DataLayout::init(const Module *M) { *this = M->getDataLayout(); }
+
+bool DataLayout::operator==(const DataLayout &Other) const {
+ bool Ret = BigEndian == Other.BigEndian &&
+ AllocaAddrSpace == Other.AllocaAddrSpace &&
+ StackNaturalAlign == Other.StackNaturalAlign &&
+ ProgramAddrSpace == Other.ProgramAddrSpace &&
+ DefaultGlobalsAddrSpace == Other.DefaultGlobalsAddrSpace &&
+ FunctionPtrAlign == Other.FunctionPtrAlign &&
+ TheFunctionPtrAlignType == Other.TheFunctionPtrAlignType &&
+ ManglingMode == Other.ManglingMode &&
+ LegalIntWidths == Other.LegalIntWidths &&
+ Alignments == Other.Alignments && Pointers == Other.Pointers;
+ // Note: getStringRepresentation() might differ; it is not canonicalized.
+ return Ret;
+}
+
+DataLayout::AlignmentsTy::iterator
+DataLayout::findAlignmentLowerBound(AlignTypeEnum AlignType,
+ uint32_t BitWidth) {
+ auto Pair = std::make_pair((unsigned)AlignType, BitWidth);
+ return partition_point(Alignments, [=](const LayoutAlignElem &E) {
+ return std::make_pair(E.AlignType, E.TypeBitWidth) < Pair;
+ });
+}
+
+Error DataLayout::setAlignment(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
+ // AlignmentsTy::ABIAlign and AlignmentsTy::PrefAlign were once stored as
+ // uint16_t; it is unclear whether there are requirements for alignment to
+ // be less than 2^16 beyond the storage width. In the meantime we leave the
+ // restriction as an assert. See D67400 for context.
+ assert(Log2(abi_align) < 16 && Log2(pref_align) < 16 && "Alignment too big");
+ if (!isUInt<24>(bit_width))
+ return reportError("Invalid bit width, must be a 24bit integer");
+ if (pref_align < abi_align)
+ return reportError(
+ "Preferred alignment cannot be less than the ABI alignment");
+
+ AlignmentsTy::iterator I = findAlignmentLowerBound(align_type, bit_width);
+ if (I != Alignments.end() &&
+ I->AlignType == (unsigned)align_type && I->TypeBitWidth == bit_width) {
+ // Update the abi, preferred alignments.
+ I->ABIAlign = abi_align;
+ I->PrefAlign = pref_align;
+ } else {
+ // Insert before I to keep the vector sorted.
+ Alignments.insert(I, LayoutAlignElem::get(align_type, abi_align,
+ pref_align, bit_width));
+ }
+ return Error::success();
+}
+
+const PointerAlignElem &
+DataLayout::getPointerAlignElem(uint32_t AddressSpace) const {
+ if (AddressSpace != 0) {
+ auto I = lower_bound(Pointers, AddressSpace,
+ [](const PointerAlignElem &A, uint32_t AddressSpace) {
+ return A.AddressSpace < AddressSpace;
+ });
+ if (I != Pointers.end() && I->AddressSpace == AddressSpace)
+ return *I;
+ }
+
+ assert(Pointers[0].AddressSpace == 0);
+ return Pointers[0];
+}
+
+Error DataLayout::setPointerAlignmentInBits(uint32_t AddrSpace, Align ABIAlign,
+ Align PrefAlign,
+ uint32_t TypeBitWidth,
+ uint32_t IndexBitWidth) {
+ if (PrefAlign < ABIAlign)
+ return reportError(
+ "Preferred alignment cannot be less than the ABI alignment");
+
+ auto I = lower_bound(Pointers, AddrSpace,
+ [](const PointerAlignElem &A, uint32_t AddressSpace) {
+ return A.AddressSpace < AddressSpace;
+ });
+ if (I == Pointers.end() || I->AddressSpace != AddrSpace) {
+ Pointers.insert(I,
+ PointerAlignElem::getInBits(AddrSpace, ABIAlign, PrefAlign,
+ TypeBitWidth, IndexBitWidth));
+ } else {
+ I->ABIAlign = ABIAlign;
+ I->PrefAlign = PrefAlign;
+ I->TypeBitWidth = TypeBitWidth;
+ I->IndexBitWidth = IndexBitWidth;
+ }
+ return Error::success();
+}
+
+Align DataLayout::getIntegerAlignment(uint32_t BitWidth,
+ bool abi_or_pref) const {
+ auto I = findAlignmentLowerBound(INTEGER_ALIGN, BitWidth);
+ // If we don't have an exact match, use alignment of next larger integer
+ // type. If there is none, use alignment of largest integer type by going
+ // back one element.
+ if (I == Alignments.end() || I->AlignType != INTEGER_ALIGN)
+ --I;
+ assert(I->AlignType == INTEGER_ALIGN && "Must be integer alignment");
+ return abi_or_pref ? I->ABIAlign : I->PrefAlign;
+}
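+
+// With the default table (illustrative): a query for i24 falls between the
+// i16 and i32 entries and returns the i32 alignment, while a query for i128
+// runs past the last integer entry and falls back to the i64 alignment.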
+
+namespace {
+
+class StructLayoutMap {
+ using LayoutInfoTy = DenseMap<StructType*, StructLayout*>;
+ LayoutInfoTy LayoutInfo;
+
+public:
+ ~StructLayoutMap() {
+ // Remove any layouts.
+ for (const auto &I : LayoutInfo) {
+ StructLayout *Value = I.second;
+ Value->~StructLayout();
+ free(Value);
+ }
+ }
+
+ StructLayout *&operator[](StructType *STy) {
+ return LayoutInfo[STy];
+ }
+};
+
+} // end anonymous namespace
+
+void DataLayout::clear() {
+ LegalIntWidths.clear();
+ Alignments.clear();
+ Pointers.clear();
+ delete static_cast<StructLayoutMap *>(LayoutMap);
+ LayoutMap = nullptr;
+}
+
+DataLayout::~DataLayout() {
+ clear();
+}
+
+const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
+ if (!LayoutMap)
+ LayoutMap = new StructLayoutMap();
+
+ StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap);
+ StructLayout *&SL = (*STM)[Ty];
+ if (SL) return SL;
+
+ // Otherwise, create the struct layout. Because it is variable length, we
+ // malloc it, then use placement new.
+ StructLayout *L = (StructLayout *)safe_malloc(
+ StructLayout::totalSizeToAlloc<uint64_t>(Ty->getNumElements()));
+
+ // Set SL before calling StructLayout's ctor. The ctor could cause other
+ // entries to be added to the map, invalidating our reference.
+ SL = L;
+
+ new (L) StructLayout(Ty, *this);
+
+ return L;
+}
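+
+// Illustrative usage (the layout string here is an assumption, not part of
+// this change):
+//
+//   LLVMContext Ctx;
+//   DataLayout DL("e-i64:64-n8:16:32:64");
+//   StructType *STy =
+//       StructType::get(Ctx, {Type::getInt8Ty(Ctx), Type::getInt32Ty(Ctx)});
+//   const StructLayout *SL = DL.getStructLayout(STy);
+//   // SL->getElementOffset(1) == 4 and SL->getSizeInBytes() == 8, because
+//   // 3 bytes of padding precede the i32 member.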
+
+Align DataLayout::getPointerABIAlignment(unsigned AS) const {
+ return getPointerAlignElem(AS).ABIAlign;
+}
+
+Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
+ return getPointerAlignElem(AS).PrefAlign;
+}
+
+unsigned DataLayout::getPointerSize(unsigned AS) const {
+ return divideCeil(getPointerAlignElem(AS).TypeBitWidth, 8);
+}
+
+unsigned DataLayout::getMaxIndexSize() const {
+ unsigned MaxIndexSize = 0;
+ for (auto &P : Pointers)
+ MaxIndexSize =
+ std::max(MaxIndexSize, (unsigned)divideCeil(P.IndexBitWidth, 8));
+
+ return MaxIndexSize;
+}
+
+unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "This should only be called with a pointer or pointer vector type");
+ Ty = Ty->getScalarType();
+ return getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace());
+}
+
+unsigned DataLayout::getIndexSize(unsigned AS) const {
+ return divideCeil(getPointerAlignElem(AS).IndexBitWidth, 8);
+}
+
+unsigned DataLayout::getIndexTypeSizeInBits(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "This should only be called with a pointer or pointer vector type");
+ Ty = Ty->getScalarType();
+ return getIndexSizeInBits(cast<PointerType>(Ty)->getAddressSpace());
+}
+
+/*!
+ \param abi_or_pref Flag that determines which alignment is returned. true
+ returns the ABI alignment, false returns the preferred alignment.
+ \param Ty The underlying type for which alignment is determined.
+
+ Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
+ == false) for the requested type \a Ty.
+ */
+Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+ assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
+ switch (Ty->getTypeID()) {
+ // Early escape for the non-numeric types.
+ case Type::LabelTyID:
+ return abi_or_pref ? getPointerABIAlignment(0) : getPointerPrefAlignment(0);
+ case Type::PointerTyID: {
+ unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
+ return abi_or_pref ? getPointerABIAlignment(AS)
+ : getPointerPrefAlignment(AS);
+ }
+ case Type::ArrayTyID:
+ return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref);
+
+ case Type::StructTyID: {
+ // Packed structure types always have an ABI alignment of one.
+ if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
+ return Align(1);
+
+ // Get the layout annotation... which is lazily created on demand.
+ const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
+ const LayoutAlignElem &AggregateAlign = Alignments[0];
+ assert(AggregateAlign.AlignType == AGGREGATE_ALIGN &&
+ "Aggregate alignment must be first alignment entry");
+ const Align Align =
+ abi_or_pref ? AggregateAlign.ABIAlign : AggregateAlign.PrefAlign;
+ return std::max(Align, Layout->getAlignment());
+ }
+ case Type::IntegerTyID:
+ return getIntegerAlignment(Ty->getIntegerBitWidth(), abi_or_pref);
+ case Type::HalfTyID:
+ case Type::BFloatTyID:
+ case Type::FloatTyID:
+ case Type::DoubleTyID:
+ // PPC_FP128TyID and FP128TyID have different data contents, but the
+ // same size and alignment, so they look the same here.
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID:
+ case Type::X86_FP80TyID: {
+ unsigned BitWidth = getTypeSizeInBits(Ty).getFixedSize();
+ auto I = findAlignmentLowerBound(FLOAT_ALIGN, BitWidth);
+ if (I != Alignments.end() && I->AlignType == FLOAT_ALIGN &&
+ I->TypeBitWidth == BitWidth)
+ return abi_or_pref ? I->ABIAlign : I->PrefAlign;
+
+ // If we still couldn't find a reasonable default alignment, fall back
+ // to a simple heuristic that the alignment is the first power of two
+ // greater-or-equal to the store size of the type. This is a reasonable
+ // approximation of reality, and if the user wanted something less
+ // conservative, they should have specified it explicitly in the data
+ // layout.
+ return Align(PowerOf2Ceil(BitWidth / 8));
+ }
+ case Type::X86_MMXTyID:
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID: {
+ unsigned BitWidth = getTypeSizeInBits(Ty).getKnownMinSize();
+ auto I = findAlignmentLowerBound(VECTOR_ALIGN, BitWidth);
+ if (I != Alignments.end() && I->AlignType == VECTOR_ALIGN &&
+ I->TypeBitWidth == BitWidth)
+ return abi_or_pref ? I->ABIAlign : I->PrefAlign;
+
+ // By default, use natural alignment for vector types. This is consistent
+ // with what clang and llvm-gcc do.
+ //
+ // We're only calculating a natural alignment, so it doesn't have to be
+ // based on the full size for scalable vectors. Using the minimum element
+ // count should be enough here.
+ return Align(PowerOf2Ceil(getTypeStoreSize(Ty).getKnownMinSize()));
+ }
+ case Type::X86_AMXTyID:
+ return Align(64);
+ default:
+ llvm_unreachable("Bad type for getAlignment!!!");
+ }
+}
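+
+// Fallback example (illustrative): a <3 x i32> vector is 96 bits wide and
+// matches no VECTOR_ALIGN entry exactly, so it receives the natural
+// alignment Align(PowerOf2Ceil(12)) == 16 from the store-size heuristic.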
+
+/// TODO: Remove this function once the transition to Align is over.
+uint64_t DataLayout::getABITypeAlignment(Type *Ty) const {
+ return getABITypeAlign(Ty).value();
+}
+
+Align DataLayout::getABITypeAlign(Type *Ty) const {
+ return getAlignment(Ty, true);
+}
+
+/// TODO: Remove this function once the transition to Align is over.
+uint64_t DataLayout::getPrefTypeAlignment(Type *Ty) const {
+ return getPrefTypeAlign(Ty).value();
+}
+
+Align DataLayout::getPrefTypeAlign(Type *Ty) const {
+ return getAlignment(Ty, false);
+}
+
+IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
+ unsigned AddressSpace) const {
+ return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
+}
+
+Type *DataLayout::getIntPtrType(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "Expected a pointer or pointer vector type.");
+ unsigned NumBits = getPointerTypeSizeInBits(Ty);
+ IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
+ if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
+ return VectorType::get(IntTy, VecTy);
+ return IntTy;
+}
+
+Type *DataLayout::getSmallestLegalIntType(LLVMContext &C, unsigned Width) const {
+ for (unsigned LegalIntWidth : LegalIntWidths)
+ if (Width <= LegalIntWidth)
+ return Type::getIntNTy(C, LegalIntWidth);
+ return nullptr;
+}
+
+unsigned DataLayout::getLargestLegalIntTypeSizeInBits() const {
+ auto Max = std::max_element(LegalIntWidths.begin(), LegalIntWidths.end());
+ return Max != LegalIntWidths.end() ? *Max : 0;
+}
+
+Type *DataLayout::getIndexType(Type *Ty) const {
+ assert(Ty->isPtrOrPtrVectorTy() &&
+ "Expected a pointer or pointer vector type.");
+ unsigned NumBits = getIndexTypeSizeInBits(Ty);
+ IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits);
+ if (VectorType *VecTy = dyn_cast<VectorType>(Ty))
+ return VectorType::get(IntTy, VecTy);
+ return IntTy;
+}
+
+int64_t DataLayout::getIndexedOffsetInType(Type *ElemTy,
+ ArrayRef<Value *> Indices) const {
+ int64_t Result = 0;
+
+ generic_gep_type_iterator<Value* const*>
+ GTI = gep_type_begin(ElemTy, Indices),
+ GTE = gep_type_end(ElemTy, Indices);
+ for (; GTI != GTE; ++GTI) {
+ Value *Idx = GTI.getOperand();
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
+ assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
+ unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();
+
+ // Get structure layout information...
+ const StructLayout *Layout = getStructLayout(STy);
+
+ // Add in the offset, as calculated by the structure layout info...
+ Result += Layout->getElementOffset(FieldNo);
+ } else {
+ // Get the array index and the size of each array element.
+ if (int64_t arrayIdx = cast<ConstantInt>(Idx)->getSExtValue())
+ Result += arrayIdx * getTypeAllocSize(GTI.getIndexedType());
+ }
+ }
+
+ return Result;
+}
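+
+// Worked example (illustrative, assuming an "i64:64" layout): for
+// ElemTy = { i32, { i8, i64 } } and indices (0, 1, 1), the leading 0 adds
+// nothing, outer field 1 starts at offset 8 (the i64 gives the inner struct
+// 8-byte alignment), and inner field 1 adds another 8, for a total of 16.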
+
+static APInt getElementIndex(TypeSize ElemSize, APInt &Offset) {
+ // Skip over scalable or zero size elements. Also skip element sizes larger
+ // than the positive index space, because the arithmetic below may not be
+ // correct in that case.
+ unsigned BitWidth = Offset.getBitWidth();
+ if (ElemSize.isScalable() || ElemSize == 0 ||
+ !isUIntN(BitWidth - 1, ElemSize)) {
+ return APInt::getZero(BitWidth);
+ }
+
+ APInt Index = Offset.sdiv(ElemSize);
+ Offset -= Index * ElemSize;
+ if (Offset.isNegative()) {
+ // Prefer a positive remaining offset to allow struct indexing.
+ --Index;
+ Offset += ElemSize;
+ assert(Offset.isNonNegative() && "Remaining offset shouldn't be negative");
+ }
+ return Index;
+}
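+
+// Example of the negative-offset fixup (illustrative): for ElemSize == 8 and
+// Offset == -4, sdiv truncates toward zero, yielding Index 0 and leaving
+// Offset at -4; the branch above rewrites this to Index -1 with a positive
+// remaining Offset of 4, which downstream struct indexing can consume.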
+
+Optional<APInt> DataLayout::getGEPIndexForOffset(Type *&ElemTy,
+ APInt &Offset) const {
+ if (auto *ArrTy = dyn_cast<ArrayType>(ElemTy)) {
+ ElemTy = ArrTy->getElementType();
+ return getElementIndex(getTypeAllocSize(ElemTy), Offset);
+ }
+
+ if (auto *VecTy = dyn_cast<VectorType>(ElemTy)) {
+ ElemTy = VecTy->getElementType();
+ unsigned ElemSizeInBits = getTypeSizeInBits(ElemTy).getFixedSize();
+ // GEPs over vector elements whose size is not a multiple of 8 bits are
+ // invalid.
+ if (ElemSizeInBits % 8 != 0)
+ return None;
+
+ return getElementIndex(TypeSize::Fixed(ElemSizeInBits / 8), Offset);
+ }
+
+ if (auto *STy = dyn_cast<StructType>(ElemTy)) {
+ const StructLayout *SL = getStructLayout(STy);
+ uint64_t IntOffset = Offset.getZExtValue();
+ if (IntOffset >= SL->getSizeInBytes())
+ return None;
+
+ unsigned Index = SL->getElementContainingOffset(IntOffset);
+ Offset -= SL->getElementOffset(Index);
+ ElemTy = STy->getElementType(Index);
+ return APInt(32, Index);
+ }
+
+ // Non-aggregate type.
+ return None;
+}
+
+SmallVector<APInt> DataLayout::getGEPIndicesForOffset(Type *&ElemTy,
+ APInt &Offset) const {
+ assert(ElemTy->isSized() && "Element type must be sized");
+ SmallVector<APInt> Indices;
+ Indices.push_back(getElementIndex(getTypeAllocSize(ElemTy), Offset));
+ while (Offset != 0) {
+ Optional<APInt> Index = getGEPIndexForOffset(ElemTy, Offset);
+ if (!Index)
+ break;
+ Indices.push_back(*Index);
+ }
+
+ return Indices;
+}
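+
+// Usage sketch (illustrative): for ElemTy == [4 x i32] and Offset == 20, the
+// first call yields index 1 (20 / 16) with 4 bytes remaining, and the loop
+// descends into the array element to produce index 1 (4 / 4), giving the GEP
+// indices {1, 1} with Offset reduced to 0.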
+
+/// getPreferredAlign - Return the preferred alignment of the specified global.
+/// This includes an explicitly requested alignment (if the global has one).
+Align DataLayout::getPreferredAlign(const GlobalVariable *GV) const {
+ MaybeAlign GVAlignment = GV->getAlign();
+ // If a section is specified, always precisely honor explicit alignment,
+ // so we don't insert padding into a section we don't control.
+ if (GVAlignment && GV->hasSection())
+ return *GVAlignment;
+
+ // If no explicit alignment is specified, compute the alignment based on
+ // the IR type. If an alignment is specified, increase it to match the ABI
+ // alignment of the IR type.
+ //
+ // FIXME: Not sure it makes sense to use the alignment of the type if
+ // there's already an explicit alignment specification.
+ Type *ElemType = GV->getValueType();
+ Align Alignment = getPrefTypeAlign(ElemType);
+ if (GVAlignment) {
+ if (*GVAlignment >= Alignment)
+ Alignment = *GVAlignment;
+ else
+ Alignment = std::max(*GVAlignment, getABITypeAlign(ElemType));
+ }
+
+ // If no explicit alignment is specified, and the global is large, increase
+ // the alignment to 16.
+ // FIXME: Why 16, specifically?
+ if (GV->hasInitializer() && !GVAlignment) {
+ if (Alignment < Align(16)) {
+ // If the global is not external, see if it is large. If so, give it a
+ // larger alignment.
+ if (getTypeSizeInBits(ElemType) > 128)
+ Alignment = Align(16); // 16-byte alignment.
+ }
+ }
+ return Alignment;
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp b/contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp
new file mode 100644
index 000000000000..fd4b4170c0a7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DebugInfo.cpp
@@ -0,0 +1,1617 @@
+//===- DebugInfo.cpp - Debug Information Helper Classes -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the helper classes used to build and interpret debug
+// information in LLVM IR form.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/DebugInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
+#include <utility>
+
+using namespace llvm;
+using namespace llvm::dwarf;
+
+/// Finds all intrinsics declaring local variables as living in the memory that
+/// 'V' points to. This may include a mix of dbg.declare and
+/// dbg.addr intrinsics.
+TinyPtrVector<DbgVariableIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
+ // This function is hot. Check whether the value has any metadata to avoid a
+ // DenseMap lookup.
+ if (!V->isUsedByMetadata())
+ return {};
+ auto *L = LocalAsMetadata::getIfExists(V);
+ if (!L)
+ return {};
+ auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
+ if (!MDV)
+ return {};
+
+ TinyPtrVector<DbgVariableIntrinsic *> Declares;
+ for (User *U : MDV->users()) {
+ if (auto *DII = dyn_cast<DbgVariableIntrinsic>(U))
+ if (DII->isAddressOfVariable())
+ Declares.push_back(DII);
+ }
+
+ return Declares;
+}
+
+TinyPtrVector<DbgDeclareInst *> llvm::FindDbgDeclareUses(Value *V) {
+ TinyPtrVector<DbgDeclareInst *> DDIs;
+ for (DbgVariableIntrinsic *DVI : FindDbgAddrUses(V))
+ if (auto *DDI = dyn_cast<DbgDeclareInst>(DVI))
+ DDIs.push_back(DDI);
+ return DDIs;
+}
+
+void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
+ // This function is hot. Check whether the value has any metadata to avoid a
+ // DenseMap lookup.
+ if (!V->isUsedByMetadata())
+ return;
+ // TODO: If this value appears multiple times in a DIArgList, we should still
+ // only add the owning DbgValueInst once; use this set to track ArgListUsers.
+ // This behaviour can be removed when we can automatically remove duplicates.
+ SmallPtrSet<DbgValueInst *, 4> EncounteredDbgValues;
+ if (auto *L = LocalAsMetadata::getIfExists(V)) {
+ if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) {
+ for (User *U : MDV->users())
+ if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
+ DbgValues.push_back(DVI);
+ }
+ for (Metadata *AL : L->getAllArgListUsers()) {
+ if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), AL)) {
+ for (User *U : MDV->users())
+ if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
+ if (EncounteredDbgValues.insert(DVI).second)
+ DbgValues.push_back(DVI);
+ }
+ }
+ }
+}
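+
+// Typical caller sketch (illustrative only):
+//
+//   SmallVector<DbgValueInst *, 4> DbgValues;
+//   findDbgValues(DbgValues, V);
+//   for (DbgValueInst *DVI : DbgValues)
+//     DVI->replaceVariableLocationOp(V, UndefValue::get(V->getType()));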
+
+void llvm::findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers,
+ Value *V) {
+ // This function is hot. Check whether the value has any metadata to avoid a
+ // DenseMap lookup.
+ if (!V->isUsedByMetadata())
+ return;
+ // TODO: If this value appears multiple times in a DIArgList, we should still
+ // only add the owning DbgValueInst once; use this set to track ArgListUsers.
+ // This behaviour can be removed when we can automatically remove duplicates.
+ SmallPtrSet<DbgVariableIntrinsic *, 4> EncounteredDbgValues;
+ if (auto *L = LocalAsMetadata::getIfExists(V)) {
+ if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) {
+ for (User *U : MDV->users())
+ if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U))
+ DbgUsers.push_back(DII);
+ }
+ for (Metadata *AL : L->getAllArgListUsers()) {
+ if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), AL)) {
+ for (User *U : MDV->users())
+ if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U))
+ if (EncounteredDbgValues.insert(DII).second)
+ DbgUsers.push_back(DII);
+ }
+ }
+ }
+}
+
+DISubprogram *llvm::getDISubprogram(const MDNode *Scope) {
+ if (auto *LocalScope = dyn_cast_or_null<DILocalScope>(Scope))
+ return LocalScope->getSubprogram();
+ return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// DebugInfoFinder implementations.
+//===----------------------------------------------------------------------===//
+
+void DebugInfoFinder::reset() {
+ CUs.clear();
+ SPs.clear();
+ GVs.clear();
+ TYs.clear();
+ Scopes.clear();
+ NodesSeen.clear();
+}
+
+void DebugInfoFinder::processModule(const Module &M) {
+ for (auto *CU : M.debug_compile_units())
+ processCompileUnit(CU);
+ for (auto &F : M.functions()) {
+ if (auto *SP = cast_or_null<DISubprogram>(F.getSubprogram()))
+ processSubprogram(SP);
+ // There could be subprograms from inlined functions referenced from
+ // instructions only. Walk the function to find them.
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB)
+ processInstruction(M, I);
+ }
+}
+
+void DebugInfoFinder::processCompileUnit(DICompileUnit *CU) {
+ if (!addCompileUnit(CU))
+ return;
+ for (auto DIG : CU->getGlobalVariables()) {
+ if (!addGlobalVariable(DIG))
+ continue;
+ auto *GV = DIG->getVariable();
+ processScope(GV->getScope());
+ processType(GV->getType());
+ }
+ for (auto *ET : CU->getEnumTypes())
+ processType(ET);
+ for (auto *RT : CU->getRetainedTypes())
+ if (auto *T = dyn_cast<DIType>(RT))
+ processType(T);
+ else
+ processSubprogram(cast<DISubprogram>(RT));
+ for (auto *Import : CU->getImportedEntities()) {
+ auto *Entity = Import->getEntity();
+ if (auto *T = dyn_cast<DIType>(Entity))
+ processType(T);
+ else if (auto *SP = dyn_cast<DISubprogram>(Entity))
+ processSubprogram(SP);
+ else if (auto *NS = dyn_cast<DINamespace>(Entity))
+ processScope(NS->getScope());
+ else if (auto *M = dyn_cast<DIModule>(Entity))
+ processScope(M->getScope());
+ }
+}
+
+void DebugInfoFinder::processInstruction(const Module &M,
+ const Instruction &I) {
+ if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
+ processVariable(M, *DVI);
+
+ if (auto DbgLoc = I.getDebugLoc())
+ processLocation(M, DbgLoc.get());
+}
+
+void DebugInfoFinder::processLocation(const Module &M, const DILocation *Loc) {
+ if (!Loc)
+ return;
+ processScope(Loc->getScope());
+ processLocation(M, Loc->getInlinedAt());
+}
+
+void DebugInfoFinder::processType(DIType *DT) {
+ if (!addType(DT))
+ return;
+ processScope(DT->getScope());
+ if (auto *ST = dyn_cast<DISubroutineType>(DT)) {
+ for (DIType *Ref : ST->getTypeArray())
+ processType(Ref);
+ return;
+ }
+ if (auto *DCT = dyn_cast<DICompositeType>(DT)) {
+ processType(DCT->getBaseType());
+ for (Metadata *D : DCT->getElements()) {
+ if (auto *T = dyn_cast<DIType>(D))
+ processType(T);
+ else if (auto *SP = dyn_cast<DISubprogram>(D))
+ processSubprogram(SP);
+ }
+ return;
+ }
+ if (auto *DDT = dyn_cast<DIDerivedType>(DT)) {
+ processType(DDT->getBaseType());
+ }
+}
+
+void DebugInfoFinder::processScope(DIScope *Scope) {
+ if (!Scope)
+ return;
+ if (auto *Ty = dyn_cast<DIType>(Scope)) {
+ processType(Ty);
+ return;
+ }
+ if (auto *CU = dyn_cast<DICompileUnit>(Scope)) {
+ addCompileUnit(CU);
+ return;
+ }
+ if (auto *SP = dyn_cast<DISubprogram>(Scope)) {
+ processSubprogram(SP);
+ return;
+ }
+ if (!addScope(Scope))
+ return;
+ if (auto *LB = dyn_cast<DILexicalBlockBase>(Scope)) {
+ processScope(LB->getScope());
+ } else if (auto *NS = dyn_cast<DINamespace>(Scope)) {
+ processScope(NS->getScope());
+ } else if (auto *M = dyn_cast<DIModule>(Scope)) {
+ processScope(M->getScope());
+ }
+}
+
+void DebugInfoFinder::processSubprogram(DISubprogram *SP) {
+ if (!addSubprogram(SP))
+ return;
+ processScope(SP->getScope());
+ // Some of the users, e.g. CloneFunctionInto / CloneModule, need to set up a
+ // ValueMap containing identity mappings for all of the DICompileUnit's, not
+ // just DISubprogram's, referenced from anywhere within the Function being
+ // cloned prior to calling MapMetadata / RemapInstruction to avoid their
+ // duplication later as DICompileUnit's are also directly referenced by
+ // llvm.dbg.cu list. Therefore we need to collect DICompileUnit's here as well.
+ // Also, DICompileUnit's may reference DISubprogram's too and therefore need
+ // to be at least looked through.
+ processCompileUnit(SP->getUnit());
+ processType(SP->getType());
+ for (auto *Element : SP->getTemplateParams()) {
+ if (auto *TType = dyn_cast<DITemplateTypeParameter>(Element)) {
+ processType(TType->getType());
+ } else if (auto *TVal = dyn_cast<DITemplateValueParameter>(Element)) {
+ processType(TVal->getType());
+ }
+ }
+}
+
+void DebugInfoFinder::processVariable(const Module &M,
+ const DbgVariableIntrinsic &DVI) {
+ auto *N = dyn_cast<MDNode>(DVI.getVariable());
+ if (!N)
+ return;
+
+ auto *DV = dyn_cast<DILocalVariable>(N);
+ if (!DV)
+ return;
+
+ if (!NodesSeen.insert(DV).second)
+ return;
+ processScope(DV->getScope());
+ processType(DV->getType());
+}
+
+bool DebugInfoFinder::addType(DIType *DT) {
+ if (!DT)
+ return false;
+
+ if (!NodesSeen.insert(DT).second)
+ return false;
+
+ TYs.push_back(const_cast<DIType *>(DT));
+ return true;
+}
+
+bool DebugInfoFinder::addCompileUnit(DICompileUnit *CU) {
+ if (!CU)
+ return false;
+ if (!NodesSeen.insert(CU).second)
+ return false;
+
+ CUs.push_back(CU);
+ return true;
+}
+
+bool DebugInfoFinder::addGlobalVariable(DIGlobalVariableExpression *DIG) {
+ if (!NodesSeen.insert(DIG).second)
+ return false;
+
+ GVs.push_back(DIG);
+ return true;
+}
+
+bool DebugInfoFinder::addSubprogram(DISubprogram *SP) {
+ if (!SP)
+ return false;
+
+ if (!NodesSeen.insert(SP).second)
+ return false;
+
+ SPs.push_back(SP);
+ return true;
+}
+
+bool DebugInfoFinder::addScope(DIScope *Scope) {
+ if (!Scope)
+ return false;
+ // FIXME: The OCaml binding generates a scope with no content; we treat it
+ // as null for now.
+ if (Scope->getNumOperands() == 0)
+ return false;
+ if (!NodesSeen.insert(Scope).second)
+ return false;
+ Scopes.push_back(Scope);
+ return true;
+}
+
+static MDNode *updateLoopMetadataDebugLocationsImpl(
+ MDNode *OrigLoopID, function_ref<Metadata *(Metadata *)> Updater) {
+ assert(OrigLoopID && OrigLoopID->getNumOperands() > 0 &&
+ "Loop ID needs at least one operand");
+ assert(OrigLoopID && OrigLoopID->getOperand(0).get() == OrigLoopID &&
+ "Loop ID should refer to itself");
+
+ // Save space for the self-referential LoopID.
+ SmallVector<Metadata *, 4> MDs = {nullptr};
+
+ for (unsigned i = 1; i < OrigLoopID->getNumOperands(); ++i) {
+ Metadata *MD = OrigLoopID->getOperand(i);
+ if (!MD)
+ MDs.push_back(nullptr);
+ else if (Metadata *NewMD = Updater(MD))
+ MDs.push_back(NewMD);
+ }
+
+ MDNode *NewLoopID = MDNode::getDistinct(OrigLoopID->getContext(), MDs);
+ // Insert the self-referential LoopID.
+ NewLoopID->replaceOperandWith(0, NewLoopID);
+ return NewLoopID;
+}
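+
+// For reference (illustrative IR): a loop ID is a distinct node whose first
+// operand points back at itself, e.g.
+//
+//   !0 = distinct !{!0, !1}
+//   !1 = !{!"llvm.loop.unroll.disable"}
+//
+// which is why the rebuild above reserves slot 0 and patches it afterwards.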
+
+void llvm::updateLoopMetadataDebugLocations(
+ Instruction &I, function_ref<Metadata *(Metadata *)> Updater) {
+ MDNode *OrigLoopID = I.getMetadata(LLVMContext::MD_loop);
+ if (!OrigLoopID)
+ return;
+ MDNode *NewLoopID = updateLoopMetadataDebugLocationsImpl(OrigLoopID, Updater);
+ I.setMetadata(LLVMContext::MD_loop, NewLoopID);
+}
+
+/// Return true if a node is a DILocation or if a DILocation is
+/// indirectly referenced by one of the node's children.
+static bool isDILocationReachable(SmallPtrSetImpl<Metadata *> &Visited,
+ SmallPtrSetImpl<Metadata *> &Reachable,
+ Metadata *MD) {
+ MDNode *N = dyn_cast_or_null<MDNode>(MD);
+ if (!N)
+ return false;
+ if (isa<DILocation>(N) || Reachable.count(N))
+ return true;
+ if (!Visited.insert(N).second)
+ return false;
+ for (auto &OpIt : N->operands()) {
+ Metadata *Op = OpIt.get();
+ if (isDILocationReachable(Visited, Reachable, Op)) {
+ Reachable.insert(N);
+ return true;
+ }
+ }
+ return false;
+}
+
+static MDNode *stripDebugLocFromLoopID(MDNode *N) {
+ assert(!N->operands().empty() && "Missing self reference?");
+ SmallPtrSet<Metadata *, 8> Visited, DILocationReachable;
+ // If we already visited N, there is nothing to do.
+ if (!Visited.insert(N).second)
+ return N;
+
+ // If there is no debug location, we do not have to rewrite this
+ // MDNode. This loop also initializes DILocationReachable, later
+ // needed by updateLoopMetadataDebugLocationsImpl; the use of
+ // count_if avoids an early exit.
+ if (!std::count_if(N->op_begin() + 1, N->op_end(),
+ [&Visited, &DILocationReachable](const MDOperand &Op) {
+ return isDILocationReachable(
+ Visited, DILocationReachable, Op.get());
+ }))
+ return N;
+
+ // If there is only the debug location without any actual loop metadata, we
+ // can remove the metadata.
+ if (std::all_of(
+ N->op_begin() + 1, N->op_end(),
+ [&Visited, &DILocationReachable](const MDOperand &Op) {
+ return isDILocationReachable(Visited, DILocationReachable,
+ Op.get());
+ }))
+ return nullptr;
+
+ return updateLoopMetadataDebugLocationsImpl(
+ N, [&DILocationReachable](Metadata *MD) -> Metadata * {
+ if (isa<DILocation>(MD) || DILocationReachable.count(MD))
+ return nullptr;
+ return MD;
+ });
+}
+
+bool llvm::stripDebugInfo(Function &F) {
+ bool Changed = false;
+ if (F.hasMetadata(LLVMContext::MD_dbg)) {
+ Changed = true;
+ F.setSubprogram(nullptr);
+ }
+
+ DenseMap<MDNode *, MDNode *> LoopIDsMap;
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : llvm::make_early_inc_range(BB)) {
+ if (isa<DbgInfoIntrinsic>(&I)) {
+ I.eraseFromParent();
+ Changed = true;
+ continue;
+ }
+ if (I.getDebugLoc()) {
+ Changed = true;
+ I.setDebugLoc(DebugLoc());
+ }
+ if (auto *LoopID = I.getMetadata(LLVMContext::MD_loop)) {
+ auto *NewLoopID = LoopIDsMap.lookup(LoopID);
+ if (!NewLoopID)
+ NewLoopID = LoopIDsMap[LoopID] = stripDebugLocFromLoopID(LoopID);
+ if (NewLoopID != LoopID)
+ I.setMetadata(LLVMContext::MD_loop, NewLoopID);
+ }
+ // Strip heapallocsite attachments, they point into the DIType system.
+ if (I.hasMetadataOtherThanDebugLoc())
+ I.setMetadata("heapallocsite", nullptr);
+ }
+ }
+ return Changed;
+}
+
+bool llvm::StripDebugInfo(Module &M) {
+ bool Changed = false;
+
+ for (NamedMDNode &NMD : llvm::make_early_inc_range(M.named_metadata())) {
+ // We're stripping debug info; without it, coverage information does not
+ // quite make sense.
+ if (NMD.getName().startswith("llvm.dbg.") ||
+ NMD.getName() == "llvm.gcov") {
+ NMD.eraseFromParent();
+ Changed = true;
+ }
+ }
+
+ for (Function &F : M)
+ Changed |= stripDebugInfo(F);
+
+ for (auto &GV : M.globals()) {
+ Changed |= GV.eraseMetadata(LLVMContext::MD_dbg);
+ }
+
+ if (GVMaterializer *Materializer = M.getMaterializer())
+ Materializer->setStripDebugInfo();
+
+ return Changed;
+}
+
+namespace {
+
+/// Helper class to downgrade -g metadata to -gline-tables-only metadata.
+class DebugTypeInfoRemoval {
+ DenseMap<Metadata *, Metadata *> Replacements;
+
+public:
+ /// The (void)() type.
+ MDNode *EmptySubroutineType;
+
+private:
+ /// Remember what linkage name we originally had before stripping. If we end
+ /// up making two subprograms identical who originally had different linkage
+ /// names, then we need to make one of them distinct, to avoid them getting
+ /// uniqued. Maps the new node to the old linkage name.
+ DenseMap<DISubprogram *, StringRef> NewToLinkageName;
+
+ // TODO: Remember the distinct subprogram we created for a given linkage name,
+ // so that we can continue to unique whenever possible. Map <newly created
+ // node, old linkage name> to the first (possibly distinct) DISubprogram
+ // created for that combination. This is not strictly needed for correctness,
+ // but can cut down on the number of MDNodes and let us diff cleanly with the
+ // output of -gline-tables-only.
+
+public:
+ DebugTypeInfoRemoval(LLVMContext &C)
+ : EmptySubroutineType(DISubroutineType::get(C, DINode::FlagZero, 0,
+ MDNode::get(C, {}))) {}
+
+ Metadata *map(Metadata *M) {
+ if (!M)
+ return nullptr;
+ auto Replacement = Replacements.find(M);
+ if (Replacement != Replacements.end())
+ return Replacement->second;
+
+ return M;
+ }
+ MDNode *mapNode(Metadata *N) { return dyn_cast_or_null<MDNode>(map(N)); }
+
+ /// Recursively remap N and all its referenced children. Does a depth-first
+ /// post-order traversal, so as to remap bottom-up.
+ void traverseAndRemap(MDNode *N) { traverse(N); }
+
+private:
+ // Create a new DISubprogram, to replace the one given.
+ DISubprogram *getReplacementSubprogram(DISubprogram *MDS) {
+ auto *FileAndScope = cast_or_null<DIFile>(map(MDS->getFile()));
+ StringRef LinkageName = MDS->getName().empty() ? MDS->getLinkageName() : "";
+ DISubprogram *Declaration = nullptr;
+ auto *Type = cast_or_null<DISubroutineType>(map(MDS->getType()));
+ DIType *ContainingType =
+ cast_or_null<DIType>(map(MDS->getContainingType()));
+ auto *Unit = cast_or_null<DICompileUnit>(map(MDS->getUnit()));
+ auto Variables = nullptr;
+ auto TemplateParams = nullptr;
+
+ // Make a distinct DISubprogram, for situations that warrant it.
+ auto distinctMDSubprogram = [&]() {
+ return DISubprogram::getDistinct(
+ MDS->getContext(), FileAndScope, MDS->getName(), LinkageName,
+ FileAndScope, MDS->getLine(), Type, MDS->getScopeLine(),
+ ContainingType, MDS->getVirtualIndex(), MDS->getThisAdjustment(),
+ MDS->getFlags(), MDS->getSPFlags(), Unit, TemplateParams, Declaration,
+ Variables);
+ };
+
+ if (MDS->isDistinct())
+ return distinctMDSubprogram();
+
+ auto *NewMDS = DISubprogram::get(
+ MDS->getContext(), FileAndScope, MDS->getName(), LinkageName,
+ FileAndScope, MDS->getLine(), Type, MDS->getScopeLine(), ContainingType,
+ MDS->getVirtualIndex(), MDS->getThisAdjustment(), MDS->getFlags(),
+ MDS->getSPFlags(), Unit, TemplateParams, Declaration, Variables);
+
+ StringRef OldLinkageName = MDS->getLinkageName();
+
+ // See if we need to make a distinct one.
+ auto OrigLinkage = NewToLinkageName.find(NewMDS);
+ if (OrigLinkage != NewToLinkageName.end()) {
+ if (OrigLinkage->second == OldLinkageName)
+ // We're good.
+ return NewMDS;
+
+ // Otherwise, need to make a distinct one.
+ // TODO: Query the map to see if we already have one.
+ return distinctMDSubprogram();
+ }
+
+ NewToLinkageName.insert({NewMDS, MDS->getLinkageName()});
+ return NewMDS;
+ }
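+
+ // For instance (hypothetical input): two subprograms named "f" with
+ // linkage names "_Z1fi" and "_Z1fd" collapse to one uniqued node once the
+ // linkage name is dropped; the lookup above notices the mismatch and the
+ // second arrival gets a distinct node instead.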
+
+ /// Create a new compile unit, to replace the one given
+ DICompileUnit *getReplacementCU(DICompileUnit *CU) {
+ // Drop skeleton CUs.
+ if (CU->getDWOId())
+ return nullptr;
+
+ auto *File = cast_or_null<DIFile>(map(CU->getFile()));
+ MDTuple *EnumTypes = nullptr;
+ MDTuple *RetainedTypes = nullptr;
+ MDTuple *GlobalVariables = nullptr;
+ MDTuple *ImportedEntities = nullptr;
+ return DICompileUnit::getDistinct(
+ CU->getContext(), CU->getSourceLanguage(), File, CU->getProducer(),
+ CU->isOptimized(), CU->getFlags(), CU->getRuntimeVersion(),
+ CU->getSplitDebugFilename(), DICompileUnit::LineTablesOnly, EnumTypes,
+ RetainedTypes, GlobalVariables, ImportedEntities, CU->getMacros(),
+ CU->getDWOId(), CU->getSplitDebugInlining(),
+ CU->getDebugInfoForProfiling(), CU->getNameTableKind(),
+ CU->getRangesBaseAddress(), CU->getSysRoot(), CU->getSDK());
+ }
+
+ DILocation *getReplacementMDLocation(DILocation *MLD) {
+ auto *Scope = map(MLD->getScope());
+ auto *InlinedAt = map(MLD->getInlinedAt());
+ if (MLD->isDistinct())
+ return DILocation::getDistinct(MLD->getContext(), MLD->getLine(),
+ MLD->getColumn(), Scope, InlinedAt);
+ return DILocation::get(MLD->getContext(), MLD->getLine(), MLD->getColumn(),
+ Scope, InlinedAt);
+ }
+
+ /// Create a new generic MDNode, to replace the one given
+ MDNode *getReplacementMDNode(MDNode *N) {
+ SmallVector<Metadata *, 8> Ops;
+ Ops.reserve(N->getNumOperands());
+ for (auto &I : N->operands())
+ if (I)
+ Ops.push_back(map(I));
+ auto *Ret = MDNode::get(N->getContext(), Ops);
+ return Ret;
+ }
+
+ /// Attempt to re-map N to a newly created node.
+ void remap(MDNode *N) {
+ if (Replacements.count(N))
+ return;
+
+ auto doRemap = [&](MDNode *N) -> MDNode * {
+ if (!N)
+ return nullptr;
+ if (auto *MDSub = dyn_cast<DISubprogram>(N)) {
+ remap(MDSub->getUnit());
+ return getReplacementSubprogram(MDSub);
+ }
+ if (isa<DISubroutineType>(N))
+ return EmptySubroutineType;
+ if (auto *CU = dyn_cast<DICompileUnit>(N))
+ return getReplacementCU(CU);
+ if (isa<DIFile>(N))
+ return N;
+ if (auto *MDLB = dyn_cast<DILexicalBlockBase>(N))
+ // Remap to our referenced scope (recursively).
+ return mapNode(MDLB->getScope());
+ if (auto *MLD = dyn_cast<DILocation>(N))
+ return getReplacementMDLocation(MLD);
+
+ // Otherwise, if we see these, just drop them now. Not strictly necessary,
+ // but this speeds things up a little.
+ if (isa<DINode>(N))
+ return nullptr;
+
+ return getReplacementMDNode(N);
+ };
+ Replacements[N] = doRemap(N);
+ }
+
+ /// Do the remapping traversal.
+ void traverse(MDNode *);
+};
+
+} // end anonymous namespace
+
+void DebugTypeInfoRemoval::traverse(MDNode *N) {
+ if (!N || Replacements.count(N))
+ return;
+
+ // To avoid cycles, as well as for efficiency's sake, we will sometimes
+ // prune parts of the graph.
+ auto prune = [](MDNode *Parent, MDNode *Child) {
+ if (auto *MDS = dyn_cast<DISubprogram>(Parent))
+ return Child == MDS->getRetainedNodes().get();
+ return false;
+ };
+
+ SmallVector<MDNode *, 16> ToVisit;
+ DenseSet<MDNode *> Opened;
+
+ // Visit each node starting at N in post order, and map them.
+ ToVisit.push_back(N);
+ while (!ToVisit.empty()) {
+ auto *N = ToVisit.back();
+ if (!Opened.insert(N).second) {
+ // Close it.
+ remap(N);
+ ToVisit.pop_back();
+ continue;
+ }
+ for (auto &I : N->operands())
+ if (auto *MDN = dyn_cast_or_null<MDNode>(I))
+ if (!Opened.count(MDN) && !Replacements.count(MDN) && !prune(N, MDN) &&
+ !isa<DICompileUnit>(MDN))
+ ToVisit.push_back(MDN);
+ }
+}
+
+bool llvm::stripNonLineTableDebugInfo(Module &M) {
+ bool Changed = false;
+
+ // First off, delete the debug intrinsics.
+ auto RemoveUses = [&](StringRef Name) {
+ if (auto *DbgVal = M.getFunction(Name)) {
+ while (!DbgVal->use_empty())
+ cast<Instruction>(DbgVal->user_back())->eraseFromParent();
+ DbgVal->eraseFromParent();
+ Changed = true;
+ }
+ };
+ RemoveUses("llvm.dbg.addr");
+ RemoveUses("llvm.dbg.declare");
+ RemoveUses("llvm.dbg.label");
+ RemoveUses("llvm.dbg.value");
+
+ // Delete non-CU debug info named metadata nodes.
+ for (auto NMI = M.named_metadata_begin(), NME = M.named_metadata_end();
+ NMI != NME;) {
+ NamedMDNode *NMD = &*NMI;
+ ++NMI;
+ // Specifically keep dbg.cu around.
+ if (NMD->getName() == "llvm.dbg.cu")
+ continue;
+ // Erase every other debug-info named metadata node, as advertised above.
+ if (NMD->getName().startswith("llvm.dbg.")) {
+ NMD->eraseFromParent();
+ Changed = true;
+ }
+ }
+
+ // Drop all dbg attachments from global variables.
+ for (auto &GV : M.globals())
+ GV.eraseMetadata(LLVMContext::MD_dbg);
+
+ DebugTypeInfoRemoval Mapper(M.getContext());
+ auto remap = [&](MDNode *Node) -> MDNode * {
+ if (!Node)
+ return nullptr;
+ Mapper.traverseAndRemap(Node);
+ auto *NewNode = Mapper.mapNode(Node);
+ Changed |= Node != NewNode;
+ return NewNode;
+ };
+
+ // Rewrite the DebugLocs to be equivalent to what
+ // -gline-tables-only would have created.
+ for (auto &F : M) {
+ if (auto *SP = F.getSubprogram()) {
+ Mapper.traverseAndRemap(SP);
+ auto *NewSP = cast<DISubprogram>(Mapper.mapNode(SP));
+ Changed |= SP != NewSP;
+ F.setSubprogram(NewSP);
+ }
+ for (auto &BB : F) {
+ for (auto &I : BB) {
+ auto remapDebugLoc = [&](const DebugLoc &DL) -> DebugLoc {
+ auto *Scope = DL.getScope();
+ MDNode *InlinedAt = DL.getInlinedAt();
+ Scope = remap(Scope);
+ InlinedAt = remap(InlinedAt);
+ return DILocation::get(M.getContext(), DL.getLine(), DL.getCol(),
+ Scope, InlinedAt);
+ };
+
+ if (I.getDebugLoc() != DebugLoc())
+ I.setDebugLoc(remapDebugLoc(I.getDebugLoc()));
+
+ // Remap DILocations in llvm.loop attachments.
+ updateLoopMetadataDebugLocations(I, [&](Metadata *MD) -> Metadata * {
+ if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
+ return remapDebugLoc(Loc).get();
+ return MD;
+ });
+
+ // Strip heapallocsite attachments; they point into the DIType system.
+ if (I.hasMetadataOtherThanDebugLoc())
+ I.setMetadata("heapallocsite", nullptr);
+ }
+ }
+ }
+
+ // Create a new llvm.dbg.cu, which is equivalent to the one
+ // -gline-tables-only would have created.
+ for (auto &NMD : M.getNamedMDList()) {
+ SmallVector<MDNode *, 8> Ops;
+ for (MDNode *Op : NMD.operands())
+ Ops.push_back(remap(Op));
+
+ if (!Changed)
+ continue;
+
+ NMD.clearOperands();
+ for (auto *Op : Ops)
+ if (Op)
+ NMD.addOperand(Op);
+ }
+ return Changed;
+}
+
+unsigned llvm::getDebugMetadataVersionFromModule(const Module &M) {
+ if (auto *Val = mdconst::dyn_extract_or_null<ConstantInt>(
+ M.getModuleFlag("Debug Info Version")))
+ return Val->getZExtValue();
+ return 0;
+}
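+
+// The flag read above is the "Debug Info Version" module flag. In textual
+// IR it looks like this (operand values illustrative; 3 is the current
+// DEBUG_METADATA_VERSION and 2 selects Warning behavior):
+//
+//   !llvm.module.flags = !{!0}
+//   !0 = !{i32 2, !"Debug Info Version", i32 3}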
+
+void Instruction::applyMergedLocation(const DILocation *LocA,
+ const DILocation *LocB) {
+ setDebugLoc(DILocation::getMergedLocation(LocA, LocB));
+}
+
+void Instruction::updateLocationAfterHoist() { dropLocation(); }
+
+void Instruction::dropLocation() {
+ const DebugLoc &DL = getDebugLoc();
+ if (!DL)
+ return;
+
+ // If this isn't a call, drop the location to allow a location from a
+ // preceding instruction to propagate.
+ if (!isa<CallBase>(this)) {
+ setDebugLoc(DebugLoc());
+ return;
+ }
+
+ // Set a line 0 location for calls to preserve scope information in case
+ // inlining occurs.
+ DISubprogram *SP = getFunction()->getSubprogram();
+ if (SP)
+ // If a function scope is available, set it on the line 0 location. When
+ // hoisting a call to a predecessor block, using the function scope avoids
+ // making it look like the callee was reached earlier than it should be.
+ setDebugLoc(DILocation::get(getContext(), 0, 0, SP));
+ else
+ // The parent function has no scope. Go ahead and drop the location. If
+ // the parent function is inlined, and the callee has a subprogram, the
+ // inliner will attach a location to the call.
+ //
+ // One alternative is to set a line 0 location with the existing scope and
+ // inlinedAt info. The location might be sensitive to when inlining occurs.
+ setDebugLoc(DebugLoc());
+}
+
+//===----------------------------------------------------------------------===//
+// LLVM C API implementations.
+//===----------------------------------------------------------------------===//
+
+static unsigned map_from_llvmDWARFsourcelanguage(LLVMDWARFSourceLanguage lang) {
+ switch (lang) {
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR) \
+ case LLVMDWARFSourceLanguage##NAME: \
+ return ID;
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DW_LANG
+ }
+ llvm_unreachable("Unhandled Tag");
+}
+
+template <typename DIT> DIT *unwrapDI(LLVMMetadataRef Ref) {
+ return (DIT *)(Ref ? unwrap<MDNode>(Ref) : nullptr);
+}
+
+static DINode::DIFlags map_from_llvmDIFlags(LLVMDIFlags Flags) {
+ return static_cast<DINode::DIFlags>(Flags);
+}
+
+static LLVMDIFlags map_to_llvmDIFlags(DINode::DIFlags Flags) {
+ return static_cast<LLVMDIFlags>(Flags);
+}
+
+static DISubprogram::DISPFlags
+pack_into_DISPFlags(bool IsLocalToUnit, bool IsDefinition, bool IsOptimized) {
+ return DISubprogram::toSPFlags(IsLocalToUnit, IsDefinition, IsOptimized);
+}
+
+unsigned LLVMDebugMetadataVersion() {
+ return DEBUG_METADATA_VERSION;
+}
+
+LLVMDIBuilderRef LLVMCreateDIBuilderDisallowUnresolved(LLVMModuleRef M) {
+ return wrap(new DIBuilder(*unwrap(M), false));
+}
+
+LLVMDIBuilderRef LLVMCreateDIBuilder(LLVMModuleRef M) {
+ return wrap(new DIBuilder(*unwrap(M)));
+}
+
+unsigned LLVMGetModuleDebugMetadataVersion(LLVMModuleRef M) {
+ return getDebugMetadataVersionFromModule(*unwrap(M));
+}
+
+LLVMBool LLVMStripModuleDebugInfo(LLVMModuleRef M) {
+ return StripDebugInfo(*unwrap(M));
+}
+
+void LLVMDisposeDIBuilder(LLVMDIBuilderRef Builder) {
+ delete unwrap(Builder);
+}
+
+void LLVMDIBuilderFinalize(LLVMDIBuilderRef Builder) {
+ unwrap(Builder)->finalize();
+}
+
+void LLVMDIBuilderFinalizeSubprogram(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef subprogram) {
+ unwrap(Builder)->finalizeSubprogram(unwrapDI<DISubprogram>(subprogram));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateCompileUnit(
+ LLVMDIBuilderRef Builder, LLVMDWARFSourceLanguage Lang,
+ LLVMMetadataRef FileRef, const char *Producer, size_t ProducerLen,
+ LLVMBool isOptimized, const char *Flags, size_t FlagsLen,
+ unsigned RuntimeVer, const char *SplitName, size_t SplitNameLen,
+ LLVMDWARFEmissionKind Kind, unsigned DWOId, LLVMBool SplitDebugInlining,
+ LLVMBool DebugInfoForProfiling, const char *SysRoot, size_t SysRootLen,
+ const char *SDK, size_t SDKLen) {
+ auto File = unwrapDI<DIFile>(FileRef);
+
+ return wrap(unwrap(Builder)->createCompileUnit(
+ map_from_llvmDWARFsourcelanguage(Lang), File,
+ StringRef(Producer, ProducerLen), isOptimized, StringRef(Flags, FlagsLen),
+ RuntimeVer, StringRef(SplitName, SplitNameLen),
+ static_cast<DICompileUnit::DebugEmissionKind>(Kind), DWOId,
+ SplitDebugInlining, DebugInfoForProfiling,
+ DICompileUnit::DebugNameTableKind::Default, false,
+ StringRef(SysRoot, SysRootLen), StringRef(SDK, SDKLen)));
+}
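+
+// An illustrative sequence tying the builder entry points together (the
+// LLVMModuleRef M and all identifiers are hypothetical):
+//
+//   LLVMDIBuilderRef DIB = LLVMCreateDIBuilder(M);
+//   LLVMMetadataRef File = LLVMDIBuilderCreateFile(DIB, "t.c", 3, ".", 1);
+//   LLVMMetadataRef CU = LLVMDIBuilderCreateCompileUnit(
+//       DIB, LLVMDWARFSourceLanguageC, File, "mycc", 4, /*isOptimized=*/0,
+//       "", 0, /*RuntimeVer=*/0, "", 0, LLVMDWARFEmissionFull, /*DWOId=*/0,
+//       /*SplitDebugInlining=*/1, /*DebugInfoForProfiling=*/0, "", 0, "", 0);
+//   LLVMDIBuilderFinalize(DIB);
+//   LLVMDisposeDIBuilder(DIB);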
+
+LLVMMetadataRef
+LLVMDIBuilderCreateFile(LLVMDIBuilderRef Builder, const char *Filename,
+ size_t FilenameLen, const char *Directory,
+ size_t DirectoryLen) {
+ return wrap(unwrap(Builder)->createFile(StringRef(Filename, FilenameLen),
+ StringRef(Directory, DirectoryLen)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateModule(LLVMDIBuilderRef Builder, LLVMMetadataRef ParentScope,
+ const char *Name, size_t NameLen,
+ const char *ConfigMacros, size_t ConfigMacrosLen,
+ const char *IncludePath, size_t IncludePathLen,
+ const char *APINotesFile, size_t APINotesFileLen) {
+ return wrap(unwrap(Builder)->createModule(
+ unwrapDI<DIScope>(ParentScope), StringRef(Name, NameLen),
+ StringRef(ConfigMacros, ConfigMacrosLen),
+ StringRef(IncludePath, IncludePathLen),
+ StringRef(APINotesFile, APINotesFileLen)));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateNameSpace(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef ParentScope,
+ const char *Name, size_t NameLen,
+ LLVMBool ExportSymbols) {
+ return wrap(unwrap(Builder)->createNameSpace(
+ unwrapDI<DIScope>(ParentScope), StringRef(Name, NameLen), ExportSymbols));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateFunction(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, const char *LinkageName, size_t LinkageNameLen,
+ LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
+ LLVMBool IsLocalToUnit, LLVMBool IsDefinition,
+ unsigned ScopeLine, LLVMDIFlags Flags, LLVMBool IsOptimized) {
+ return wrap(unwrap(Builder)->createFunction(
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, {LinkageName, LinkageNameLen},
+ unwrapDI<DIFile>(File), LineNo, unwrapDI<DISubroutineType>(Ty), ScopeLine,
+ map_from_llvmDIFlags(Flags),
+ pack_into_DISPFlags(IsLocalToUnit, IsDefinition, IsOptimized), nullptr,
+ nullptr, nullptr));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateLexicalBlock(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope,
+ LLVMMetadataRef File, unsigned Line, unsigned Col) {
+ return wrap(unwrap(Builder)->createLexicalBlock(unwrapDI<DIScope>(Scope),
+ unwrapDI<DIFile>(File),
+ Line, Col));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateLexicalBlockFile(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef Scope,
+ LLVMMetadataRef File,
+ unsigned Discriminator) {
+ return wrap(unwrap(Builder)->createLexicalBlockFile(unwrapDI<DIScope>(Scope),
+ unwrapDI<DIFile>(File),
+ Discriminator));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateImportedModuleFromNamespace(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef Scope,
+ LLVMMetadataRef NS,
+ LLVMMetadataRef File,
+ unsigned Line) {
+ return wrap(unwrap(Builder)->createImportedModule(unwrapDI<DIScope>(Scope),
+ unwrapDI<DINamespace>(NS),
+ unwrapDI<DIFile>(File),
+ Line));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateImportedModuleFromAlias(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope,
+ LLVMMetadataRef ImportedEntity, LLVMMetadataRef File, unsigned Line,
+ LLVMMetadataRef *Elements, unsigned NumElements) {
+ auto Elts =
+ (NumElements > 0)
+ ? unwrap(Builder)->getOrCreateArray({unwrap(Elements), NumElements})
+ : nullptr;
+ return wrap(unwrap(Builder)->createImportedModule(
+ unwrapDI<DIScope>(Scope), unwrapDI<DIImportedEntity>(ImportedEntity),
+ unwrapDI<DIFile>(File), Line, Elts));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateImportedModuleFromModule(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, LLVMMetadataRef M,
+ LLVMMetadataRef File, unsigned Line, LLVMMetadataRef *Elements,
+ unsigned NumElements) {
+ auto Elts =
+ (NumElements > 0)
+ ? unwrap(Builder)->getOrCreateArray({unwrap(Elements), NumElements})
+ : nullptr;
+ return wrap(unwrap(Builder)->createImportedModule(
+ unwrapDI<DIScope>(Scope), unwrapDI<DIModule>(M), unwrapDI<DIFile>(File),
+ Line, Elts));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateImportedDeclaration(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, LLVMMetadataRef Decl,
+ LLVMMetadataRef File, unsigned Line, const char *Name, size_t NameLen,
+ LLVMMetadataRef *Elements, unsigned NumElements) {
+ auto Elts =
+ (NumElements > 0)
+ ? unwrap(Builder)->getOrCreateArray({unwrap(Elements), NumElements})
+ : nullptr;
+ return wrap(unwrap(Builder)->createImportedDeclaration(
+ unwrapDI<DIScope>(Scope), unwrapDI<DINode>(Decl), unwrapDI<DIFile>(File),
+ Line, {Name, NameLen}, Elts));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateDebugLocation(LLVMContextRef Ctx, unsigned Line,
+ unsigned Column, LLVMMetadataRef Scope,
+ LLVMMetadataRef InlinedAt) {
+ return wrap(DILocation::get(*unwrap(Ctx), Line, Column, unwrap(Scope),
+ unwrap(InlinedAt)));
+}
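+
+// Illustrative pairing with LLVMInstructionSetDebugLoc (defined further
+// below); Ctx, SP (a DISubprogram), and Inst are assumed to exist:
+//
+//   LLVMMetadataRef Loc = LLVMDIBuilderCreateDebugLocation(
+//       Ctx, /*Line=*/42, /*Column=*/7, /*Scope=*/SP, /*InlinedAt=*/NULL);
+//   LLVMInstructionSetDebugLoc(Inst, Loc);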
+
+unsigned LLVMDILocationGetLine(LLVMMetadataRef Location) {
+ return unwrapDI<DILocation>(Location)->getLine();
+}
+
+unsigned LLVMDILocationGetColumn(LLVMMetadataRef Location) {
+ return unwrapDI<DILocation>(Location)->getColumn();
+}
+
+LLVMMetadataRef LLVMDILocationGetScope(LLVMMetadataRef Location) {
+ return wrap(unwrapDI<DILocation>(Location)->getScope());
+}
+
+LLVMMetadataRef LLVMDILocationGetInlinedAt(LLVMMetadataRef Location) {
+ return wrap(unwrapDI<DILocation>(Location)->getInlinedAt());
+}
+
+LLVMMetadataRef LLVMDIScopeGetFile(LLVMMetadataRef Scope) {
+ return wrap(unwrapDI<DIScope>(Scope)->getFile());
+}
+
+const char *LLVMDIFileGetDirectory(LLVMMetadataRef File, unsigned *Len) {
+ auto Dir = unwrapDI<DIFile>(File)->getDirectory();
+ *Len = Dir.size();
+ return Dir.data();
+}
+
+const char *LLVMDIFileGetFilename(LLVMMetadataRef File, unsigned *Len) {
+ auto Name = unwrapDI<DIFile>(File)->getFilename();
+ *Len = Name.size();
+ return Name.data();
+}
+
+const char *LLVMDIFileGetSource(LLVMMetadataRef File, unsigned *Len) {
+ if (auto Src = unwrapDI<DIFile>(File)->getSource()) {
+ *Len = Src->size();
+ return Src->data();
+ }
+ *Len = 0;
+ return "";
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateMacro(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef ParentMacroFile,
+ unsigned Line,
+ LLVMDWARFMacinfoRecordType RecordType,
+ const char *Name, size_t NameLen,
+ const char *Value, size_t ValueLen) {
+ return wrap(
+ unwrap(Builder)->createMacro(unwrapDI<DIMacroFile>(ParentMacroFile), Line,
+ static_cast<MacinfoRecordType>(RecordType),
+ {Name, NameLen}, {Value, ValueLen}));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateTempMacroFile(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef ParentMacroFile, unsigned Line,
+ LLVMMetadataRef File) {
+ return wrap(unwrap(Builder)->createTempMacroFile(
+ unwrapDI<DIMacroFile>(ParentMacroFile), Line, unwrapDI<DIFile>(File)));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateEnumerator(LLVMDIBuilderRef Builder,
+ const char *Name, size_t NameLen,
+ int64_t Value,
+ LLVMBool IsUnsigned) {
+ return wrap(unwrap(Builder)->createEnumerator({Name, NameLen}, Value,
+ IsUnsigned != 0));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateEnumerationType(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, LLVMMetadataRef *Elements,
+ unsigned NumElements, LLVMMetadataRef ClassTy) {
+ auto Elts = unwrap(Builder)->getOrCreateArray({unwrap(Elements),
+ NumElements});
+ return wrap(unwrap(Builder)->createEnumerationType(
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, unwrapDI<DIFile>(File),
+ LineNumber, SizeInBits, AlignInBits, Elts, unwrapDI<DIType>(ClassTy)));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateUnionType(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, LLVMDIFlags Flags,
+ LLVMMetadataRef *Elements, unsigned NumElements, unsigned RunTimeLang,
+ const char *UniqueId, size_t UniqueIdLen) {
+ auto Elts = unwrap(Builder)->getOrCreateArray({unwrap(Elements),
+ NumElements});
+ return wrap(unwrap(Builder)->createUnionType(
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, unwrapDI<DIFile>(File),
+ LineNumber, SizeInBits, AlignInBits, map_from_llvmDIFlags(Flags),
+ Elts, RunTimeLang, {UniqueId, UniqueIdLen}));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateArrayType(LLVMDIBuilderRef Builder, uint64_t Size,
+ uint32_t AlignInBits, LLVMMetadataRef Ty,
+ LLVMMetadataRef *Subscripts,
+ unsigned NumSubscripts) {
+ auto Subs = unwrap(Builder)->getOrCreateArray({unwrap(Subscripts),
+ NumSubscripts});
+ return wrap(unwrap(Builder)->createArrayType(Size, AlignInBits,
+ unwrapDI<DIType>(Ty), Subs));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateVectorType(LLVMDIBuilderRef Builder, uint64_t Size,
+ uint32_t AlignInBits, LLVMMetadataRef Ty,
+ LLVMMetadataRef *Subscripts,
+ unsigned NumSubscripts) {
+ auto Subs = unwrap(Builder)->getOrCreateArray({unwrap(Subscripts),
+ NumSubscripts});
+ return wrap(unwrap(Builder)->createVectorType(Size, AlignInBits,
+ unwrapDI<DIType>(Ty), Subs));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateBasicType(LLVMDIBuilderRef Builder, const char *Name,
+ size_t NameLen, uint64_t SizeInBits,
+ LLVMDWARFTypeEncoding Encoding,
+ LLVMDIFlags Flags) {
+ return wrap(unwrap(Builder)->createBasicType({Name, NameLen},
+ SizeInBits, Encoding,
+ map_from_llvmDIFlags(Flags)));
+}
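+
+// For example, a 32-bit signed integer type; 0x05 is DW_ATE_signed and the
+// name "int" is illustrative (DIB is assumed to exist):
+//
+//   LLVMMetadataRef IntTy = LLVMDIBuilderCreateBasicType(
+//       DIB, "int", 3, /*SizeInBits=*/32, /*Encoding=*/0x05, LLVMDIFlagZero);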
+
+LLVMMetadataRef LLVMDIBuilderCreatePointerType(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef PointeeTy,
+ uint64_t SizeInBits, uint32_t AlignInBits, unsigned AddressSpace,
+ const char *Name, size_t NameLen) {
+ return wrap(unwrap(Builder)->createPointerType(unwrapDI<DIType>(PointeeTy),
+ SizeInBits, AlignInBits,
+ AddressSpace, {Name, NameLen}));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateStructType(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, LLVMDIFlags Flags,
+ LLVMMetadataRef DerivedFrom, LLVMMetadataRef *Elements,
+ unsigned NumElements, unsigned RunTimeLang, LLVMMetadataRef VTableHolder,
+ const char *UniqueId, size_t UniqueIdLen) {
+ auto Elts = unwrap(Builder)->getOrCreateArray({unwrap(Elements),
+ NumElements});
+ return wrap(unwrap(Builder)->createStructType(
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, unwrapDI<DIFile>(File),
+ LineNumber, SizeInBits, AlignInBits, map_from_llvmDIFlags(Flags),
+ unwrapDI<DIType>(DerivedFrom), Elts, RunTimeLang,
+ unwrapDI<DIType>(VTableHolder), {UniqueId, UniqueIdLen}));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateMemberType(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, LLVMDIFlags Flags,
+ LLVMMetadataRef Ty) {
+ return wrap(unwrap(Builder)->createMemberType(unwrapDI<DIScope>(Scope),
+ {Name, NameLen}, unwrapDI<DIFile>(File), LineNo, SizeInBits, AlignInBits,
+ OffsetInBits, map_from_llvmDIFlags(Flags), unwrapDI<DIType>(Ty)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateUnspecifiedType(LLVMDIBuilderRef Builder, const char *Name,
+ size_t NameLen) {
+ return wrap(unwrap(Builder)->createUnspecifiedType({Name, NameLen}));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateStaticMemberType(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
+ LLVMMetadataRef Type, LLVMDIFlags Flags, LLVMValueRef ConstantVal,
+ uint32_t AlignInBits) {
+ return wrap(unwrap(Builder)->createStaticMemberType(
+ unwrapDI<DIScope>(Scope), {Name, NameLen},
+ unwrapDI<DIFile>(File), LineNumber, unwrapDI<DIType>(Type),
+ map_from_llvmDIFlags(Flags), unwrap<Constant>(ConstantVal),
+ AlignInBits));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateObjCIVar(LLVMDIBuilderRef Builder,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNo,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ uint64_t OffsetInBits, LLVMDIFlags Flags,
+ LLVMMetadataRef Ty, LLVMMetadataRef PropertyNode) {
+ return wrap(unwrap(Builder)->createObjCIVar(
+ {Name, NameLen}, unwrapDI<DIFile>(File), LineNo,
+ SizeInBits, AlignInBits, OffsetInBits,
+ map_from_llvmDIFlags(Flags), unwrapDI<DIType>(Ty),
+ unwrapDI<MDNode>(PropertyNode)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateObjCProperty(LLVMDIBuilderRef Builder,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNo,
+ const char *GetterName, size_t GetterNameLen,
+ const char *SetterName, size_t SetterNameLen,
+ unsigned PropertyAttributes,
+ LLVMMetadataRef Ty) {
+ return wrap(unwrap(Builder)->createObjCProperty(
+ {Name, NameLen}, unwrapDI<DIFile>(File), LineNo,
+ {GetterName, GetterNameLen}, {SetterName, SetterNameLen},
+ PropertyAttributes, unwrapDI<DIType>(Ty)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateObjectPointerType(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef Type) {
+ return wrap(unwrap(Builder)->createObjectPointerType(unwrapDI<DIType>(Type)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateTypedef(LLVMDIBuilderRef Builder, LLVMMetadataRef Type,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNo,
+ LLVMMetadataRef Scope, uint32_t AlignInBits) {
+ return wrap(unwrap(Builder)->createTypedef(
+ unwrapDI<DIType>(Type), {Name, NameLen}, unwrapDI<DIFile>(File), LineNo,
+ unwrapDI<DIScope>(Scope), AlignInBits));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateInheritance(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef Ty, LLVMMetadataRef BaseTy,
+ uint64_t BaseOffset, uint32_t VBPtrOffset,
+ LLVMDIFlags Flags) {
+ return wrap(unwrap(Builder)->createInheritance(
+ unwrapDI<DIType>(Ty), unwrapDI<DIType>(BaseTy),
+ BaseOffset, VBPtrOffset, map_from_llvmDIFlags(Flags)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateForwardDecl(
+ LLVMDIBuilderRef Builder, unsigned Tag, const char *Name,
+ size_t NameLen, LLVMMetadataRef Scope, LLVMMetadataRef File, unsigned Line,
+ unsigned RuntimeLang, uint64_t SizeInBits, uint32_t AlignInBits,
+ const char *UniqueIdentifier, size_t UniqueIdentifierLen) {
+ return wrap(unwrap(Builder)->createForwardDecl(
+ Tag, {Name, NameLen}, unwrapDI<DIScope>(Scope),
+ unwrapDI<DIFile>(File), Line, RuntimeLang, SizeInBits,
+ AlignInBits, {UniqueIdentifier, UniqueIdentifierLen}));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateReplaceableCompositeType(
+ LLVMDIBuilderRef Builder, unsigned Tag, const char *Name,
+ size_t NameLen, LLVMMetadataRef Scope, LLVMMetadataRef File, unsigned Line,
+ unsigned RuntimeLang, uint64_t SizeInBits, uint32_t AlignInBits,
+ LLVMDIFlags Flags, const char *UniqueIdentifier,
+ size_t UniqueIdentifierLen) {
+ return wrap(unwrap(Builder)->createReplaceableCompositeType(
+ Tag, {Name, NameLen}, unwrapDI<DIScope>(Scope),
+ unwrapDI<DIFile>(File), Line, RuntimeLang, SizeInBits,
+ AlignInBits, map_from_llvmDIFlags(Flags),
+ {UniqueIdentifier, UniqueIdentifierLen}));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateQualifiedType(LLVMDIBuilderRef Builder, unsigned Tag,
+ LLVMMetadataRef Type) {
+ return wrap(unwrap(Builder)->createQualifiedType(Tag,
+ unwrapDI<DIType>(Type)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateReferenceType(LLVMDIBuilderRef Builder, unsigned Tag,
+ LLVMMetadataRef Type) {
+ return wrap(unwrap(Builder)->createReferenceType(Tag,
+ unwrapDI<DIType>(Type)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateNullPtrType(LLVMDIBuilderRef Builder) {
+ return wrap(unwrap(Builder)->createNullPtrType());
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateMemberPointerType(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef PointeeType,
+ LLVMMetadataRef ClassType,
+ uint64_t SizeInBits,
+ uint32_t AlignInBits,
+ LLVMDIFlags Flags) {
+ return wrap(unwrap(Builder)->createMemberPointerType(
+ unwrapDI<DIType>(PointeeType),
+ unwrapDI<DIType>(ClassType), SizeInBits, AlignInBits,
+ map_from_llvmDIFlags(Flags)));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateBitFieldMemberType(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNumber,
+ uint64_t SizeInBits,
+ uint64_t OffsetInBits,
+ uint64_t StorageOffsetInBits,
+ LLVMDIFlags Flags, LLVMMetadataRef Type) {
+ return wrap(unwrap(Builder)->createBitFieldMemberType(
+ unwrapDI<DIScope>(Scope), {Name, NameLen},
+ unwrapDI<DIFile>(File), LineNumber,
+ SizeInBits, OffsetInBits, StorageOffsetInBits,
+ map_from_llvmDIFlags(Flags), unwrapDI<DIType>(Type)));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateClassType(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef Scope, const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, LLVMDIFlags Flags,
+ LLVMMetadataRef DerivedFrom,
+ LLVMMetadataRef *Elements, unsigned NumElements,
+ LLVMMetadataRef VTableHolder, LLVMMetadataRef TemplateParamsNode,
+ const char *UniqueIdentifier, size_t UniqueIdentifierLen) {
+ auto Elts = unwrap(Builder)->getOrCreateArray({unwrap(Elements),
+ NumElements});
+ return wrap(unwrap(Builder)->createClassType(
+ unwrapDI<DIScope>(Scope), {Name, NameLen},
+ unwrapDI<DIFile>(File), LineNumber,
+ SizeInBits, AlignInBits, OffsetInBits,
+ map_from_llvmDIFlags(Flags), unwrapDI<DIType>(DerivedFrom),
+ Elts, unwrapDI<DIType>(VTableHolder),
+ unwrapDI<MDNode>(TemplateParamsNode),
+ {UniqueIdentifier, UniqueIdentifierLen}));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateArtificialType(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef Type) {
+ return wrap(unwrap(Builder)->createArtificialType(unwrapDI<DIType>(Type)));
+}
+
+const char *LLVMDITypeGetName(LLVMMetadataRef DType, size_t *Length) {
+ StringRef Str = unwrap<DIType>(DType)->getName();
+ *Length = Str.size();
+ return Str.data();
+}
+
+uint64_t LLVMDITypeGetSizeInBits(LLVMMetadataRef DType) {
+ return unwrapDI<DIType>(DType)->getSizeInBits();
+}
+
+uint64_t LLVMDITypeGetOffsetInBits(LLVMMetadataRef DType) {
+ return unwrapDI<DIType>(DType)->getOffsetInBits();
+}
+
+uint32_t LLVMDITypeGetAlignInBits(LLVMMetadataRef DType) {
+ return unwrapDI<DIType>(DType)->getAlignInBits();
+}
+
+unsigned LLVMDITypeGetLine(LLVMMetadataRef DType) {
+ return unwrapDI<DIType>(DType)->getLine();
+}
+
+LLVMDIFlags LLVMDITypeGetFlags(LLVMMetadataRef DType) {
+ return map_to_llvmDIFlags(unwrapDI<DIType>(DType)->getFlags());
+}
+
+LLVMMetadataRef LLVMDIBuilderGetOrCreateTypeArray(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef *Types,
+ size_t Length) {
+ return wrap(
+ unwrap(Builder)->getOrCreateTypeArray({unwrap(Types), Length}).get());
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateSubroutineType(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef File,
+ LLVMMetadataRef *ParameterTypes,
+ unsigned NumParameterTypes,
+ LLVMDIFlags Flags) {
+ auto Elts = unwrap(Builder)->getOrCreateTypeArray({unwrap(ParameterTypes),
+ NumParameterTypes});
+ return wrap(unwrap(Builder)->createSubroutineType(
+ Elts, map_from_llvmDIFlags(Flags)));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateExpression(LLVMDIBuilderRef Builder,
+ uint64_t *Addr, size_t Length) {
+ return wrap(
+ unwrap(Builder)->createExpression(ArrayRef<uint64_t>(Addr, Length)));
+}
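+
+// The common case of a variable living exactly in its storage uses an empty
+// expression, i.e. no operations at all (DIB is assumed to exist):
+//
+//   LLVMMetadataRef Empty = LLVMDIBuilderCreateExpression(DIB, NULL, 0);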
+
+LLVMMetadataRef
+LLVMDIBuilderCreateConstantValueExpression(LLVMDIBuilderRef Builder,
+ uint64_t Value) {
+ return wrap(unwrap(Builder)->createConstantValueExpression(Value));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateGlobalVariableExpression(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, const char *Linkage, size_t LinkLen, LLVMMetadataRef File,
+ unsigned LineNo, LLVMMetadataRef Ty, LLVMBool LocalToUnit,
+ LLVMMetadataRef Expr, LLVMMetadataRef Decl, uint32_t AlignInBits) {
+ return wrap(unwrap(Builder)->createGlobalVariableExpression(
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, {Linkage, LinkLen},
+ unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), LocalToUnit,
+ true, unwrap<DIExpression>(Expr), unwrapDI<MDNode>(Decl),
+ nullptr, AlignInBits));
+}
+
+LLVMMetadataRef LLVMDIGlobalVariableExpressionGetVariable(LLVMMetadataRef GVE) {
+ return wrap(unwrapDI<DIGlobalVariableExpression>(GVE)->getVariable());
+}
+
+LLVMMetadataRef LLVMDIGlobalVariableExpressionGetExpression(
+ LLVMMetadataRef GVE) {
+ return wrap(unwrapDI<DIGlobalVariableExpression>(GVE)->getExpression());
+}
+
+LLVMMetadataRef LLVMDIVariableGetFile(LLVMMetadataRef Var) {
+ return wrap(unwrapDI<DIVariable>(Var)->getFile());
+}
+
+LLVMMetadataRef LLVMDIVariableGetScope(LLVMMetadataRef Var) {
+ return wrap(unwrapDI<DIVariable>(Var)->getScope());
+}
+
+unsigned LLVMDIVariableGetLine(LLVMMetadataRef Var) {
+ return unwrapDI<DIVariable>(Var)->getLine();
+}
+
+LLVMMetadataRef LLVMTemporaryMDNode(LLVMContextRef Ctx, LLVMMetadataRef *Data,
+ size_t Count) {
+ return wrap(
+ MDTuple::getTemporary(*unwrap(Ctx), {unwrap(Data), Count}).release());
+}
+
+void LLVMDisposeTemporaryMDNode(LLVMMetadataRef TempNode) {
+ MDNode::deleteTemporary(unwrapDI<MDNode>(TempNode));
+}
+
+void LLVMMetadataReplaceAllUsesWith(LLVMMetadataRef TargetMetadata,
+ LLVMMetadataRef Replacement) {
+ auto *Node = unwrapDI<MDNode>(TargetMetadata);
+ Node->replaceAllUsesWith(unwrap<Metadata>(Replacement));
+ MDNode::deleteTemporary(Node);
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateTempGlobalVariableFwdDecl(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, const char *Linkage, size_t LnkLen, LLVMMetadataRef File,
+ unsigned LineNo, LLVMMetadataRef Ty, LLVMBool LocalToUnit,
+ LLVMMetadataRef Decl, uint32_t AlignInBits) {
+ return wrap(unwrap(Builder)->createTempGlobalVariableFwdDecl(
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, {Linkage, LnkLen},
+ unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), LocalToUnit,
+ unwrapDI<MDNode>(Decl), nullptr, AlignInBits));
+}
+
+LLVMValueRef
+LLVMDIBuilderInsertDeclareBefore(LLVMDIBuilderRef Builder, LLVMValueRef Storage,
+ LLVMMetadataRef VarInfo, LLVMMetadataRef Expr,
+ LLVMMetadataRef DL, LLVMValueRef Instr) {
+ return wrap(unwrap(Builder)->insertDeclare(
+ unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
+ unwrap<DIExpression>(Expr), unwrap<DILocation>(DL),
+ unwrap<Instruction>(Instr)));
+}
+
+LLVMValueRef LLVMDIBuilderInsertDeclareAtEnd(
+ LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
+ LLVMMetadataRef Expr, LLVMMetadataRef DL, LLVMBasicBlockRef Block) {
+ return wrap(unwrap(Builder)->insertDeclare(
+ unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
+ unwrap<DIExpression>(Expr), unwrap<DILocation>(DL),
+ unwrap(Block)));
+}
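+
+// Illustrative flow declaring a local variable (DIB, SP, File, IntTy,
+// Alloca, Loc, and Entry are assumed to exist;
+// LLVMDIBuilderCreateAutoVariable is defined further below):
+//
+//   LLVMMetadataRef Var = LLVMDIBuilderCreateAutoVariable(
+//       DIB, SP, "x", 1, File, /*LineNo=*/10, IntTy, /*AlwaysPreserve=*/1,
+//       LLVMDIFlagZero, /*AlignInBits=*/0);
+//   LLVMDIBuilderInsertDeclareAtEnd(
+//       DIB, Alloca, Var, LLVMDIBuilderCreateExpression(DIB, NULL, 0), Loc,
+//       Entry);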
+
+LLVMValueRef LLVMDIBuilderInsertDbgValueBefore(LLVMDIBuilderRef Builder,
+ LLVMValueRef Val,
+ LLVMMetadataRef VarInfo,
+ LLVMMetadataRef Expr,
+ LLVMMetadataRef DebugLoc,
+ LLVMValueRef Instr) {
+ return wrap(unwrap(Builder)->insertDbgValueIntrinsic(
+ unwrap(Val), unwrap<DILocalVariable>(VarInfo),
+ unwrap<DIExpression>(Expr), unwrap<DILocation>(DebugLoc),
+ unwrap<Instruction>(Instr)));
+}
+
+LLVMValueRef LLVMDIBuilderInsertDbgValueAtEnd(LLVMDIBuilderRef Builder,
+ LLVMValueRef Val,
+ LLVMMetadataRef VarInfo,
+ LLVMMetadataRef Expr,
+ LLVMMetadataRef DebugLoc,
+ LLVMBasicBlockRef Block) {
+ return wrap(unwrap(Builder)->insertDbgValueIntrinsic(
+ unwrap(Val), unwrap<DILocalVariable>(VarInfo),
+ unwrap<DIExpression>(Expr), unwrap<DILocation>(DebugLoc),
+ unwrap(Block)));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateAutoVariable(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
+ LLVMBool AlwaysPreserve, LLVMDIFlags Flags, uint32_t AlignInBits) {
+ return wrap(unwrap(Builder)->createAutoVariable(
+ unwrap<DIScope>(Scope), {Name, NameLen}, unwrap<DIFile>(File),
+ LineNo, unwrap<DIType>(Ty), AlwaysPreserve,
+ map_from_llvmDIFlags(Flags), AlignInBits));
+}
+
+LLVMMetadataRef LLVMDIBuilderCreateParameterVariable(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, unsigned ArgNo, LLVMMetadataRef File, unsigned LineNo,
+ LLVMMetadataRef Ty, LLVMBool AlwaysPreserve, LLVMDIFlags Flags) {
+ return wrap(unwrap(Builder)->createParameterVariable(
+ unwrap<DIScope>(Scope), {Name, NameLen}, ArgNo, unwrap<DIFile>(File),
+ LineNo, unwrap<DIType>(Ty), AlwaysPreserve,
+ map_from_llvmDIFlags(Flags)));
+}
+
+LLVMMetadataRef LLVMDIBuilderGetOrCreateSubrange(LLVMDIBuilderRef Builder,
+ int64_t Lo, int64_t Count) {
+ return wrap(unwrap(Builder)->getOrCreateSubrange(Lo, Count));
+}
+
+LLVMMetadataRef LLVMDIBuilderGetOrCreateArray(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef *Data,
+ size_t Length) {
+ Metadata **DataValue = unwrap(Data);
+ return wrap(unwrap(Builder)->getOrCreateArray({DataValue, Length}).get());
+}
+
+LLVMMetadataRef LLVMGetSubprogram(LLVMValueRef Func) {
+ return wrap(unwrap<Function>(Func)->getSubprogram());
+}
+
+void LLVMSetSubprogram(LLVMValueRef Func, LLVMMetadataRef SP) {
+ unwrap<Function>(Func)->setSubprogram(unwrap<DISubprogram>(SP));
+}
+
+unsigned LLVMDISubprogramGetLine(LLVMMetadataRef Subprogram) {
+ return unwrapDI<DISubprogram>(Subprogram)->getLine();
+}
+
+LLVMMetadataRef LLVMInstructionGetDebugLoc(LLVMValueRef Inst) {
+ return wrap(unwrap<Instruction>(Inst)->getDebugLoc().getAsMDNode());
+}
+
+void LLVMInstructionSetDebugLoc(LLVMValueRef Inst, LLVMMetadataRef Loc) {
+ if (Loc)
+ unwrap<Instruction>(Inst)->setDebugLoc(DebugLoc(unwrap<MDNode>(Loc)));
+ else
+ unwrap<Instruction>(Inst)->setDebugLoc(DebugLoc());
+}
+
+LLVMMetadataKind LLVMGetMetadataKind(LLVMMetadataRef Metadata) {
+ switch(unwrap(Metadata)->getMetadataID()) {
+#define HANDLE_METADATA_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ return (LLVMMetadataKind)LLVM##CLASS##MetadataKind;
+#include "llvm/IR/Metadata.def"
+ default:
+ return (LLVMMetadataKind)LLVMGenericDINodeMetadataKind;
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp b/contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp
new file mode 100644
index 000000000000..50799327c78a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -0,0 +1,1840 @@
+//===- DebugInfoMetadata.cpp - Implement debug info metadata --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the debug info Metadata classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "LLVMContextImpl.h"
+#include "MetadataImpl.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+
+#include <limits>
+#include <numeric>
+
+using namespace llvm;
+
+namespace llvm {
+// Use FS-AFDO discriminator.
+cl::opt<bool> EnableFSDiscriminator(
+ "enable-fs-discriminator", cl::Hidden,
+ cl::desc("Enable adding flow sensitive discriminators"));
+} // namespace llvm
+
+const DIExpression::FragmentInfo DebugVariable::DefaultFragment = {
+ std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::min()};
+
+DILocation::DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned Column, ArrayRef<Metadata *> MDs,
+ bool ImplicitCode)
+ : MDNode(C, DILocationKind, Storage, MDs) {
+ assert((MDs.size() == 1 || MDs.size() == 2) &&
+ "Expected a scope and optional inlined-at");
+
+ // Set line and column.
+ assert(Column < (1u << 16) && "Expected 16-bit column");
+
+ SubclassData32 = Line;
+ SubclassData16 = Column;
+
+ setImplicitCode(ImplicitCode);
+}
+
+static void adjustColumn(unsigned &Column) {
+ // Set to unknown on overflow. We only have 16 bits to play with here.
+ if (Column >= (1u << 16))
+ Column = 0;
+}
+
+DILocation *DILocation::getImpl(LLVMContext &Context, unsigned Line,
+ unsigned Column, Metadata *Scope,
+ Metadata *InlinedAt, bool ImplicitCode,
+ StorageType Storage, bool ShouldCreate) {
+ // Fixup column.
+ adjustColumn(Column);
+
+ if (Storage == Uniqued) {
+ if (auto *N = getUniqued(Context.pImpl->DILocations,
+ DILocationInfo::KeyTy(Line, Column, Scope,
+ InlinedAt, ImplicitCode)))
+ return N;
+ if (!ShouldCreate)
+ return nullptr;
+ } else {
+ assert(ShouldCreate && "Expected non-uniqued nodes to always be created");
+ }
+
+ SmallVector<Metadata *, 2> Ops;
+ Ops.push_back(Scope);
+ if (InlinedAt)
+ Ops.push_back(InlinedAt);
+ return storeImpl(new (Ops.size(), Storage) DILocation(
+ Context, Storage, Line, Column, Ops, ImplicitCode),
+ Storage, Context.pImpl->DILocations);
+}
+
+const DILocation *
+DILocation::getMergedLocations(ArrayRef<const DILocation *> Locs) {
+ if (Locs.empty())
+ return nullptr;
+ if (Locs.size() == 1)
+ return Locs[0];
+ auto *Merged = Locs[0];
+ for (const DILocation *L : llvm::drop_begin(Locs)) {
+ Merged = getMergedLocation(Merged, L);
+ if (Merged == nullptr)
+ break;
+ }
+ return Merged;
+}
+
+const DILocation *DILocation::getMergedLocation(const DILocation *LocA,
+ const DILocation *LocB) {
+ if (!LocA || !LocB)
+ return nullptr;
+
+ if (LocA == LocB)
+ return LocA;
+
+ SmallSet<std::pair<DIScope *, DILocation *>, 5> Locations;
+ DIScope *S = LocA->getScope();
+ DILocation *L = LocA->getInlinedAt();
+ while (S) {
+ Locations.insert(std::make_pair(S, L));
+ S = S->getScope();
+ if (!S && L) {
+ S = L->getScope();
+ L = L->getInlinedAt();
+ }
+ }
+ const DILocation *Result = LocB;
+ S = LocB->getScope();
+ L = LocB->getInlinedAt();
+ while (S) {
+ if (Locations.count(std::make_pair(S, L)))
+ break;
+ S = S->getScope();
+ if (!S && L) {
+ S = L->getScope();
+ L = L->getInlinedAt();
+ }
+ }
+
+ // If the two locations are irreconcilable, just pick one. This is
+ // misleading, but on the other hand, it's a "line 0" location.
+ if (!S || !isa<DILocalScope>(S))
+ S = LocA->getScope();
+ return DILocation::get(Result->getContext(), 0, 0, S, L);
+}
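+
+// For example (hypothetical inputs): merging two locations on lines 4 and 7
+// of the same function yields a line 0, column 0 location in their nearest
+// common scope, signalling that the instruction has no single source line.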
+
+Optional<unsigned> DILocation::encodeDiscriminator(unsigned BD, unsigned DF,
+ unsigned CI) {
+ std::array<unsigned, 3> Components = {BD, DF, CI};
+ uint64_t RemainingWork = 0U;
+ // We use RemainingWork to figure out if we have no remaining components to
+ // encode. For example: if BD != 0 but DF == 0 && CI == 0, we don't need to
+ // encode anything for the latter two.
+ // Since each input component is at most 32 bits, their sum fits in 34 bits,
+ // and thus RemainingWork won't overflow.
+ RemainingWork =
+ std::accumulate(Components.begin(), Components.end(), RemainingWork);
+
+ int I = 0;
+ unsigned Ret = 0;
+ unsigned NextBitInsertionIndex = 0;
+ while (RemainingWork > 0) {
+ unsigned C = Components[I++];
+ RemainingWork -= C;
+ unsigned EC = encodeComponent(C);
+ Ret |= (EC << NextBitInsertionIndex);
+ NextBitInsertionIndex += encodingBits(C);
+ }
+
+ // Encoding may be unsuccessful because of overflow. We determine success by
+ // checking equivalence of components before & after encoding. Alternatively,
+ // we could determine Success during encoding, but the current alternative is
+ // simpler.
+ unsigned TBD = 0, TDF = 0, TCI = 0;
+ decodeDiscriminator(Ret, TBD, TDF, TCI);
+ if (TBD == BD && TDF == DF && TCI == CI)
+ return Ret;
+ return None;
+}
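+
+// Illustrative use (DL, NewDL, and the BD/DF/CI values are hypothetical);
+// encoding can fail on overflow, so the Optional result must be checked:
+//
+//   if (Optional<unsigned> D = DILocation::encodeDiscriminator(BD, DF, CI))
+//     NewDL = DL->cloneWithDiscriminator(*D);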
+
+void DILocation::decodeDiscriminator(unsigned D, unsigned &BD, unsigned &DF,
+ unsigned &CI) {
+ BD = getUnsignedFromPrefixEncoding(D);
+ DF = getUnsignedFromPrefixEncoding(getNextComponentInDiscriminator(D));
+ CI = getUnsignedFromPrefixEncoding(
+ getNextComponentInDiscriminator(getNextComponentInDiscriminator(D)));
+}
+
+dwarf::Tag DINode::getTag() const { return (dwarf::Tag)SubclassData16; }
+
+DINode::DIFlags DINode::getFlag(StringRef Flag) {
+ return StringSwitch<DIFlags>(Flag)
+#define HANDLE_DI_FLAG(ID, NAME) .Case("DIFlag" #NAME, Flag##NAME)
+#include "llvm/IR/DebugInfoFlags.def"
+ .Default(DINode::FlagZero);
+}
+
+StringRef DINode::getFlagString(DIFlags Flag) {
+ switch (Flag) {
+#define HANDLE_DI_FLAG(ID, NAME) \
+ case Flag##NAME: \
+ return "DIFlag" #NAME;
+#include "llvm/IR/DebugInfoFlags.def"
+ }
+ return "";
+}
+
+DINode::DIFlags DINode::splitFlags(DIFlags Flags,
+ SmallVectorImpl<DIFlags> &SplitFlags) {
+ // Flags that are packed together need to be specially handled, so
+ // that, for example, we emit "DIFlagPublic" and not
+ // "DIFlagPrivate | DIFlagProtected".
+ if (DIFlags A = Flags & FlagAccessibility) {
+ if (A == FlagPrivate)
+ SplitFlags.push_back(FlagPrivate);
+ else if (A == FlagProtected)
+ SplitFlags.push_back(FlagProtected);
+ else
+ SplitFlags.push_back(FlagPublic);
+ Flags &= ~A;
+ }
+ if (DIFlags R = Flags & FlagPtrToMemberRep) {
+ if (R == FlagSingleInheritance)
+ SplitFlags.push_back(FlagSingleInheritance);
+ else if (R == FlagMultipleInheritance)
+ SplitFlags.push_back(FlagMultipleInheritance);
+ else
+ SplitFlags.push_back(FlagVirtualInheritance);
+ Flags &= ~R;
+ }
+ if ((Flags & FlagIndirectVirtualBase) == FlagIndirectVirtualBase) {
+ Flags &= ~FlagIndirectVirtualBase;
+ SplitFlags.push_back(FlagIndirectVirtualBase);
+ }
+
+#define HANDLE_DI_FLAG(ID, NAME) \
+ if (DIFlags Bit = Flags & Flag##NAME) { \
+ SplitFlags.push_back(Bit); \
+ Flags &= ~Bit; \
+ }
+#include "llvm/IR/DebugInfoFlags.def"
+ return Flags;
+}
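+
+// For instance, FlagPublic is the two accessibility bits set together, so
+// splitting (FlagPublic | FlagArtificial) yields {FlagPublic, FlagArtificial}
+// rather than the misleading FlagPrivate | FlagProtected pair.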
+
+DIScope *DIScope::getScope() const {
+ if (auto *T = dyn_cast<DIType>(this))
+ return T->getScope();
+
+ if (auto *SP = dyn_cast<DISubprogram>(this))
+ return SP->getScope();
+
+ if (auto *LB = dyn_cast<DILexicalBlockBase>(this))
+ return LB->getScope();
+
+ if (auto *NS = dyn_cast<DINamespace>(this))
+ return NS->getScope();
+
+ if (auto *CB = dyn_cast<DICommonBlock>(this))
+ return CB->getScope();
+
+ if (auto *M = dyn_cast<DIModule>(this))
+ return M->getScope();
+
+ assert((isa<DIFile>(this) || isa<DICompileUnit>(this)) &&
+ "Unhandled type of scope.");
+ return nullptr;
+}
+
+StringRef DIScope::getName() const {
+ if (auto *T = dyn_cast<DIType>(this))
+ return T->getName();
+ if (auto *SP = dyn_cast<DISubprogram>(this))
+ return SP->getName();
+ if (auto *NS = dyn_cast<DINamespace>(this))
+ return NS->getName();
+ if (auto *CB = dyn_cast<DICommonBlock>(this))
+ return CB->getName();
+ if (auto *M = dyn_cast<DIModule>(this))
+ return M->getName();
+ assert((isa<DILexicalBlockBase>(this) || isa<DIFile>(this) ||
+ isa<DICompileUnit>(this)) &&
+ "Unhandled type of scope.");
+ return "";
+}
+
+#ifndef NDEBUG
+static bool isCanonical(const MDString *S) {
+ return !S || !S->getString().empty();
+}
+#endif
+
+dwarf::Tag GenericDINode::getTag() const { return (dwarf::Tag)SubclassData16; }
+
+GenericDINode *GenericDINode::getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Header,
+ ArrayRef<Metadata *> DwarfOps,
+ StorageType Storage, bool ShouldCreate) {
+ unsigned Hash = 0;
+ if (Storage == Uniqued) {
+ GenericDINodeInfo::KeyTy Key(Tag, Header, DwarfOps);
+ if (auto *N = getUniqued(Context.pImpl->GenericDINodes, Key))
+ return N;
+ if (!ShouldCreate)
+ return nullptr;
+ Hash = Key.getHash();
+ } else {
+ assert(ShouldCreate && "Expected non-uniqued nodes to always be created");
+ }
+
+ // Use a nullptr for empty headers.
+ assert(isCanonical(Header) && "Expected canonical MDString");
+ Metadata *PreOps[] = {Header};
+ return storeImpl(new (DwarfOps.size() + 1, Storage) GenericDINode(
+ Context, Storage, Hash, Tag, PreOps, DwarfOps),
+ Storage, Context.pImpl->GenericDINodes);
+}
+
+void GenericDINode::recalculateHash() {
+ setHash(GenericDINodeInfo::KeyTy::calculateHash(this));
+}
+
+#define UNWRAP_ARGS_IMPL(...) __VA_ARGS__
+#define UNWRAP_ARGS(ARGS) UNWRAP_ARGS_IMPL ARGS
+#define DEFINE_GETIMPL_LOOKUP(CLASS, ARGS) \
+ do { \
+ if (Storage == Uniqued) { \
+ if (auto *N = getUniqued(Context.pImpl->CLASS##s, \
+ CLASS##Info::KeyTy(UNWRAP_ARGS(ARGS)))) \
+ return N; \
+ if (!ShouldCreate) \
+ return nullptr; \
+ } else { \
+ assert(ShouldCreate && \
+ "Expected non-uniqued nodes to always be created"); \
+ } \
+ } while (false)
+#define DEFINE_GETIMPL_STORE(CLASS, ARGS, OPS) \
+ return storeImpl(new (array_lengthof(OPS), Storage) \
+ CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \
+ Storage, Context.pImpl->CLASS##s)
+#define DEFINE_GETIMPL_STORE_NO_OPS(CLASS, ARGS) \
+ return storeImpl(new (0u, Storage) \
+ CLASS(Context, Storage, UNWRAP_ARGS(ARGS)), \
+ Storage, Context.pImpl->CLASS##s)
+#define DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(CLASS, OPS) \
+ return storeImpl(new (array_lengthof(OPS), Storage) \
+ CLASS(Context, Storage, OPS), \
+ Storage, Context.pImpl->CLASS##s)
+#define DEFINE_GETIMPL_STORE_N(CLASS, ARGS, OPS, NUM_OPS) \
+ return storeImpl(new (NUM_OPS, Storage) \
+ CLASS(Context, Storage, UNWRAP_ARGS(ARGS), OPS), \
+ Storage, Context.pImpl->CLASS##s)
+
+DISubrange::DISubrange(LLVMContext &C, StorageType Storage,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, DISubrangeKind, Storage, dwarf::DW_TAG_subrange_type, Ops) {}
+
+DISubrange *DISubrange::getImpl(LLVMContext &Context, int64_t Count, int64_t Lo,
+ StorageType Storage, bool ShouldCreate) {
+ auto *CountNode = ConstantAsMetadata::get(
+ ConstantInt::getSigned(Type::getInt64Ty(Context), Count));
+ auto *LB = ConstantAsMetadata::get(
+ ConstantInt::getSigned(Type::getInt64Ty(Context), Lo));
+ return getImpl(Context, CountNode, LB, nullptr, nullptr, Storage,
+ ShouldCreate);
+}
+
+DISubrange *DISubrange::getImpl(LLVMContext &Context, Metadata *CountNode,
+ int64_t Lo, StorageType Storage,
+ bool ShouldCreate) {
+ auto *LB = ConstantAsMetadata::get(
+ ConstantInt::getSigned(Type::getInt64Ty(Context), Lo));
+ return getImpl(Context, CountNode, LB, nullptr, nullptr, Storage,
+ ShouldCreate);
+}
+
+DISubrange *DISubrange::getImpl(LLVMContext &Context, Metadata *CountNode,
+ Metadata *LB, Metadata *UB, Metadata *Stride,
+ StorageType Storage, bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DISubrange, (CountNode, LB, UB, Stride));
+ Metadata *Ops[] = {CountNode, LB, UB, Stride};
+ DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DISubrange, Ops);
+}
+
+DISubrange::BoundType DISubrange::getCount() const {
+ Metadata *CB = getRawCountNode();
+ if (!CB)
+ return BoundType();
+
+ assert((isa<ConstantAsMetadata>(CB) || isa<DIVariable>(CB) ||
+ isa<DIExpression>(CB)) &&
+ "Count must be signed constant or DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<ConstantAsMetadata>(CB))
+ return BoundType(cast<ConstantInt>(MD->getValue()));
+
+ if (auto *MD = dyn_cast<DIVariable>(CB))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(CB))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DISubrange::BoundType DISubrange::getLowerBound() const {
+ Metadata *LB = getRawLowerBound();
+ if (!LB)
+ return BoundType();
+
+ assert((isa<ConstantAsMetadata>(LB) || isa<DIVariable>(LB) ||
+ isa<DIExpression>(LB)) &&
+ "LowerBound must be signed constant or DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<ConstantAsMetadata>(LB))
+ return BoundType(cast<ConstantInt>(MD->getValue()));
+
+ if (auto *MD = dyn_cast<DIVariable>(LB))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(LB))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DISubrange::BoundType DISubrange::getUpperBound() const {
+ Metadata *UB = getRawUpperBound();
+ if (!UB)
+ return BoundType();
+
+ assert((isa<ConstantAsMetadata>(UB) || isa<DIVariable>(UB) ||
+ isa<DIExpression>(UB)) &&
+ "UpperBound must be signed constant or DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<ConstantAsMetadata>(UB))
+ return BoundType(cast<ConstantInt>(MD->getValue()));
+
+ if (auto *MD = dyn_cast<DIVariable>(UB))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(UB))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DISubrange::BoundType DISubrange::getStride() const {
+ Metadata *ST = getRawStride();
+ if (!ST)
+ return BoundType();
+
+ assert((isa<ConstantAsMetadata>(ST) || isa<DIVariable>(ST) ||
+ isa<DIExpression>(ST)) &&
+ "Stride must be signed constant or DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<ConstantAsMetadata>(ST))
+ return BoundType(cast<ConstantInt>(MD->getValue()));
+
+ if (auto *MD = dyn_cast<DIVariable>(ST))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(ST))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DIGenericSubrange::DIGenericSubrange(LLVMContext &C, StorageType Storage,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, DIGenericSubrangeKind, Storage, dwarf::DW_TAG_generic_subrange,
+ Ops) {}
+
+DIGenericSubrange *DIGenericSubrange::getImpl(LLVMContext &Context,
+ Metadata *CountNode, Metadata *LB,
+ Metadata *UB, Metadata *Stride,
+ StorageType Storage,
+ bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DIGenericSubrange, (CountNode, LB, UB, Stride));
+ Metadata *Ops[] = {CountNode, LB, UB, Stride};
+ DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DIGenericSubrange, Ops);
+}
+
+DIGenericSubrange::BoundType DIGenericSubrange::getCount() const {
+ Metadata *CB = getRawCountNode();
+ if (!CB)
+ return BoundType();
+
+ assert((isa<DIVariable>(CB) || isa<DIExpression>(CB)) &&
+ "Count must be DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<DIVariable>(CB))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(CB))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DIGenericSubrange::BoundType DIGenericSubrange::getLowerBound() const {
+ Metadata *LB = getRawLowerBound();
+ if (!LB)
+ return BoundType();
+
+ assert((isa<DIVariable>(LB) || isa<DIExpression>(LB)) &&
+ "LowerBound must be DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<DIVariable>(LB))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(LB))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DIGenericSubrange::BoundType DIGenericSubrange::getUpperBound() const {
+ Metadata *UB = getRawUpperBound();
+ if (!UB)
+ return BoundType();
+
+ assert((isa<DIVariable>(UB) || isa<DIExpression>(UB)) &&
+ "UpperBound must be DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<DIVariable>(UB))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(UB))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DIGenericSubrange::BoundType DIGenericSubrange::getStride() const {
+ Metadata *ST = getRawStride();
+ if (!ST)
+ return BoundType();
+
+ assert((isa<DIVariable>(ST) || isa<DIExpression>(ST)) &&
+ "Stride must be DIVariable or DIExpression");
+
+ if (auto *MD = dyn_cast<DIVariable>(ST))
+ return BoundType(MD);
+
+ if (auto *MD = dyn_cast<DIExpression>(ST))
+ return BoundType(MD);
+
+ return BoundType();
+}
+
+DIEnumerator::DIEnumerator(LLVMContext &C, StorageType Storage,
+ const APInt &Value, bool IsUnsigned,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, DIEnumeratorKind, Storage, dwarf::DW_TAG_enumerator, Ops),
+ Value(Value) {
+ SubclassData32 = IsUnsigned;
+}
+
+DIEnumerator *DIEnumerator::getImpl(LLVMContext &Context, const APInt &Value,
+ bool IsUnsigned, MDString *Name,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIEnumerator, (Value, IsUnsigned, Name));
+ Metadata *Ops[] = {Name};
+ DEFINE_GETIMPL_STORE(DIEnumerator, (Value, IsUnsigned), Ops);
+}
+
+DIBasicType *DIBasicType::getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Name, uint64_t SizeInBits,
+ uint32_t AlignInBits, unsigned Encoding,
+ DIFlags Flags, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIBasicType,
+ (Tag, Name, SizeInBits, AlignInBits, Encoding, Flags));
+ Metadata *Ops[] = {nullptr, nullptr, Name};
+ DEFINE_GETIMPL_STORE(DIBasicType,
+ (Tag, SizeInBits, AlignInBits, Encoding, Flags), Ops);
+}
+
+Optional<DIBasicType::Signedness> DIBasicType::getSignedness() const {
+ switch (getEncoding()) {
+ case dwarf::DW_ATE_signed:
+ case dwarf::DW_ATE_signed_char:
+ return Signedness::Signed;
+ case dwarf::DW_ATE_unsigned:
+ case dwarf::DW_ATE_unsigned_char:
+ return Signedness::Unsigned;
+ default:
+ return None;
+ }
+}
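
A caller typically uses this to pick sign- or zero-extension when widening a
value; a small sketch, assuming BT is an existing DIBasicType *:

    bool IsSigned = false;
    if (Optional<DIBasicType::Signedness> S = BT->getSignedness())
      IsSigned = (*S == DIBasicType::Signedness::Signed);
    // Encodings such as DW_ATE_float or DW_ATE_boolean yield None, so
    // IsSigned keeps its default.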
+
+DIStringType *DIStringType::getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Name, Metadata *StringLength,
+ Metadata *StringLengthExp,
+ Metadata *StringLocationExp,
+ uint64_t SizeInBits, uint32_t AlignInBits,
+ unsigned Encoding, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIStringType,
+ (Tag, Name, StringLength, StringLengthExp,
+ StringLocationExp, SizeInBits, AlignInBits, Encoding));
+ Metadata *Ops[] = {nullptr, nullptr, Name,
+ StringLength, StringLengthExp, StringLocationExp};
+ DEFINE_GETIMPL_STORE(DIStringType, (Tag, SizeInBits, AlignInBits, Encoding),
+ Ops);
+}
+
+DIType *DIDerivedType::getClassType() const {
+ assert(getTag() == dwarf::DW_TAG_ptr_to_member_type);
+ return cast_or_null<DIType>(getExtraData());
+}
+
+uint32_t DIDerivedType::getVBPtrOffset() const {
+ assert(getTag() == dwarf::DW_TAG_inheritance);
+ if (auto *CM = cast_or_null<ConstantAsMetadata>(getExtraData()))
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(CM->getValue()))
+ return static_cast<uint32_t>(CI->getZExtValue());
+ return 0;
+}
+
+Constant *DIDerivedType::getStorageOffsetInBits() const {
+ assert(getTag() == dwarf::DW_TAG_member && isBitField());
+ if (auto *C = cast_or_null<ConstantAsMetadata>(getExtraData()))
+ return C->getValue();
+ return nullptr;
+}
+
+Constant *DIDerivedType::getConstant() const {
+ assert(getTag() == dwarf::DW_TAG_member && isStaticMember());
+ if (auto *C = cast_or_null<ConstantAsMetadata>(getExtraData()))
+ return C->getValue();
+ return nullptr;
+}
+
+Constant *DIDerivedType::getDiscriminantValue() const {
+ assert(getTag() == dwarf::DW_TAG_member && !isStaticMember());
+ if (auto *C = cast_or_null<ConstantAsMetadata>(getExtraData()))
+ return C->getValue();
+ return nullptr;
+}
+
+DIDerivedType *DIDerivedType::getImpl(
+ LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits,
+ Optional<unsigned> DWARFAddressSpace, DIFlags Flags, Metadata *ExtraData,
+ Metadata *Annotations, StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIDerivedType,
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, DWARFAddressSpace, Flags,
+ ExtraData, Annotations));
+ Metadata *Ops[] = {File, Scope, Name, BaseType, ExtraData, Annotations};
+ DEFINE_GETIMPL_STORE(DIDerivedType,
+ (Tag, Line, SizeInBits, AlignInBits, OffsetInBits,
+ DWARFAddressSpace, Flags),
+ Ops);
+}
+
+DICompositeType *DICompositeType::getImpl(
+ LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
+ Metadata *Elements, unsigned RuntimeLang, Metadata *VTableHolder,
+ Metadata *TemplateParams, MDString *Identifier, Metadata *Discriminator,
+ Metadata *DataLocation, Metadata *Associated, Metadata *Allocated,
+ Metadata *Rank, Metadata *Annotations, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+
+ // Keep this in sync with buildODRType.
+ DEFINE_GETIMPL_LOOKUP(DICompositeType,
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Elements,
+ RuntimeLang, VTableHolder, TemplateParams, Identifier,
+ Discriminator, DataLocation, Associated, Allocated,
+ Rank, Annotations));
+ Metadata *Ops[] = {File, Scope, Name, BaseType,
+ Elements, VTableHolder, TemplateParams, Identifier,
+ Discriminator, DataLocation, Associated, Allocated,
+ Rank, Annotations};
+ DEFINE_GETIMPL_STORE(
+ DICompositeType,
+ (Tag, Line, RuntimeLang, SizeInBits, AlignInBits, OffsetInBits, Flags),
+ Ops);
+}
+
+DICompositeType *DICompositeType::buildODRType(
+ LLVMContext &Context, MDString &Identifier, unsigned Tag, MDString *Name,
+ Metadata *File, unsigned Line, Metadata *Scope, Metadata *BaseType,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ DIFlags Flags, Metadata *Elements, unsigned RuntimeLang,
+ Metadata *VTableHolder, Metadata *TemplateParams, Metadata *Discriminator,
+ Metadata *DataLocation, Metadata *Associated, Metadata *Allocated,
+ Metadata *Rank, Metadata *Annotations) {
+ assert(!Identifier.getString().empty() && "Expected valid identifier");
+ if (!Context.isODRUniquingDebugTypes())
+ return nullptr;
+ auto *&CT = (*Context.pImpl->DITypeMap)[&Identifier];
+ if (!CT)
+ return CT = DICompositeType::getDistinct(
+ Context, Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
+ VTableHolder, TemplateParams, &Identifier, Discriminator,
+ DataLocation, Associated, Allocated, Rank, Annotations);
+
+ if (CT->getTag() != Tag)
+ return nullptr;
+
+ // Only mutate CT if it's a forward declaration and the new operands aren't.
+ assert(CT->getRawIdentifier() == &Identifier && "Wrong ODR identifier?");
+ if (!CT->isForwardDecl() || (Flags & DINode::FlagFwdDecl))
+ return CT;
+
+ // Mutate CT in place. Keep this in sync with getImpl.
+ CT->mutate(Tag, Line, RuntimeLang, SizeInBits, AlignInBits, OffsetInBits,
+ Flags);
+ Metadata *Ops[] = {File, Scope, Name, BaseType,
+ Elements, VTableHolder, TemplateParams, &Identifier,
+ Discriminator, DataLocation, Associated, Allocated,
+ Rank, Annotations};
+ assert((std::end(Ops) - std::begin(Ops)) == (int)CT->getNumOperands() &&
+ "Mismatched number of operands");
+ for (unsigned I = 0, E = CT->getNumOperands(); I != E; ++I)
+ if (Ops[I] != CT->getOperand(I))
+ CT->setOperand(I, Ops[I]);
+ return CT;
+}
+
+DICompositeType *DICompositeType::getODRType(
+ LLVMContext &Context, MDString &Identifier, unsigned Tag, MDString *Name,
+ Metadata *File, unsigned Line, Metadata *Scope, Metadata *BaseType,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ DIFlags Flags, Metadata *Elements, unsigned RuntimeLang,
+ Metadata *VTableHolder, Metadata *TemplateParams, Metadata *Discriminator,
+ Metadata *DataLocation, Metadata *Associated, Metadata *Allocated,
+ Metadata *Rank, Metadata *Annotations) {
+ assert(!Identifier.getString().empty() && "Expected valid identifier");
+ if (!Context.isODRUniquingDebugTypes())
+ return nullptr;
+ auto *&CT = (*Context.pImpl->DITypeMap)[&Identifier];
+ if (!CT) {
+ CT = DICompositeType::getDistinct(
+ Context, Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang, VTableHolder,
+ TemplateParams, &Identifier, Discriminator, DataLocation, Associated,
+ Allocated, Rank, Annotations);
+ } else {
+ if (CT->getTag() != Tag)
+ return nullptr;
+ }
+ return CT;
+}
+
+DICompositeType *DICompositeType::getODRTypeIfExists(LLVMContext &Context,
+ MDString &Identifier) {
+ assert(!Identifier.getString().empty() && "Expected valid identifier");
+ if (!Context.isODRUniquingDebugTypes())
+ return nullptr;
+ return Context.pImpl->DITypeMap->lookup(&Identifier);
+}
+
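All three ODR entry points are gated on the context having opted in to
debug-type ODR uniquing; without that, they return null. A lookup sketch,
where the mangled identifier string is illustrative:

    LLVMContext Ctx;
    Ctx.enableDebugTypeODRUniquing();
    MDString *Id = MDString::get(Ctx, "_ZTS7MyClass");
    if (DICompositeType *CT = DICompositeType::getODRTypeIfExists(Ctx, *Id)) {
      // Reuse the distinct node already registered for this identifier.
      (void)CT;
    }
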
+DISubroutineType::DISubroutineType(LLVMContext &C, StorageType Storage,
+ DIFlags Flags, uint8_t CC,
+ ArrayRef<Metadata *> Ops)
+ : DIType(C, DISubroutineTypeKind, Storage, dwarf::DW_TAG_subroutine_type, 0,
+ 0, 0, 0, Flags, Ops),
+ CC(CC) {}
+
+DISubroutineType *DISubroutineType::getImpl(LLVMContext &Context, DIFlags Flags,
+ uint8_t CC, Metadata *TypeArray,
+ StorageType Storage,
+ bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DISubroutineType, (Flags, CC, TypeArray));
+ Metadata *Ops[] = {nullptr, nullptr, nullptr, TypeArray};
+ DEFINE_GETIMPL_STORE(DISubroutineType, (Flags, CC), Ops);
+}
+
+DIFile::DIFile(LLVMContext &C, StorageType Storage,
+ Optional<ChecksumInfo<MDString *>> CS, Optional<MDString *> Src,
+ ArrayRef<Metadata *> Ops)
+ : DIScope(C, DIFileKind, Storage, dwarf::DW_TAG_file_type, Ops),
+ Checksum(CS), Source(Src) {}
+
+// FIXME: Implement this string-enum correspondence with a .def file and macros,
+// so that the association is explicit rather than implied.
+static const char *ChecksumKindName[DIFile::CSK_Last] = {
+ "CSK_MD5",
+ "CSK_SHA1",
+ "CSK_SHA256",
+};
+
+StringRef DIFile::getChecksumKindAsString(ChecksumKind CSKind) {
+ assert(CSKind <= DIFile::CSK_Last && "Invalid checksum kind");
+ // The first space was originally the CSK_None variant, which is now
+ // obsolete, but the space is still reserved in ChecksumKind, so we account
+ // for it here.
+ return ChecksumKindName[CSKind - 1];
+}
+
+Optional<DIFile::ChecksumKind> DIFile::getChecksumKind(StringRef CSKindStr) {
+ return StringSwitch<Optional<DIFile::ChecksumKind>>(CSKindStr)
+ .Case("CSK_MD5", DIFile::CSK_MD5)
+ .Case("CSK_SHA1", DIFile::CSK_SHA1)
+ .Case("CSK_SHA256", DIFile::CSK_SHA256)
+ .Default(None);
+}
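
The two helpers are inverses for the three live kinds; a round-trip sketch:

    StringRef KindStr = DIFile::getChecksumKindAsString(DIFile::CSK_MD5);
    Optional<DIFile::ChecksumKind> Kind = DIFile::getChecksumKind(KindStr);
    assert(Kind && *Kind == DIFile::CSK_MD5 && "round trip preserves the kind");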
+
+DIFile *DIFile::getImpl(LLVMContext &Context, MDString *Filename,
+ MDString *Directory,
+ Optional<DIFile::ChecksumInfo<MDString *>> CS,
+ Optional<MDString *> Source, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Filename) && "Expected canonical MDString");
+ assert(isCanonical(Directory) && "Expected canonical MDString");
+ assert((!CS || isCanonical(CS->Value)) && "Expected canonical MDString");
+ assert((!Source || isCanonical(*Source)) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIFile, (Filename, Directory, CS, Source));
+ Metadata *Ops[] = {Filename, Directory, CS ? CS->Value : nullptr,
+ Source.value_or(nullptr)};
+ DEFINE_GETIMPL_STORE(DIFile, (CS, Source), Ops);
+}
+
+DICompileUnit::DICompileUnit(LLVMContext &C, StorageType Storage,
+ unsigned SourceLanguage, bool IsOptimized,
+ unsigned RuntimeVersion, unsigned EmissionKind,
+ uint64_t DWOId, bool SplitDebugInlining,
+ bool DebugInfoForProfiling, unsigned NameTableKind,
+ bool RangesBaseAddress, ArrayRef<Metadata *> Ops)
+ : DIScope(C, DICompileUnitKind, Storage, dwarf::DW_TAG_compile_unit, Ops),
+ SourceLanguage(SourceLanguage), IsOptimized(IsOptimized),
+ RuntimeVersion(RuntimeVersion), EmissionKind(EmissionKind), DWOId(DWOId),
+ SplitDebugInlining(SplitDebugInlining),
+ DebugInfoForProfiling(DebugInfoForProfiling),
+ NameTableKind(NameTableKind), RangesBaseAddress(RangesBaseAddress) {
+ assert(Storage != Uniqued);
+}
+
+DICompileUnit *DICompileUnit::getImpl(
+ LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
+ MDString *Producer, bool IsOptimized, MDString *Flags,
+ unsigned RuntimeVersion, MDString *SplitDebugFilename,
+ unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
+ Metadata *GlobalVariables, Metadata *ImportedEntities, Metadata *Macros,
+ uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
+ unsigned NameTableKind, bool RangesBaseAddress, MDString *SysRoot,
+ MDString *SDK, StorageType Storage, bool ShouldCreate) {
+ assert(Storage != Uniqued && "Cannot unique DICompileUnit");
+ assert(isCanonical(Producer) && "Expected canonical MDString");
+ assert(isCanonical(Flags) && "Expected canonical MDString");
+ assert(isCanonical(SplitDebugFilename) && "Expected canonical MDString");
+
+ Metadata *Ops[] = {File,
+ Producer,
+ Flags,
+ SplitDebugFilename,
+ EnumTypes,
+ RetainedTypes,
+ GlobalVariables,
+ ImportedEntities,
+ Macros,
+ SysRoot,
+ SDK};
+ return storeImpl(new (array_lengthof(Ops), Storage) DICompileUnit(
+ Context, Storage, SourceLanguage, IsOptimized,
+ RuntimeVersion, EmissionKind, DWOId, SplitDebugInlining,
+ DebugInfoForProfiling, NameTableKind, RangesBaseAddress,
+ Ops),
+ Storage);
+}
+
+Optional<DICompileUnit::DebugEmissionKind>
+DICompileUnit::getEmissionKind(StringRef Str) {
+ return StringSwitch<Optional<DebugEmissionKind>>(Str)
+ .Case("NoDebug", NoDebug)
+ .Case("FullDebug", FullDebug)
+ .Case("LineTablesOnly", LineTablesOnly)
+ .Case("DebugDirectivesOnly", DebugDirectivesOnly)
+ .Default(None);
+}
+
+Optional<DICompileUnit::DebugNameTableKind>
+DICompileUnit::getNameTableKind(StringRef Str) {
+ return StringSwitch<Optional<DebugNameTableKind>>(Str)
+ .Case("Default", DebugNameTableKind::Default)
+ .Case("GNU", DebugNameTableKind::GNU)
+ .Case("None", DebugNameTableKind::None)
+ .Default(None);
+}
+
+const char *DICompileUnit::emissionKindString(DebugEmissionKind EK) {
+ switch (EK) {
+ case NoDebug:
+ return "NoDebug";
+ case FullDebug:
+ return "FullDebug";
+ case LineTablesOnly:
+ return "LineTablesOnly";
+ case DebugDirectivesOnly:
+ return "DebugDirectivesOnly";
+ }
+ return nullptr;
+}
+
+const char *DICompileUnit::nameTableKindString(DebugNameTableKind NTK) {
+ switch (NTK) {
+ case DebugNameTableKind::Default:
+ return nullptr;
+ case DebugNameTableKind::GNU:
+ return "GNU";
+ case DebugNameTableKind::None:
+ return "None";
+ }
+ return nullptr;
+}
+
+DISubprogram::DISubprogram(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned ScopeLine, unsigned VirtualIndex,
+ int ThisAdjustment, DIFlags Flags, DISPFlags SPFlags,
+ ArrayRef<Metadata *> Ops)
+ : DILocalScope(C, DISubprogramKind, Storage, dwarf::DW_TAG_subprogram, Ops),
+ Line(Line), ScopeLine(ScopeLine), VirtualIndex(VirtualIndex),
+ ThisAdjustment(ThisAdjustment), Flags(Flags), SPFlags(SPFlags) {
+ static_assert(dwarf::DW_VIRTUALITY_max < 4, "Virtuality out of range");
+}
+
+DISubprogram::DISPFlags
+DISubprogram::toSPFlags(bool IsLocalToUnit, bool IsDefinition, bool IsOptimized,
+ unsigned Virtuality, bool IsMainSubprogram) {
+ // We're assuming virtuality is the low-order field.
+ static_assert(int(SPFlagVirtual) == int(dwarf::DW_VIRTUALITY_virtual) &&
+ int(SPFlagPureVirtual) ==
+ int(dwarf::DW_VIRTUALITY_pure_virtual),
+ "Virtuality constant mismatch");
+ return static_cast<DISPFlags>(
+ (Virtuality & SPFlagVirtuality) |
+ (IsLocalToUnit ? SPFlagLocalToUnit : SPFlagZero) |
+ (IsDefinition ? SPFlagDefinition : SPFlagZero) |
+ (IsOptimized ? SPFlagOptimized : SPFlagZero) |
+ (IsMainSubprogram ? SPFlagMainSubprogram : SPFlagZero));
+}
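
Because the virtuality bits occupy the low end of the flag word, a DWARF
virtuality value can be OR'd straight in; for example:

    DISubprogram::DISPFlags SPFlags = DISubprogram::toSPFlags(
        /*IsLocalToUnit=*/true, /*IsDefinition=*/true, /*IsOptimized=*/false,
        /*Virtuality=*/dwarf::DW_VIRTUALITY_virtual,
        /*IsMainSubprogram=*/false);
    // SPFlags == SPFlagLocalToUnit | SPFlagDefinition | SPFlagVirtual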
+
+DISubprogram *DILocalScope::getSubprogram() const {
+ if (auto *Block = dyn_cast<DILexicalBlockBase>(this))
+ return Block->getScope()->getSubprogram();
+ return const_cast<DISubprogram *>(cast<DISubprogram>(this));
+}
+
+DILocalScope *DILocalScope::getNonLexicalBlockFileScope() const {
+ if (auto *File = dyn_cast<DILexicalBlockFile>(this))
+ return File->getScope()->getNonLexicalBlockFileScope();
+ return const_cast<DILocalScope *>(this);
+}
+
+DISubprogram::DISPFlags DISubprogram::getFlag(StringRef Flag) {
+ return StringSwitch<DISPFlags>(Flag)
+#define HANDLE_DISP_FLAG(ID, NAME) .Case("DISPFlag" #NAME, SPFlag##NAME)
+#include "llvm/IR/DebugInfoFlags.def"
+ .Default(SPFlagZero);
+}
+
+StringRef DISubprogram::getFlagString(DISPFlags Flag) {
+ switch (Flag) {
+ // Appease a warning.
+ case SPFlagVirtuality:
+ return "";
+#define HANDLE_DISP_FLAG(ID, NAME) \
+ case SPFlag##NAME: \
+ return "DISPFlag" #NAME;
+#include "llvm/IR/DebugInfoFlags.def"
+ }
+ return "";
+}
+
+DISubprogram::DISPFlags
+DISubprogram::splitFlags(DISPFlags Flags,
+ SmallVectorImpl<DISPFlags> &SplitFlags) {
+ // Multi-bit fields can require special handling. In our case, however, the
+ // only multi-bit field is virtuality, and all its values happen to be
+ // single-bit values, so the right behavior just falls out.
+#define HANDLE_DISP_FLAG(ID, NAME) \
+ if (DISPFlags Bit = Flags & SPFlag##NAME) { \
+ SplitFlags.push_back(Bit); \
+ Flags &= ~Bit; \
+ }
+#include "llvm/IR/DebugInfoFlags.def"
+ return Flags;
+}
+
+DISubprogram *DISubprogram::getImpl(
+ LLVMContext &Context, Metadata *Scope, MDString *Name,
+ MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
+ unsigned ScopeLine, Metadata *ContainingType, unsigned VirtualIndex,
+ int ThisAdjustment, DIFlags Flags, DISPFlags SPFlags, Metadata *Unit,
+ Metadata *TemplateParams, Metadata *Declaration, Metadata *RetainedNodes,
+ Metadata *ThrownTypes, Metadata *Annotations, MDString *TargetFuncName,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ assert(isCanonical(LinkageName) && "Expected canonical MDString");
+ assert(isCanonical(TargetFuncName) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DISubprogram,
+ (Scope, Name, LinkageName, File, Line, Type, ScopeLine,
+ ContainingType, VirtualIndex, ThisAdjustment, Flags,
+ SPFlags, Unit, TemplateParams, Declaration,
+ RetainedNodes, ThrownTypes, Annotations,
+ TargetFuncName));
+ SmallVector<Metadata *, 13> Ops = {
+ File, Scope, Name, LinkageName,
+ Type, Unit, Declaration, RetainedNodes,
+ ContainingType, TemplateParams, ThrownTypes, Annotations,
+ TargetFuncName};
+ if (!TargetFuncName) {
+ Ops.pop_back();
+ if (!Annotations) {
+ Ops.pop_back();
+ if (!ThrownTypes) {
+ Ops.pop_back();
+ if (!TemplateParams) {
+ Ops.pop_back();
+ if (!ContainingType)
+ Ops.pop_back();
+ }
+ }
+ }
+ }
+ DEFINE_GETIMPL_STORE_N(
+ DISubprogram,
+ (Line, ScopeLine, VirtualIndex, ThisAdjustment, Flags, SPFlags), Ops,
+ Ops.size());
+}
+
+bool DISubprogram::describes(const Function *F) const {
+ assert(F && "Invalid function");
+ return F->getSubprogram() == this;
+}
+
+DILexicalBlockBase::DILexicalBlockBase(LLVMContext &C, unsigned ID,
+ StorageType Storage,
+ ArrayRef<Metadata *> Ops)
+ : DILocalScope(C, ID, Storage, dwarf::DW_TAG_lexical_block, Ops) {}
+
+DILexicalBlock *DILexicalBlock::getImpl(LLVMContext &Context, Metadata *Scope,
+ Metadata *File, unsigned Line,
+ unsigned Column, StorageType Storage,
+ bool ShouldCreate) {
+ // Fixup column.
+ adjustColumn(Column);
+
+ assert(Scope && "Expected scope");
+ DEFINE_GETIMPL_LOOKUP(DILexicalBlock, (Scope, File, Line, Column));
+ Metadata *Ops[] = {File, Scope};
+ DEFINE_GETIMPL_STORE(DILexicalBlock, (Line, Column), Ops);
+}
+
+DILexicalBlockFile *DILexicalBlockFile::getImpl(LLVMContext &Context,
+ Metadata *Scope, Metadata *File,
+ unsigned Discriminator,
+ StorageType Storage,
+ bool ShouldCreate) {
+ assert(Scope && "Expected scope");
+ DEFINE_GETIMPL_LOOKUP(DILexicalBlockFile, (Scope, File, Discriminator));
+ Metadata *Ops[] = {File, Scope};
+ DEFINE_GETIMPL_STORE(DILexicalBlockFile, (Discriminator), Ops);
+}
+
+DINamespace::DINamespace(LLVMContext &Context, StorageType Storage,
+ bool ExportSymbols, ArrayRef<Metadata *> Ops)
+ : DIScope(Context, DINamespaceKind, Storage, dwarf::DW_TAG_namespace, Ops),
+ ExportSymbols(ExportSymbols) {}
+
+DINamespace *DINamespace::getImpl(LLVMContext &Context, Metadata *Scope,
+ MDString *Name, bool ExportSymbols,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DINamespace, (Scope, Name, ExportSymbols));
+ // The nullptr is for DIScope's File operand. This should be refactored.
+ Metadata *Ops[] = {nullptr, Scope, Name};
+ DEFINE_GETIMPL_STORE(DINamespace, (ExportSymbols), Ops);
+}
+
+DICommonBlock::DICommonBlock(LLVMContext &Context, StorageType Storage,
+ unsigned LineNo, ArrayRef<Metadata *> Ops)
+ : DIScope(Context, DICommonBlockKind, Storage, dwarf::DW_TAG_common_block,
+ Ops),
+ LineNo(LineNo) {}
+
+DICommonBlock *DICommonBlock::getImpl(LLVMContext &Context, Metadata *Scope,
+ Metadata *Decl, MDString *Name,
+ Metadata *File, unsigned LineNo,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DICommonBlock, (Scope, Decl, Name, File, LineNo));
+ // The nullptr is for DIScope's File operand. This should be refactored.
+ Metadata *Ops[] = {Scope, Decl, Name, File};
+ DEFINE_GETIMPL_STORE(DICommonBlock, (LineNo), Ops);
+}
+
+DIModule::DIModule(LLVMContext &Context, StorageType Storage, unsigned LineNo,
+ bool IsDecl, ArrayRef<Metadata *> Ops)
+ : DIScope(Context, DIModuleKind, Storage, dwarf::DW_TAG_module, Ops),
+ LineNo(LineNo), IsDecl(IsDecl) {}
+
+DIModule *DIModule::getImpl(LLVMContext &Context, Metadata *File,
+ Metadata *Scope, MDString *Name,
+ MDString *ConfigurationMacros,
+ MDString *IncludePath, MDString *APINotesFile,
+ unsigned LineNo, bool IsDecl, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIModule, (File, Scope, Name, ConfigurationMacros,
+ IncludePath, APINotesFile, LineNo, IsDecl));
+ Metadata *Ops[] = {File, Scope, Name, ConfigurationMacros,
+ IncludePath, APINotesFile};
+ DEFINE_GETIMPL_STORE(DIModule, (LineNo, IsDecl), Ops);
+}
+
+DITemplateTypeParameter::DITemplateTypeParameter(LLVMContext &Context,
+ StorageType Storage,
+ bool IsDefault,
+ ArrayRef<Metadata *> Ops)
+ : DITemplateParameter(Context, DITemplateTypeParameterKind, Storage,
+ dwarf::DW_TAG_template_type_parameter, IsDefault,
+ Ops) {}
+
+DITemplateTypeParameter *
+DITemplateTypeParameter::getImpl(LLVMContext &Context, MDString *Name,
+ Metadata *Type, bool isDefault,
+ StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DITemplateTypeParameter, (Name, Type, isDefault));
+ Metadata *Ops[] = {Name, Type};
+ DEFINE_GETIMPL_STORE(DITemplateTypeParameter, (isDefault), Ops);
+}
+
+DITemplateValueParameter *DITemplateValueParameter::getImpl(
+ LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *Type,
+ bool isDefault, Metadata *Value, StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DITemplateValueParameter,
+ (Tag, Name, Type, isDefault, Value));
+ Metadata *Ops[] = {Name, Type, Value};
+ DEFINE_GETIMPL_STORE(DITemplateValueParameter, (Tag, isDefault), Ops);
+}
+
+DIGlobalVariable *
+DIGlobalVariable::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+ MDString *LinkageName, Metadata *File, unsigned Line,
+ Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
+ Metadata *StaticDataMemberDeclaration,
+ Metadata *TemplateParams, uint32_t AlignInBits,
+ Metadata *Annotations, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ assert(isCanonical(LinkageName) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(
+ DIGlobalVariable,
+ (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
+ StaticDataMemberDeclaration, TemplateParams, AlignInBits, Annotations));
+ Metadata *Ops[] = {Scope,
+ Name,
+ File,
+ Type,
+ Name,
+ LinkageName,
+ StaticDataMemberDeclaration,
+ TemplateParams,
+ Annotations};
+ DEFINE_GETIMPL_STORE(DIGlobalVariable,
+ (Line, IsLocalToUnit, IsDefinition, AlignInBits), Ops);
+}
+
+DILocalVariable *
+DILocalVariable::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+ Metadata *File, unsigned Line, Metadata *Type,
+ unsigned Arg, DIFlags Flags, uint32_t AlignInBits,
+ Metadata *Annotations, StorageType Storage,
+ bool ShouldCreate) {
+ // 64K ought to be enough for any frontend.
+ assert(Arg <= UINT16_MAX && "Expected argument number to fit in 16-bits");
+
+ assert(Scope && "Expected scope");
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DILocalVariable, (Scope, Name, File, Line, Type, Arg,
+ Flags, AlignInBits, Annotations));
+ Metadata *Ops[] = {Scope, Name, File, Type, Annotations};
+ DEFINE_GETIMPL_STORE(DILocalVariable, (Line, Arg, Flags, AlignInBits), Ops);
+}
+
+DIVariable::DIVariable(LLVMContext &C, unsigned ID, StorageType Storage,
+ signed Line, ArrayRef<Metadata *> Ops,
+ uint32_t AlignInBits)
+ : DINode(C, ID, Storage, dwarf::DW_TAG_variable, Ops), Line(Line),
+ AlignInBits(AlignInBits) {}
+
+Optional<uint64_t> DIVariable::getSizeInBits() const {
+ // This is used by the Verifier so be mindful of broken types.
+ const Metadata *RawType = getRawType();
+ while (RawType) {
+ // Try to get the size directly.
+ if (auto *T = dyn_cast<DIType>(RawType))
+ if (uint64_t Size = T->getSizeInBits())
+ return Size;
+
+ if (auto *DT = dyn_cast<DIDerivedType>(RawType)) {
+ // Look at the base type.
+ RawType = DT->getRawBaseType();
+ continue;
+ }
+
+ // Missing type or size.
+ break;
+ }
+
+ // Fail gracefully.
+ return None;
+}
+
+DILabel::DILabel(LLVMContext &C, StorageType Storage, unsigned Line,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, DILabelKind, Storage, dwarf::DW_TAG_label, Ops), Line(Line) {}
+
+DILabel *DILabel::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+ Metadata *File, unsigned Line, StorageType Storage,
+ bool ShouldCreate) {
+ assert(Scope && "Expected scope");
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DILabel, (Scope, Name, File, Line));
+ Metadata *Ops[] = {Scope, Name, File};
+ DEFINE_GETIMPL_STORE(DILabel, (Line), Ops);
+}
+
+DIExpression *DIExpression::getImpl(LLVMContext &Context,
+ ArrayRef<uint64_t> Elements,
+ StorageType Storage, bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DIExpression, (Elements));
+ DEFINE_GETIMPL_STORE_NO_OPS(DIExpression, (Elements));
+}
+
+bool DIExpression::isEntryValue() const {
+ return getNumElements() > 0 && getElement(0) == dwarf::DW_OP_LLVM_entry_value;
+}
+
+bool DIExpression::startsWithDeref() const {
+ return getNumElements() > 0 && getElement(0) == dwarf::DW_OP_deref;
+}
+
+unsigned DIExpression::ExprOperand::getSize() const {
+ uint64_t Op = getOp();
+
+ if (Op >= dwarf::DW_OP_breg0 && Op <= dwarf::DW_OP_breg31)
+ return 2;
+
+ switch (Op) {
+ case dwarf::DW_OP_LLVM_convert:
+ case dwarf::DW_OP_LLVM_fragment:
+ case dwarf::DW_OP_bregx:
+ return 3;
+ case dwarf::DW_OP_constu:
+ case dwarf::DW_OP_consts:
+ case dwarf::DW_OP_deref_size:
+ case dwarf::DW_OP_plus_uconst:
+ case dwarf::DW_OP_LLVM_tag_offset:
+ case dwarf::DW_OP_LLVM_entry_value:
+ case dwarf::DW_OP_LLVM_arg:
+ case dwarf::DW_OP_regx:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+bool DIExpression::isValid() const {
+ for (auto I = expr_op_begin(), E = expr_op_end(); I != E; ++I) {
+ // Check that there's space for the operand.
+ if (I->get() + I->getSize() > E->get())
+ return false;
+
+ uint64_t Op = I->getOp();
+ if ((Op >= dwarf::DW_OP_reg0 && Op <= dwarf::DW_OP_reg31) ||
+ (Op >= dwarf::DW_OP_breg0 && Op <= dwarf::DW_OP_breg31))
+ return true;
+
+ // Check that the operand is valid.
+ switch (Op) {
+ default:
+ return false;
+ case dwarf::DW_OP_LLVM_fragment:
+ // A fragment operator must appear at the end.
+ return I->get() + I->getSize() == E->get();
+ case dwarf::DW_OP_stack_value: {
+ // Must be the last one or followed by a DW_OP_LLVM_fragment.
+ if (I->get() + I->getSize() == E->get())
+ break;
+ auto J = I;
+ if ((++J)->getOp() != dwarf::DW_OP_LLVM_fragment)
+ return false;
+ break;
+ }
+ case dwarf::DW_OP_swap: {
+ // Must be more than one implicit element on the stack.
+
+ // FIXME: A better way to implement this would be to add a local variable
+ // that keeps track of the stack depth and introduce something like a
+ // DW_LLVM_OP_implicit_location as a placeholder for the location this
+ // DIExpression is attached to, or else pass the number of implicit stack
+ // elements into isValid.
+ if (getNumElements() == 1)
+ return false;
+ break;
+ }
+ case dwarf::DW_OP_LLVM_entry_value: {
+ // An entry value operator must appear at the beginning, and the number of
+ // operations it covers can currently only be 1, because we support only
+ // entry values of a simple register location. One reason for this is that
+ // we currently can't calculate the size of the resulting DWARF block for
+ // other expressions.
+ return I->get() == expr_op_begin()->get() && I->getArg(0) == 1;
+ }
+ case dwarf::DW_OP_LLVM_implicit_pointer:
+ case dwarf::DW_OP_LLVM_convert:
+ case dwarf::DW_OP_LLVM_arg:
+ case dwarf::DW_OP_LLVM_tag_offset:
+ case dwarf::DW_OP_constu:
+ case dwarf::DW_OP_plus_uconst:
+ case dwarf::DW_OP_plus:
+ case dwarf::DW_OP_minus:
+ case dwarf::DW_OP_mul:
+ case dwarf::DW_OP_div:
+ case dwarf::DW_OP_mod:
+ case dwarf::DW_OP_or:
+ case dwarf::DW_OP_and:
+ case dwarf::DW_OP_xor:
+ case dwarf::DW_OP_shl:
+ case dwarf::DW_OP_shr:
+ case dwarf::DW_OP_shra:
+ case dwarf::DW_OP_deref:
+ case dwarf::DW_OP_deref_size:
+ case dwarf::DW_OP_xderef:
+ case dwarf::DW_OP_lit0:
+ case dwarf::DW_OP_not:
+ case dwarf::DW_OP_dup:
+ case dwarf::DW_OP_regx:
+ case dwarf::DW_OP_bregx:
+ case dwarf::DW_OP_push_object_address:
+ case dwarf::DW_OP_over:
+ case dwarf::DW_OP_consts:
+ break;
+ }
+ }
+ return true;
+}
+
+bool DIExpression::isImplicit() const {
+ if (!isValid())
+ return false;
+
+ if (getNumElements() == 0)
+ return false;
+
+ for (const auto &It : expr_ops()) {
+ switch (It.getOp()) {
+ default:
+ break;
+ case dwarf::DW_OP_stack_value:
+ case dwarf::DW_OP_LLVM_tag_offset:
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool DIExpression::isComplex() const {
+ if (!isValid())
+ return false;
+
+ if (getNumElements() == 0)
+ return false;
+
+ // If there are any elements other than fragment or tag_offset, then some
+ // kind of complex computation occurs.
+ for (const auto &It : expr_ops()) {
+ switch (It.getOp()) {
+ case dwarf::DW_OP_LLVM_tag_offset:
+ case dwarf::DW_OP_LLVM_fragment:
+ continue;
+ default:
+ return true;
+ }
+ }
+
+ return false;
+}
+
+Optional<DIExpression::FragmentInfo>
+DIExpression::getFragmentInfo(expr_op_iterator Start, expr_op_iterator End) {
+ for (auto I = Start; I != End; ++I)
+ if (I->getOp() == dwarf::DW_OP_LLVM_fragment) {
+ DIExpression::FragmentInfo Info = {I->getArg(1), I->getArg(0)};
+ return Info;
+ }
+ return None;
+}
+
+void DIExpression::appendOffset(SmallVectorImpl<uint64_t> &Ops,
+ int64_t Offset) {
+ if (Offset > 0) {
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
+ Ops.push_back(Offset);
+ } else if (Offset < 0) {
+ Ops.push_back(dwarf::DW_OP_constu);
+ Ops.push_back(-Offset);
+ Ops.push_back(dwarf::DW_OP_minus);
+ }
+}
+
+bool DIExpression::extractIfOffset(int64_t &Offset) const {
+ if (getNumElements() == 0) {
+ Offset = 0;
+ return true;
+ }
+
+ if (getNumElements() == 2 && Elements[0] == dwarf::DW_OP_plus_uconst) {
+ Offset = Elements[1];
+ return true;
+ }
+
+ if (getNumElements() == 3 && Elements[0] == dwarf::DW_OP_constu) {
+ if (Elements[2] == dwarf::DW_OP_plus) {
+ Offset = Elements[1];
+ return true;
+ }
+ if (Elements[2] == dwarf::DW_OP_minus) {
+ Offset = -Elements[1];
+ return true;
+ }
+ }
+
+ return false;
+}
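
appendOffset and extractIfOffset round-trip; a sketch, assuming an existing
LLVMContext Ctx:

    SmallVector<uint64_t, 4> Ops;
    DIExpression::appendOffset(Ops, -8);
    // Ops == {DW_OP_constu, 8, DW_OP_minus}; a positive offset would instead
    // produce {DW_OP_plus_uconst, 8}.
    DIExpression *Expr = DIExpression::get(Ctx, Ops);
    int64_t Offset;
    bool IsOffset = Expr->extractIfOffset(Offset); // true, Offset == -8
    (void)IsOffset;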
+
+bool DIExpression::hasAllLocationOps(unsigned N) const {
+ SmallDenseSet<uint64_t, 4> SeenOps;
+ for (auto ExprOp : expr_ops())
+ if (ExprOp.getOp() == dwarf::DW_OP_LLVM_arg)
+ SeenOps.insert(ExprOp.getArg(0));
+ for (uint64_t Idx = 0; Idx < N; ++Idx)
+ if (!is_contained(SeenOps, Idx))
+ return false;
+ return true;
+}
+
+const DIExpression *DIExpression::extractAddressClass(const DIExpression *Expr,
+ unsigned &AddrClass) {
+ // FIXME: This seems fragile. Nothing verifies that these elements actually
+ // map to ops and not operands.
+ const unsigned PatternSize = 4;
+ if (Expr->Elements.size() >= PatternSize &&
+ Expr->Elements[PatternSize - 4] == dwarf::DW_OP_constu &&
+ Expr->Elements[PatternSize - 2] == dwarf::DW_OP_swap &&
+ Expr->Elements[PatternSize - 1] == dwarf::DW_OP_xderef) {
+ AddrClass = Expr->Elements[PatternSize - 3];
+
+ if (Expr->Elements.size() == PatternSize)
+ return nullptr;
+ return DIExpression::get(Expr->getContext(),
+ makeArrayRef(&*Expr->Elements.begin(),
+ Expr->Elements.size() - PatternSize));
+ }
+ return Expr;
+}
+
+DIExpression *DIExpression::prepend(const DIExpression *Expr, uint8_t Flags,
+ int64_t Offset) {
+ SmallVector<uint64_t, 8> Ops;
+ if (Flags & DIExpression::DerefBefore)
+ Ops.push_back(dwarf::DW_OP_deref);
+
+ appendOffset(Ops, Offset);
+ if (Flags & DIExpression::DerefAfter)
+ Ops.push_back(dwarf::DW_OP_deref);
+
+ bool StackValue = Flags & DIExpression::StackValue;
+ bool EntryValue = Flags & DIExpression::EntryValue;
+
+ return prependOpcodes(Expr, Ops, StackValue, EntryValue);
+}
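
For instance, to express "load the pointer, then add 16" in front of an
existing expression (a sketch; Expr is an existing, possibly empty
DIExpression *):

    DIExpression *NewExpr =
        DIExpression::prepend(Expr, DIExpression::DerefBefore, /*Offset=*/16);
    // NewExpr starts with DW_OP_deref, DW_OP_plus_uconst, 16, followed by
    // Expr's original ops.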
+
+DIExpression *DIExpression::appendOpsToArg(const DIExpression *Expr,
+ ArrayRef<uint64_t> Ops,
+ unsigned ArgNo, bool StackValue) {
+ assert(Expr && "Can't add ops to this expression");
+
+ // Handle non-variadic intrinsics by prepending the opcodes.
+ if (!any_of(Expr->expr_ops(),
+ [](auto Op) { return Op.getOp() == dwarf::DW_OP_LLVM_arg; })) {
+ assert(ArgNo == 0 &&
+ "Location Index must be 0 for a non-variadic expression.");
+ SmallVector<uint64_t, 8> NewOps(Ops.begin(), Ops.end());
+ return DIExpression::prependOpcodes(Expr, NewOps, StackValue);
+ }
+
+ SmallVector<uint64_t, 8> NewOps;
+ for (auto Op : Expr->expr_ops()) {
+ Op.appendToVector(NewOps);
+ if (Op.getOp() == dwarf::DW_OP_LLVM_arg && Op.getArg(0) == ArgNo)
+ NewOps.insert(NewOps.end(), Ops.begin(), Ops.end());
+ }
+
+ return DIExpression::get(Expr->getContext(), NewOps);
+}
+
+DIExpression *DIExpression::replaceArg(const DIExpression *Expr,
+ uint64_t OldArg, uint64_t NewArg) {
+ assert(Expr && "Can't replace args in this expression");
+
+ SmallVector<uint64_t, 8> NewOps;
+
+ for (auto Op : Expr->expr_ops()) {
+ if (Op.getOp() != dwarf::DW_OP_LLVM_arg || Op.getArg(0) < OldArg) {
+ Op.appendToVector(NewOps);
+ continue;
+ }
+ NewOps.push_back(dwarf::DW_OP_LLVM_arg);
+ uint64_t Arg = Op.getArg(0) == OldArg ? NewArg : Op.getArg(0);
+ // OldArg has been deleted from the Op list, so decrement all indices
+ // greater than it.
+ if (Arg > OldArg)
+ --Arg;
+ NewOps.push_back(Arg);
+ }
+ return DIExpression::get(Expr->getContext(), NewOps);
+}
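
Note that replaceArg both redirects OldArg and compacts the index space; a
worked sketch with OldArg = 0 and NewArg = 1:

    DIExpression *NewExpr =
        DIExpression::replaceArg(Expr, /*OldArg=*/0, /*NewArg=*/1);
    // {DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus}
    //   becomes
    // {DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 0, DW_OP_plus}
    // because the old arg 1 slides down to fill the vacated index 0.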
+
+DIExpression *DIExpression::prependOpcodes(const DIExpression *Expr,
+ SmallVectorImpl<uint64_t> &Ops,
+ bool StackValue, bool EntryValue) {
+ assert(Expr && "Can't prepend ops to this expression");
+
+ if (EntryValue) {
+ Ops.push_back(dwarf::DW_OP_LLVM_entry_value);
+ // Use a block size of 1 for the target register operand. The
+ // DWARF backend currently cannot emit entry values with a block
+ // size > 1.
+ Ops.push_back(1);
+ }
+
+ // If there are no ops to prepend, do not even add the DW_OP_stack_value.
+ if (Ops.empty())
+ StackValue = false;
+ for (auto Op : Expr->expr_ops()) {
+ // A DW_OP_stack_value comes at the end, but before a DW_OP_LLVM_fragment.
+ if (StackValue) {
+ if (Op.getOp() == dwarf::DW_OP_stack_value)
+ StackValue = false;
+ else if (Op.getOp() == dwarf::DW_OP_LLVM_fragment) {
+ Ops.push_back(dwarf::DW_OP_stack_value);
+ StackValue = false;
+ }
+ }
+ Op.appendToVector(Ops);
+ }
+ if (StackValue)
+ Ops.push_back(dwarf::DW_OP_stack_value);
+ return DIExpression::get(Expr->getContext(), Ops);
+}
+
+DIExpression *DIExpression::append(const DIExpression *Expr,
+ ArrayRef<uint64_t> Ops) {
+ assert(Expr && !Ops.empty() && "Can't append ops to this expression");
+
+ // Copy Expr's current op list.
+ SmallVector<uint64_t, 16> NewOps;
+ for (auto Op : Expr->expr_ops()) {
+ // Append new opcodes before DW_OP_{stack_value, LLVM_fragment}.
+ if (Op.getOp() == dwarf::DW_OP_stack_value ||
+ Op.getOp() == dwarf::DW_OP_LLVM_fragment) {
+ NewOps.append(Ops.begin(), Ops.end());
+
+ // Ensure that the new opcodes are only appended once.
+ Ops = None;
+ }
+ Op.appendToVector(NewOps);
+ }
+
+ NewOps.append(Ops.begin(), Ops.end());
+ auto *result = DIExpression::get(Expr->getContext(), NewOps);
+ assert(result->isValid() && "concatenated expression is not valid");
+ return result;
+}
+
+DIExpression *DIExpression::appendToStack(const DIExpression *Expr,
+ ArrayRef<uint64_t> Ops) {
+ assert(Expr && !Ops.empty() && "Can't append ops to this expression");
+ assert(none_of(Ops,
+ [](uint64_t Op) {
+ return Op == dwarf::DW_OP_stack_value ||
+ Op == dwarf::DW_OP_LLVM_fragment;
+ }) &&
+ "Can't append this op");
+
+ // Append a DW_OP_deref after Expr's current op list if it's non-empty and
+ // has no DW_OP_stack_value.
+ //
+ // Match .* DW_OP_stack_value (DW_OP_LLVM_fragment A B)?.
+ Optional<FragmentInfo> FI = Expr->getFragmentInfo();
+ unsigned DropUntilStackValue = FI ? 3 : 0;
+ ArrayRef<uint64_t> ExprOpsBeforeFragment =
+ Expr->getElements().drop_back(DropUntilStackValue);
+ bool NeedsDeref = (Expr->getNumElements() > DropUntilStackValue) &&
+ (ExprOpsBeforeFragment.back() != dwarf::DW_OP_stack_value);
+ bool NeedsStackValue = NeedsDeref || ExprOpsBeforeFragment.empty();
+
+ // Append a DW_OP_deref after Expr's current op list if needed, then append
+ // the new ops, and finally ensure that a single DW_OP_stack_value is present.
+ SmallVector<uint64_t, 16> NewOps;
+ if (NeedsDeref)
+ NewOps.push_back(dwarf::DW_OP_deref);
+ NewOps.append(Ops.begin(), Ops.end());
+ if (NeedsStackValue)
+ NewOps.push_back(dwarf::DW_OP_stack_value);
+ return DIExpression::append(Expr, NewOps);
+}
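
A sketch: appending an add-4 computation to a register location Expr
(non-empty, with no DW_OP_stack_value of its own) first loads the value, then
guarantees a single trailing stack-value marker:

    DIExpression *NewExpr = DIExpression::appendToStack(
        Expr, {dwarf::DW_OP_plus_uconst, 4});
    // Result: <Expr's ops>, DW_OP_deref, DW_OP_plus_uconst, 4,
    //         DW_OP_stack_value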
+
+Optional<DIExpression *> DIExpression::createFragmentExpression(
+ const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits) {
+ SmallVector<uint64_t, 8> Ops;
+ // Copy over the expression, but leave off any trailing DW_OP_LLVM_fragment.
+ if (Expr) {
+ for (auto Op : Expr->expr_ops()) {
+ switch (Op.getOp()) {
+ default:
+ break;
+ case dwarf::DW_OP_shr:
+ case dwarf::DW_OP_shra:
+ case dwarf::DW_OP_shl:
+ case dwarf::DW_OP_plus:
+ case dwarf::DW_OP_plus_uconst:
+ case dwarf::DW_OP_minus:
+ // We can't safely split arithmetic or shift operations into multiple
+ // fragments because we can't express carry-over between fragments.
+ //
+ // FIXME: We *could* preserve the lowest fragment of a constant offset
+ // operation if the offset fits into SizeInBits.
+ return None;
+ case dwarf::DW_OP_LLVM_fragment: {
+ // Make the new offset point into the existing fragment.
+ uint64_t FragmentOffsetInBits = Op.getArg(0);
+ uint64_t FragmentSizeInBits = Op.getArg(1);
+ (void)FragmentSizeInBits;
+ assert((OffsetInBits + SizeInBits <= FragmentSizeInBits) &&
+ "new fragment outside of original fragment");
+ OffsetInBits += FragmentOffsetInBits;
+ continue;
+ }
+ }
+ Op.appendToVector(Ops);
+ }
+ }
+ assert(Expr && "Unknown DIExpression");
+ Ops.push_back(dwarf::DW_OP_LLVM_fragment);
+ Ops.push_back(OffsetInBits);
+ Ops.push_back(SizeInBits);
+ return DIExpression::get(Expr->getContext(), Ops);
+}
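
A sketch carving out the upper half of a 64-bit variable; None is returned
when Expr contains arithmetic that cannot be split across fragments:

    if (Optional<DIExpression *> Frag = DIExpression::createFragmentExpression(
            Expr, /*OffsetInBits=*/32, /*SizeInBits=*/32)) {
      // (*Frag) is Expr's ops followed by DW_OP_LLVM_fragment, 32, 32.
    }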
+
+std::pair<DIExpression *, const ConstantInt *>
+DIExpression::constantFold(const ConstantInt *CI) {
+ // Copy the APInt so we can modify it.
+ APInt NewInt = CI->getValue();
+ SmallVector<uint64_t, 8> Ops;
+
+ // Fold operators only at the beginning of the expression.
+ bool First = true;
+ bool Changed = false;
+ for (auto Op : expr_ops()) {
+ switch (Op.getOp()) {
+ default:
+ // We fold only the leading part of the expression; if we get to a part
+ // that we're going to copy unchanged, and haven't done any folding,
+ // then the entire expression is unchanged and we can return early.
+ if (!Changed)
+ return {this, CI};
+ First = false;
+ break;
+ case dwarf::DW_OP_LLVM_convert:
+ if (!First)
+ break;
+ Changed = true;
+ if (Op.getArg(1) == dwarf::DW_ATE_signed)
+ NewInt = NewInt.sextOrTrunc(Op.getArg(0));
+ else {
+ assert(Op.getArg(1) == dwarf::DW_ATE_unsigned && "Unexpected operand");
+ NewInt = NewInt.zextOrTrunc(Op.getArg(0));
+ }
+ continue;
+ }
+ Op.appendToVector(Ops);
+ }
+ if (!Changed)
+ return {this, CI};
+ return {DIExpression::get(getContext(), Ops),
+ ConstantInt::get(getContext(), NewInt)};
+}
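
For example, a leading signed DW_OP_LLVM_convert folds directly into the
constant (a sketch; CI is assumed to be an existing 8-bit ConstantInt *):

    // Expr == {DW_OP_LLVM_convert, 32, DW_ATE_signed, DW_OP_stack_value}
    std::pair<DIExpression *, const ConstantInt *> Folded =
        Expr->constantFold(CI);
    // Folded.first  == {DW_OP_stack_value}
    // Folded.second == CI's value sign-extended to 32 bits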
+
+uint64_t DIExpression::getNumLocationOperands() const {
+ uint64_t Result = 0;
+ for (auto ExprOp : expr_ops())
+ if (ExprOp.getOp() == dwarf::DW_OP_LLVM_arg)
+ Result = std::max(Result, ExprOp.getArg(0) + 1);
+ assert(hasAllLocationOps(Result) &&
+ "Expression is missing one or more location operands.");
+ return Result;
+}
+
+llvm::Optional<DIExpression::SignedOrUnsignedConstant>
+DIExpression::isConstant() const {
+
+ // Recognize signed and unsigned constants.
+ // A signed constant can be represented as DW_OP_consts C DW_OP_stack_value
+ // (DW_OP_LLVM_fragment of Len).
+ // An unsigned constant can be represented as
+ // DW_OP_constu C DW_OP_stack_value (DW_OP_LLVM_fragment of Len).
+
+ if ((getNumElements() != 2 && getNumElements() != 3 &&
+ getNumElements() != 6) ||
+ (getElement(0) != dwarf::DW_OP_consts &&
+ getElement(0) != dwarf::DW_OP_constu))
+ return None;
+
+ if (getNumElements() == 2 && getElement(0) == dwarf::DW_OP_consts)
+ return SignedOrUnsignedConstant::SignedConstant;
+
+ if ((getNumElements() == 3 && getElement(2) != dwarf::DW_OP_stack_value) ||
+ (getNumElements() == 6 && (getElement(2) != dwarf::DW_OP_stack_value ||
+ getElement(3) != dwarf::DW_OP_LLVM_fragment)))
+ return None;
+ return getElement(0) == dwarf::DW_OP_constu
+ ? SignedOrUnsignedConstant::UnsignedConstant
+ : SignedOrUnsignedConstant::SignedConstant;
+}
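
Concretely, shapes this recognizes include:

    // {DW_OP_consts, -7, DW_OP_stack_value}            -> SignedConstant
    // {DW_OP_constu, 42, DW_OP_stack_value}            -> UnsignedConstant
    // {DW_OP_constu, 42, DW_OP_stack_value,
    //  DW_OP_LLVM_fragment, 0, 32}                     -> UnsignedConstant
    // {DW_OP_constu, 42, DW_OP_plus}                   -> None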
+
+DIExpression::ExtOps DIExpression::getExtOps(unsigned FromSize, unsigned ToSize,
+ bool Signed) {
+ dwarf::TypeKind TK = Signed ? dwarf::DW_ATE_signed : dwarf::DW_ATE_unsigned;
+ DIExpression::ExtOps Ops{{dwarf::DW_OP_LLVM_convert, FromSize, TK,
+ dwarf::DW_OP_LLVM_convert, ToSize, TK}};
+ return Ops;
+}
+
+DIExpression *DIExpression::appendExt(const DIExpression *Expr,
+ unsigned FromSize, unsigned ToSize,
+ bool Signed) {
+ return appendToStack(Expr, getExtOps(FromSize, ToSize, Signed));
+}
+
+DIGlobalVariableExpression *
+DIGlobalVariableExpression::getImpl(LLVMContext &Context, Metadata *Variable,
+ Metadata *Expression, StorageType Storage,
+ bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DIGlobalVariableExpression, (Variable, Expression));
+ Metadata *Ops[] = {Variable, Expression};
+ DEFINE_GETIMPL_STORE_NO_CONSTRUCTOR_ARGS(DIGlobalVariableExpression, Ops);
+}
+
+DIObjCProperty::DIObjCProperty(LLVMContext &C, StorageType Storage,
+ unsigned Line, unsigned Attributes,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, DIObjCPropertyKind, Storage, dwarf::DW_TAG_APPLE_property, Ops),
+ Line(Line), Attributes(Attributes) {}
+
+DIObjCProperty *DIObjCProperty::getImpl(
+ LLVMContext &Context, MDString *Name, Metadata *File, unsigned Line,
+ MDString *GetterName, MDString *SetterName, unsigned Attributes,
+ Metadata *Type, StorageType Storage, bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ assert(isCanonical(GetterName) && "Expected canonical MDString");
+ assert(isCanonical(SetterName) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIObjCProperty, (Name, File, Line, GetterName,
+ SetterName, Attributes, Type));
+ Metadata *Ops[] = {Name, File, GetterName, SetterName, Type};
+ DEFINE_GETIMPL_STORE(DIObjCProperty, (Line, Attributes), Ops);
+}
+
+DIImportedEntity *DIImportedEntity::getImpl(LLVMContext &Context, unsigned Tag,
+ Metadata *Scope, Metadata *Entity,
+ Metadata *File, unsigned Line,
+ MDString *Name, Metadata *Elements,
+ StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIImportedEntity,
+ (Tag, Scope, Entity, File, Line, Name, Elements));
+ Metadata *Ops[] = {Scope, Entity, Name, File, Elements};
+ DEFINE_GETIMPL_STORE(DIImportedEntity, (Tag, Line), Ops);
+}
+
+DIMacro *DIMacro::getImpl(LLVMContext &Context, unsigned MIType, unsigned Line,
+ MDString *Name, MDString *Value, StorageType Storage,
+ bool ShouldCreate) {
+ assert(isCanonical(Name) && "Expected canonical MDString");
+ DEFINE_GETIMPL_LOOKUP(DIMacro, (MIType, Line, Name, Value));
+ Metadata *Ops[] = {Name, Value};
+ DEFINE_GETIMPL_STORE(DIMacro, (MIType, Line), Ops);
+}
+
+DIMacroFile *DIMacroFile::getImpl(LLVMContext &Context, unsigned MIType,
+ unsigned Line, Metadata *File,
+ Metadata *Elements, StorageType Storage,
+ bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DIMacroFile, (MIType, Line, File, Elements));
+ Metadata *Ops[] = {File, Elements};
+ DEFINE_GETIMPL_STORE(DIMacroFile, (MIType, Line), Ops);
+}
+
+DIArgList *DIArgList::getImpl(LLVMContext &Context,
+ ArrayRef<ValueAsMetadata *> Args,
+ StorageType Storage, bool ShouldCreate) {
+ DEFINE_GETIMPL_LOOKUP(DIArgList, (Args));
+ DEFINE_GETIMPL_STORE_NO_OPS(DIArgList, (Args));
+}
+
+void DIArgList::handleChangedOperand(void *Ref, Metadata *New) {
+ ValueAsMetadata **OldVMPtr = static_cast<ValueAsMetadata **>(Ref);
+ assert((!New || isa<ValueAsMetadata>(New)) &&
+ "DIArgList must be passed a ValueAsMetadata");
+ untrack();
+ bool Uniq = isUniqued();
+ if (Uniq) {
+ // We need to update the uniqueness once the Args are updated since they
+ // form the key to the DIArgLists store.
+ eraseFromStore();
+ }
+ ValueAsMetadata *NewVM = cast_or_null<ValueAsMetadata>(New);
+ for (ValueAsMetadata *&VM : Args) {
+ if (&VM == OldVMPtr) {
+ if (NewVM)
+ VM = NewVM;
+ else
+ VM = ValueAsMetadata::get(UndefValue::get(VM->getValue()->getType()));
+ }
+ }
+ if (Uniq) {
+ if (uniquify() != this)
+ storeDistinctInContext();
+ }
+ track();
+}
+
+void DIArgList::track() {
+ for (ValueAsMetadata *&VAM : Args)
+ if (VAM)
+ MetadataTracking::track(&VAM, *VAM, *this);
+}
+
+void DIArgList::untrack() {
+ for (ValueAsMetadata *&VAM : Args)
+ if (VAM)
+ MetadataTracking::untrack(&VAM, *VAM);
+}
+
+void DIArgList::dropAllReferences() {
+ untrack();
+ Args.clear();
+ MDNode::dropAllReferences();
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/DebugLoc.cpp b/contrib/llvm-project/llvm/lib/IR/DebugLoc.cpp
new file mode 100644
index 000000000000..34c9d026b19a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DebugLoc.cpp
@@ -0,0 +1,119 @@
+//===-- DebugLoc.cpp - Implement DebugLoc class ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/DebugInfo.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// DebugLoc Implementation
+//===----------------------------------------------------------------------===//
+DebugLoc::DebugLoc(const DILocation *L) : Loc(const_cast<DILocation *>(L)) {}
+DebugLoc::DebugLoc(const MDNode *L) : Loc(const_cast<MDNode *>(L)) {}
+
+DILocation *DebugLoc::get() const {
+ return cast_or_null<DILocation>(Loc.get());
+}
+
+unsigned DebugLoc::getLine() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getLine();
+}
+
+unsigned DebugLoc::getCol() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getColumn();
+}
+
+MDNode *DebugLoc::getScope() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getScope();
+}
+
+DILocation *DebugLoc::getInlinedAt() const {
+ assert(get() && "Expected valid DebugLoc");
+ return get()->getInlinedAt();
+}
+
+MDNode *DebugLoc::getInlinedAtScope() const {
+ return cast<DILocation>(Loc)->getInlinedAtScope();
+}
+
+DebugLoc DebugLoc::getFnDebugLoc() const {
+ // FIXME: Add a method on \a DILocation that does this work.
+ const MDNode *Scope = getInlinedAtScope();
+ if (auto *SP = getDISubprogram(Scope))
+ return DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
+
+ return DebugLoc();
+}
+
+bool DebugLoc::isImplicitCode() const {
+ if (DILocation *Loc = get()) {
+ return Loc->isImplicitCode();
+ }
+ return true;
+}
+
+void DebugLoc::setImplicitCode(bool ImplicitCode) {
+ if (DILocation *Loc = get()) {
+ Loc->setImplicitCode(ImplicitCode);
+ }
+}
+
+DebugLoc DebugLoc::appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt,
+ LLVMContext &Ctx,
+ DenseMap<const MDNode *, MDNode *> &Cache) {
+ SmallVector<DILocation *, 3> InlinedAtLocations;
+ DILocation *Last = InlinedAt;
+ DILocation *CurInlinedAt = DL;
+
+ // Gather all the inlined-at nodes.
+ while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
+ // Skip any we've already built nodes for.
+ if (auto *Found = Cache[IA]) {
+ Last = cast<DILocation>(Found);
+ break;
+ }
+
+ InlinedAtLocations.push_back(IA);
+ CurInlinedAt = IA;
+ }
+
+ // Starting from the top, rebuild the nodes to point to the new inlined-at
+ // location (then rebuilding the rest of the chain behind it) and update the
+ // map of already-constructed inlined-at nodes.
+ for (const DILocation *MD : reverse(InlinedAtLocations))
+ Cache[MD] = Last = DILocation::getDistinct(
+ Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
+
+ return Last;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void DebugLoc::dump() const { print(dbgs()); }
+#endif
+
+void DebugLoc::print(raw_ostream &OS) const {
+ if (!Loc)
+ return;
+
+ // Print source line info.
+ auto *Scope = cast<DIScope>(getScope());
+ OS << Scope->getFilename();
+ OS << ':' << getLine();
+ if (getCol() != 0)
+ OS << ':' << getCol();
+
+ if (DebugLoc InlinedAtDL = getInlinedAt()) {
+ OS << " @[ ";
+ InlinedAtDL.print(OS);
+ OS << " ]";
+ }
+}
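
With illustrative file names, a location inlined from a header prints as:

    inlined.h:10:5 @[ main.c:42:3 ]

The column is omitted when it is zero, and the @[ ... ] suffix nests once per
level of inlining.
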
diff --git a/contrib/llvm-project/llvm/lib/IR/DiagnosticHandler.cpp b/contrib/llvm-project/llvm/lib/IR/DiagnosticHandler.cpp
new file mode 100644
index 000000000000..683eade50291
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DiagnosticHandler.cpp
@@ -0,0 +1,87 @@
+//===- DiagnosticHandler.cpp - DiagnosticHandler class for LLVM ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DiagnosticHandler remark predicates and the
+// -pass-remarks* command-line filters that drive them.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/IR/DiagnosticHandler.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Regex.h"
+
+using namespace llvm;
+
+namespace {
+
+/// Regular expression corresponding to the value given in one of the
+/// -pass-remarks* command line flags. Passes whose name matches this regexp
+/// will emit a diagnostic when calling the associated diagnostic function
+/// (emitOptimizationRemark, emitOptimizationRemarkMissed or
+/// emitOptimizationRemarkAnalysis).
+struct PassRemarksOpt {
+ std::shared_ptr<Regex> Pattern;
+
+ void operator=(const std::string &Val) {
+ // Create a regexp object to match pass names for emitOptimizationRemark.
+ if (!Val.empty()) {
+ Pattern = std::make_shared<Regex>(Val);
+ std::string RegexError;
+ if (!Pattern->isValid(RegexError))
+ report_fatal_error(Twine("Invalid regular expression '") + Val +
+ "' in -pass-remarks: " + RegexError,
+ false);
+ }
+ }
+};
+
+static PassRemarksOpt PassRemarksPassedOptLoc;
+static PassRemarksOpt PassRemarksMissedOptLoc;
+static PassRemarksOpt PassRemarksAnalysisOptLoc;
+
+// -pass-remarks
+// Command line flag to enable emitOptimizationRemark()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>> PassRemarks(
+ "pass-remarks", cl::value_desc("pattern"),
+ cl::desc("Enable optimization remarks from passes whose name match "
+ "the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksPassedOptLoc), cl::ValueRequired);
+
+// -pass-remarks-missed
+// Command line flag to enable emitOptimizationRemarkMissed()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>> PassRemarksMissed(
+ "pass-remarks-missed", cl::value_desc("pattern"),
+ cl::desc("Enable missed optimization remarks from passes whose name match "
+ "the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksMissedOptLoc), cl::ValueRequired);
+
+// -pass-remarks-analysis
+// Command line flag to enable emitOptimizationRemarkAnalysis()
+static cl::opt<PassRemarksOpt, true, cl::parser<std::string>>
+ PassRemarksAnalysis(
+ "pass-remarks-analysis", cl::value_desc("pattern"),
+ cl::desc(
+ "Enable optimization analysis remarks from passes whose name match "
+ "the given regular expression"),
+ cl::Hidden, cl::location(PassRemarksAnalysisOptLoc), cl::ValueRequired);
+}
+
+bool DiagnosticHandler::isAnalysisRemarkEnabled(StringRef PassName) const {
+ return (PassRemarksAnalysisOptLoc.Pattern &&
+ PassRemarksAnalysisOptLoc.Pattern->match(PassName));
+}
+bool DiagnosticHandler::isMissedOptRemarkEnabled(StringRef PassName) const {
+ return (PassRemarksMissedOptLoc.Pattern &&
+ PassRemarksMissedOptLoc.Pattern->match(PassName));
+}
+bool DiagnosticHandler::isPassedOptRemarkEnabled(StringRef PassName) const {
+ return (PassRemarksPassedOptLoc.Pattern &&
+ PassRemarksPassedOptLoc.Pattern->match(PassName));
+}
+
+bool DiagnosticHandler::isAnyRemarkEnabled() const {
+ return (PassRemarksPassedOptLoc.Pattern || PassRemarksMissedOptLoc.Pattern ||
+ PassRemarksAnalysisOptLoc.Pattern);
+}
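
These cl::opt variables are registered with LLVM's global command-line parser,
so any tool that runs it picks them up; a typical invocation (pass names and
file names are illustrative):

    opt -O2 -pass-remarks='inline' -pass-remarks-missed='inline|unroll' \
        input.ll -S -o output.ll
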
diff --git a/contrib/llvm-project/llvm/lib/IR/DiagnosticInfo.cpp b/contrib/llvm-project/llvm/lib/IR/DiagnosticInfo.cpp
new file mode 100644
index 000000000000..50fe6829ad86
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DiagnosticInfo.cpp
@@ -0,0 +1,440 @@
+//===- llvm/IR/DiagnosticInfo.cpp - Diagnostic Definitions ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the different classes involved in low level diagnostics.
+//
+// Diagnostics reporting is still done as part of the LLVMContext.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/InstructionCost.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <atomic>
+#include <string>
+
+using namespace llvm;
+
+int llvm::getNextAvailablePluginDiagnosticKind() {
+ static std::atomic<int> PluginKindID(DK_FirstPluginKind);
+ return ++PluginKindID;
+}
+
+const char *OptimizationRemarkAnalysis::AlwaysPrint = "";
+
+DiagnosticInfoInlineAsm::DiagnosticInfoInlineAsm(const Instruction &I,
+ const Twine &MsgStr,
+ DiagnosticSeverity Severity)
+ : DiagnosticInfo(DK_InlineAsm, Severity), MsgStr(MsgStr), Instr(&I) {
+ if (const MDNode *SrcLoc = I.getMetadata("srcloc")) {
+ if (SrcLoc->getNumOperands() != 0)
+ if (const auto *CI =
+ mdconst::dyn_extract<ConstantInt>(SrcLoc->getOperand(0)))
+ LocCookie = CI->getZExtValue();
+ }
+}
+
+void DiagnosticInfoInlineAsm::print(DiagnosticPrinter &DP) const {
+ DP << getMsgStr();
+ if (getLocCookie())
+ DP << " at line " << getLocCookie();
+}
+
+void DiagnosticInfoResourceLimit::print(DiagnosticPrinter &DP) const {
+ DP << getResourceName() << " (" << getResourceSize() << ") exceeds limit ("
+ << getResourceLimit() << ") in function '" << getFunction() << '\'';
+}
+
+void DiagnosticInfoDebugMetadataVersion::print(DiagnosticPrinter &DP) const {
+ DP << "ignoring debug info with an invalid version (" << getMetadataVersion()
+ << ") in " << getModule();
+}
+
+void DiagnosticInfoIgnoringInvalidDebugMetadata::print(
+ DiagnosticPrinter &DP) const {
+ DP << "ignoring invalid debug info in " << getModule().getModuleIdentifier();
+}
+
+void DiagnosticInfoSampleProfile::print(DiagnosticPrinter &DP) const {
+ if (!FileName.empty()) {
+ DP << getFileName();
+ if (LineNum > 0)
+ DP << ":" << getLineNum();
+ DP << ": ";
+ }
+ DP << getMsg();
+}
+
+void DiagnosticInfoPGOProfile::print(DiagnosticPrinter &DP) const {
+ if (getFileName())
+ DP << getFileName() << ": ";
+ DP << getMsg();
+}
+
+void DiagnosticInfo::anchor() {}
+void DiagnosticInfoStackSize::anchor() {}
+void DiagnosticInfoWithLocationBase::anchor() {}
+void DiagnosticInfoIROptimization::anchor() {}
+
+DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) {
+ if (!DL)
+ return;
+ File = DL->getFile();
+ Line = DL->getLine();
+ Column = DL->getColumn();
+}
+
+DiagnosticLocation::DiagnosticLocation(const DISubprogram *SP) {
+ if (!SP)
+ return;
+
+ File = SP->getFile();
+ Line = SP->getScopeLine();
+ Column = 0;
+}
+
+StringRef DiagnosticLocation::getRelativePath() const {
+ return File->getFilename();
+}
+
+std::string DiagnosticLocation::getAbsolutePath() const {
+ StringRef Name = File->getFilename();
+ if (sys::path::is_absolute(Name))
+ return std::string(Name);
+
+ SmallString<128> Path;
+ sys::path::append(Path, File->getDirectory(), Name);
+ return sys::path::remove_leading_dotslash(Path).str();
+}
+
+std::string DiagnosticInfoWithLocationBase::getAbsolutePath() const {
+ return Loc.getAbsolutePath();
+}
+
+void DiagnosticInfoWithLocationBase::getLocation(StringRef &RelativePath,
+ unsigned &Line,
+ unsigned &Column) const {
+ RelativePath = Loc.getRelativePath();
+ Line = Loc.getLine();
+ Column = Loc.getColumn();
+}
+
+std::string DiagnosticInfoWithLocationBase::getLocationStr() const {
+ StringRef Filename("<unknown>");
+ unsigned Line = 0;
+ unsigned Column = 0;
+ if (isLocationAvailable())
+ getLocation(Filename, Line, Column);
+ return (Filename + ":" + Twine(Line) + ":" + Twine(Column)).str();
+}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key,
+ const Value *V)
+ : Key(std::string(Key)) {
+ if (auto *F = dyn_cast<Function>(V)) {
+ if (DISubprogram *SP = F->getSubprogram())
+ Loc = SP;
+  } else if (auto *I = dyn_cast<Instruction>(V))
+ Loc = I->getDebugLoc();
+
+ // Only include names that correspond to user variables. FIXME: We should use
+ // debug info if available to get the name of the user variable.
+ if (isa<llvm::Argument>(V) || isa<GlobalValue>(V))
+ Val = std::string(GlobalValue::dropLLVMManglingEscape(V->getName()));
+ else if (isa<Constant>(V)) {
+ raw_string_ostream OS(Val);
+ V->printAsOperand(OS, /*PrintType=*/false);
+ } else if (auto *I = dyn_cast<Instruction>(V))
+ Val = I->getOpcodeName();
+}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, const Type *T)
+ : Key(std::string(Key)) {
+ raw_string_ostream OS(Val);
+ OS << *T;
+}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, StringRef S)
+ : Key(std::string(Key)), Val(S.str()) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, int N)
+ : Key(std::string(Key)), Val(itostr(N)) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, float N)
+ : Key(std::string(Key)), Val(llvm::to_string(N)) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, long N)
+ : Key(std::string(Key)), Val(itostr(N)) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, long long N)
+ : Key(std::string(Key)), Val(itostr(N)) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, unsigned N)
+ : Key(std::string(Key)), Val(utostr(N)) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key,
+ unsigned long N)
+ : Key(std::string(Key)), Val(utostr(N)) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key,
+ unsigned long long N)
+ : Key(std::string(Key)), Val(utostr(N)) {}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key,
+ ElementCount EC)
+ : Key(std::string(Key)) {
+ raw_string_ostream OS(Val);
+ EC.print(OS);
+}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key,
+ InstructionCost C)
+ : Key(std::string(Key)) {
+ raw_string_ostream OS(Val);
+ C.print(OS);
+}
+
+DiagnosticInfoOptimizationBase::Argument::Argument(StringRef Key, DebugLoc Loc)
+ : Key(std::string(Key)), Loc(Loc) {
+ if (Loc) {
+ Val = (Loc->getFilename() + ":" + Twine(Loc.getLine()) + ":" +
+ Twine(Loc.getCol())).str();
+ } else {
+ Val = "<UNKNOWN LOCATION>";
+ }
+}
+
+void DiagnosticInfoOptimizationBase::print(DiagnosticPrinter &DP) const {
+ DP << getLocationStr() << ": " << getMsg();
+ if (Hotness)
+ DP << " (hotness: " << *Hotness << ")";
+}
+
+OptimizationRemark::OptimizationRemark(const char *PassName,
+ StringRef RemarkName,
+ const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
+ DK_OptimizationRemark, DS_Remark, PassName, RemarkName,
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
+
+OptimizationRemark::OptimizationRemark(const char *PassName,
+ StringRef RemarkName,
+ const Instruction *Inst)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemark, DS_Remark, PassName,
+ RemarkName, *Inst->getParent()->getParent(),
+ Inst->getDebugLoc(), Inst->getParent()) {}
+
+static const BasicBlock *getFirstFunctionBlock(const Function *Func) {
+ return Func->empty() ? nullptr : &Func->front();
+}
+
+OptimizationRemark::OptimizationRemark(const char *PassName,
+ StringRef RemarkName,
+ const Function *Func)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemark, DS_Remark, PassName,
+ RemarkName, *Func, Func->getSubprogram(),
+ getFirstFunctionBlock(Func)) {}
+
+bool OptimizationRemark::isEnabled() const {
+ const Function &Fn = getFunction();
+ LLVMContext &Ctx = Fn.getContext();
+ return Ctx.getDiagHandlerPtr()->isPassedOptRemarkEnabled(getPassName());
+}
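+
+// Emission sketch (illustrative): passes normally construct these remarks
+// lazily through an OptimizationRemarkEmitter, so the message is only built
+// when a consumer is enabled. DEBUG_TYPE, ORE, and I are assumed in scope.
+//
+//   ORE.emit([&]() {
+//     return OptimizationRemark(DEBUG_TYPE, "Vectorized", I)
+//            << "vectorized loop with width " << ore::NV("Width", 4);
+//   });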
+
+OptimizationRemarkMissed::OptimizationRemarkMissed(
+ const char *PassName, StringRef RemarkName, const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
+ DK_OptimizationRemarkMissed, DS_Remark, PassName, RemarkName,
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
+
+OptimizationRemarkMissed::OptimizationRemarkMissed(const char *PassName,
+ StringRef RemarkName,
+ const Instruction *Inst)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemarkMissed, DS_Remark,
+ PassName, RemarkName,
+ *Inst->getParent()->getParent(),
+ Inst->getDebugLoc(), Inst->getParent()) {}
+
+OptimizationRemarkMissed::OptimizationRemarkMissed(const char *PassName,
+ StringRef RemarkName,
+ const Function *Func)
+ : DiagnosticInfoIROptimization(
+ DK_OptimizationRemarkMissed, DS_Remark, PassName, RemarkName, *Func,
+ Func->getSubprogram(), getFirstFunctionBlock(Func)) {}
+
+bool OptimizationRemarkMissed::isEnabled() const {
+ const Function &Fn = getFunction();
+ LLVMContext &Ctx = Fn.getContext();
+ return Ctx.getDiagHandlerPtr()->isMissedOptRemarkEnabled(getPassName());
+}
+
+OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(
+ const char *PassName, StringRef RemarkName, const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
+ DK_OptimizationRemarkAnalysis, DS_Remark, PassName, RemarkName,
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
+
+OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(const char *PassName,
+ StringRef RemarkName,
+ const Instruction *Inst)
+ : DiagnosticInfoIROptimization(DK_OptimizationRemarkAnalysis, DS_Remark,
+ PassName, RemarkName,
+ *Inst->getParent()->getParent(),
+ Inst->getDebugLoc(), Inst->getParent()) {}
+
+OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(
+ enum DiagnosticKind Kind, const char *PassName, StringRef RemarkName,
+ const DiagnosticLocation &Loc, const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(Kind, DS_Remark, PassName, RemarkName,
+ *cast<BasicBlock>(CodeRegion)->getParent(),
+ Loc, CodeRegion) {}
+
+OptimizationRemarkAnalysis::OptimizationRemarkAnalysis(const char *PassName,
+ StringRef RemarkName,
+ const Function *Func)
+ : DiagnosticInfoIROptimization(
+ DK_OptimizationRemarkAnalysis, DS_Remark, PassName, RemarkName, *Func,
+ Func->getSubprogram(), getFirstFunctionBlock(Func)) {}
+
+bool OptimizationRemarkAnalysis::isEnabled() const {
+ const Function &Fn = getFunction();
+ LLVMContext &Ctx = Fn.getContext();
+ return Ctx.getDiagHandlerPtr()->isAnalysisRemarkEnabled(getPassName()) ||
+ shouldAlwaysPrint();
+}
+
+void DiagnosticInfoMIRParser::print(DiagnosticPrinter &DP) const {
+ DP << Diagnostic;
+}
+
+void DiagnosticInfoSrcMgr::print(DiagnosticPrinter &DP) const {
+ DP << Diagnostic;
+}
+
+DiagnosticInfoOptimizationFailure::DiagnosticInfoOptimizationFailure(
+ const char *PassName, StringRef RemarkName, const DiagnosticLocation &Loc,
+ const Value *CodeRegion)
+ : DiagnosticInfoIROptimization(
+ DK_OptimizationFailure, DS_Warning, PassName, RemarkName,
+ *cast<BasicBlock>(CodeRegion)->getParent(), Loc, CodeRegion) {}
+
+bool DiagnosticInfoOptimizationFailure::isEnabled() const {
+ // Only print warnings.
+ return getSeverity() == DS_Warning;
+}
+
+void DiagnosticInfoUnsupported::print(DiagnosticPrinter &DP) const {
+ std::string Str;
+ raw_string_ostream OS(Str);
+
+ OS << getLocationStr() << ": in function " << getFunction().getName() << ' '
+ << *getFunction().getFunctionType() << ": " << Msg << '\n';
+ OS.flush();
+ DP << Str;
+}
+
+void DiagnosticInfoISelFallback::print(DiagnosticPrinter &DP) const {
+ DP << "Instruction selection used fallback path for " << getFunction();
+}
+
+void DiagnosticInfoOptimizationBase::insert(StringRef S) {
+ Args.emplace_back(S);
+}
+
+void DiagnosticInfoOptimizationBase::insert(Argument A) {
+ Args.push_back(std::move(A));
+}
+
+void DiagnosticInfoOptimizationBase::insert(setIsVerbose V) {
+ IsVerbose = true;
+}
+
+void DiagnosticInfoOptimizationBase::insert(setExtraArgs EA) {
+ FirstExtraArgIndex = Args.size();
+}
+
+std::string DiagnosticInfoOptimizationBase::getMsg() const {
+ std::string Str;
+ raw_string_ostream OS(Str);
+ for (const DiagnosticInfoOptimizationBase::Argument &Arg :
+ make_range(Args.begin(), FirstExtraArgIndex == -1
+ ? Args.end()
+ : Args.begin() + FirstExtraArgIndex))
+ OS << Arg.Val;
+ return OS.str();
+}
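+
+// Example (illustrative): arguments streamed in after a setExtraArgs() marker
+// are kept for serialized remark output but excluded from this message.
+//
+//   R << "expensive" << setExtraArgs() << ore::NV("Cost", 5);
+//   // R.getMsg() == "expensive"; "Cost" still reaches the remark file.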
+
+DiagnosticInfoMisExpect::DiagnosticInfoMisExpect(const Instruction *Inst,
+ Twine &Msg)
+ : DiagnosticInfoWithLocationBase(DK_MisExpect, DS_Warning,
+ *Inst->getParent()->getParent(),
+ Inst->getDebugLoc()),
+ Msg(Msg) {}
+
+void DiagnosticInfoMisExpect::print(DiagnosticPrinter &DP) const {
+ DP << getLocationStr() << ": " << getMsg();
+}
+
+void OptimizationRemarkAnalysisFPCommute::anchor() {}
+void OptimizationRemarkAnalysisAliasing::anchor() {}
+
+void llvm::diagnoseDontCall(const CallInst &CI) {
+ auto *F = CI.getCalledFunction();
+ if (!F)
+ return;
+
+ for (int i = 0; i != 2; ++i) {
+ auto AttrName = i == 0 ? "dontcall-error" : "dontcall-warn";
+ auto Sev = i == 0 ? DS_Error : DS_Warning;
+
+ if (F->hasFnAttribute(AttrName)) {
+ unsigned LocCookie = 0;
+ auto A = F->getFnAttribute(AttrName);
+ if (MDNode *MD = CI.getMetadata("srcloc"))
+ LocCookie =
+ mdconst::extract<ConstantInt>(MD->getOperand(0))->getZExtValue();
+ DiagnosticInfoDontCall D(F->getName(), A.getValueAsString(), Sev,
+ LocCookie);
+ F->getContext().diagnose(D);
+ }
+ }
+}
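+
+// The attributes probed above are plain string function attributes, e.g.
+// (illustrative IR):
+//
+//   declare void @raise() "dontcall-error"="call the wrapper instead"
+//
+// A direct call to @raise then yields a DiagnosticInfoDontCall error whose
+// note is the attribute's value.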
+
+void DiagnosticInfoDontCall::print(DiagnosticPrinter &DP) const {
+ DP << "call to " << getFunctionName() << " marked \"dontcall-";
+ if (getSeverity() == DiagnosticSeverity::DS_Error)
+ DP << "error\"";
+ else
+ DP << "warn\"";
+ if (!getNote().empty())
+ DP << ": " << getNote();
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/DiagnosticPrinter.cpp b/contrib/llvm-project/llvm/lib/IR/DiagnosticPrinter.cpp
new file mode 100644
index 000000000000..49b8bbae53be
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/DiagnosticPrinter.cpp
@@ -0,0 +1,116 @@
+//===- llvm/IR/DiagnosticPrinter.cpp - Diagnostic Printer -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a diagnostic printer relying on raw_ostream.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(signed char C) {
+ Stream << C;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(StringRef Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const char *Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(
+ const std::string &Str) {
+ Stream << Str;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(
+ unsigned long long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(long long N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const void *P) {
+ Stream << P;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(unsigned int N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(int N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(double N) {
+ Stream << N;
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Twine &Str) {
+ Str.print(Stream);
+ return *this;
+}
+
+// IR related types.
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Value &V) {
+ Stream << V.getName();
+ return *this;
+}
+
+DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Module &M) {
+ Stream << M.getModuleIdentifier();
+ return *this;
+}
+
+// Other types.
+DiagnosticPrinter &
+DiagnosticPrinterRawOStream::operator<<(const SMDiagnostic &Diag) {
+ // We don't have to print the SMDiagnostic kind, as the diagnostic severity
+ // is printed by the diagnostic handler.
+ Diag.print("", Stream, /*ShowColors=*/true, /*ShowKindLabel=*/false);
+ return *this;
+}
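+
+// Usage sketch (illustrative): clients wrap an existing raw_ostream and hand
+// the printer to DiagnosticInfo::print.
+//
+//   void printDiag(const llvm::DiagnosticInfo &DI) {
+//     llvm::DiagnosticPrinterRawOStream DP(llvm::errs());
+//     DI.print(DP);
+//     llvm::errs() << '\n';
+//   }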
diff --git a/contrib/llvm-project/llvm/lib/IR/Dominators.cpp b/contrib/llvm-project/llvm/lib/IR/Dominators.cpp
new file mode 100644
index 000000000000..09be2a8ef605
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Dominators.cpp
@@ -0,0 +1,426 @@
+//===- Dominators.cpp - Dominator Calculation -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements simple dominator construction algorithms for finding
+// forward dominators. Postdominators are available in libanalysis, but are not
+// included in libvmcore, because they are not needed. Forward dominators are
+// needed to support the Verifier pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Dominators.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <cassert>
+
+namespace llvm {
+class Argument;
+class Constant;
+class Value;
+} // namespace llvm
+using namespace llvm;
+
+bool llvm::VerifyDomInfo = false;
+static cl::opt<bool, true>
+ VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo), cl::Hidden,
+ cl::desc("Verify dominator info (time consuming)"));
+
+#ifdef EXPENSIVE_CHECKS
+static constexpr bool ExpensiveChecksEnabled = true;
+#else
+static constexpr bool ExpensiveChecksEnabled = false;
+#endif
+
+bool BasicBlockEdge::isSingleEdge() const {
+ const Instruction *TI = Start->getTerminator();
+ unsigned NumEdgesToEnd = 0;
+ for (unsigned int i = 0, n = TI->getNumSuccessors(); i < n; ++i) {
+ if (TI->getSuccessor(i) == End)
+ ++NumEdgesToEnd;
+ if (NumEdgesToEnd >= 2)
+ return false;
+ }
+ assert(NumEdgesToEnd == 1);
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// DominatorTree Implementation
+//===----------------------------------------------------------------------===//
+//
+// Provide public access to DominatorTree information. Implementation details
+// can be found in Dominators.h, GenericDomTree.h, and
+// GenericDomTreeConstruction.h.
+//
+//===----------------------------------------------------------------------===//
+
+template class llvm::DomTreeNodeBase<BasicBlock>;
+template class llvm::DominatorTreeBase<BasicBlock, false>; // DomTreeBase
+template class llvm::DominatorTreeBase<BasicBlock, true>; // PostDomTreeBase
+
+template class llvm::cfg::Update<BasicBlock *>;
+
+template void llvm::DomTreeBuilder::Calculate<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT);
+template void
+llvm::DomTreeBuilder::CalculateWithUpdates<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT, BBUpdates U);
+
+template void llvm::DomTreeBuilder::Calculate<DomTreeBuilder::BBPostDomTree>(
+ DomTreeBuilder::BBPostDomTree &DT);
+// No CalculateWithUpdates<PostDomTree> instantiation, unless a usecase arises.
+
+template void llvm::DomTreeBuilder::InsertEdge<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT, BasicBlock *From, BasicBlock *To);
+template void llvm::DomTreeBuilder::InsertEdge<DomTreeBuilder::BBPostDomTree>(
+ DomTreeBuilder::BBPostDomTree &DT, BasicBlock *From, BasicBlock *To);
+
+template void llvm::DomTreeBuilder::DeleteEdge<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT, BasicBlock *From, BasicBlock *To);
+template void llvm::DomTreeBuilder::DeleteEdge<DomTreeBuilder::BBPostDomTree>(
+ DomTreeBuilder::BBPostDomTree &DT, BasicBlock *From, BasicBlock *To);
+
+template void llvm::DomTreeBuilder::ApplyUpdates<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT, DomTreeBuilder::BBDomTreeGraphDiff &,
+ DomTreeBuilder::BBDomTreeGraphDiff *);
+template void llvm::DomTreeBuilder::ApplyUpdates<DomTreeBuilder::BBPostDomTree>(
+ DomTreeBuilder::BBPostDomTree &DT, DomTreeBuilder::BBPostDomTreeGraphDiff &,
+ DomTreeBuilder::BBPostDomTreeGraphDiff *);
+
+template bool llvm::DomTreeBuilder::Verify<DomTreeBuilder::BBDomTree>(
+ const DomTreeBuilder::BBDomTree &DT,
+ DomTreeBuilder::BBDomTree::VerificationLevel VL);
+template bool llvm::DomTreeBuilder::Verify<DomTreeBuilder::BBPostDomTree>(
+ const DomTreeBuilder::BBPostDomTree &DT,
+ DomTreeBuilder::BBPostDomTree::VerificationLevel VL);
+
+bool DominatorTree::invalidate(Function &F, const PreservedAnalyses &PA,
+ FunctionAnalysisManager::Invalidator &) {
+ // Check whether the analysis, all analyses on functions, or the function's
+ // CFG have been preserved.
+ auto PAC = PA.getChecker<DominatorTreeAnalysis>();
+ return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>() ||
+ PAC.preservedSet<CFGAnalyses>());
+}
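+
+// Consequence sketch (illustrative): a transform that rewrites instructions
+// but leaves the CFG intact keeps this analysis alive by preserving the
+// CFGAnalyses set in its result.
+//
+//   PreservedAnalyses PA;
+//   PA.preserveSet<CFGAnalyses>();
+//   return PA;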
+
+bool DominatorTree::dominates(const BasicBlock *BB, const Use &U) const {
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ if (auto *PN = dyn_cast<PHINode>(UserInst))
+ // A phi use using a value from a block is dominated by the end of that
+ // block. Note that the phi's parent block may not be.
+ return dominates(BB, PN->getIncomingBlock(U));
+ else
+ return properlyDominates(BB, UserInst->getParent());
+}
+
+// dominates - Return true if Def dominates a use in User. This performs
+// the special checks necessary if Def and User are in the same basic block.
+// Note that Def doesn't dominate a use in Def itself!
+bool DominatorTree::dominates(const Value *DefV,
+ const Instruction *User) const {
+ const Instruction *Def = dyn_cast<Instruction>(DefV);
+ if (!Def) {
+ assert((isa<Argument>(DefV) || isa<Constant>(DefV)) &&
+ "Should be called with an instruction, argument or constant");
+ return true; // Arguments and constants dominate everything.
+ }
+
+ const BasicBlock *UseBB = User->getParent();
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Any unreachable use is dominated, even if Def == User.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ // An instruction doesn't dominate a use in itself.
+ if (Def == User)
+ return false;
+
+ // The value defined by an invoke dominates an instruction only if it
+ // dominates every instruction in UseBB.
+ // A PHI is dominated only if the instruction dominates every possible use in
+ // the UseBB.
+ if (isa<InvokeInst>(Def) || isa<CallBrInst>(Def) || isa<PHINode>(User))
+ return dominates(Def, UseBB);
+
+ if (DefBB != UseBB)
+ return dominates(DefBB, UseBB);
+
+ return Def->comesBefore(User);
+}
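+
+// Example (illustrative IR): with both instructions in one reachable block,
+//
+//   %a = add i32 %x, 1
+//   %b = mul i32 %a, 2
+//
+// dominates(%a, %b) is true via comesBefore, while dominates(%a, %a) is
+// false: an instruction never dominates a use in itself.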
+
+// Return true if Def would dominate a use in any instruction in UseBB.
+// Note that dominates(Def, Def->getParent()) is false.
+bool DominatorTree::dominates(const Instruction *Def,
+ const BasicBlock *UseBB) const {
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Any unreachable use is dominated, even if DefBB == UseBB.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ if (DefBB == UseBB)
+ return false;
+
+ // Invoke results are only usable in the normal destination, not in the
+ // exceptional destination.
+ if (const auto *II = dyn_cast<InvokeInst>(Def)) {
+ BasicBlock *NormalDest = II->getNormalDest();
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, UseBB);
+ }
+
+ // Callbr results are similarly only usable in the default destination.
+ if (const auto *CBI = dyn_cast<CallBrInst>(Def)) {
+ BasicBlock *NormalDest = CBI->getDefaultDest();
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, UseBB);
+ }
+
+ return dominates(DefBB, UseBB);
+}
+
+bool DominatorTree::dominates(const BasicBlockEdge &BBE,
+ const BasicBlock *UseBB) const {
+ // If the BB the edge ends in doesn't dominate the use BB, then the
+ // edge also doesn't.
+ const BasicBlock *Start = BBE.getStart();
+ const BasicBlock *End = BBE.getEnd();
+ if (!dominates(End, UseBB))
+ return false;
+
+ // Simple case: if the end BB has a single predecessor, the fact that it
+ // dominates the use block implies that the edge also does.
+ if (End->getSinglePredecessor())
+ return true;
+
+ // The normal edge from the invoke is critical. Conceptually, what we would
+ // like to do is split it and check if the new block dominates the use.
+ // With X being the new block, the graph would look like:
+ //
+ // DefBB
+ // /\ . .
+ // / \ . .
+ // / \ . .
+ // / \ | |
+ // A X B C
+ // | \ | /
+ // . \|/
+ // . NormalDest
+ // .
+ //
+ // Given the definition of dominance, NormalDest is dominated by X iff X
+ // dominates all of NormalDest's predecessors (X, B, C in the example). X
+ // trivially dominates itself, so we only have to find if it dominates the
+ // other predecessors. Since the only way out of X is via NormalDest, X can
+ // only properly dominate a node if NormalDest dominates that node too.
+ int IsDuplicateEdge = 0;
+ for (const BasicBlock *BB : predecessors(End)) {
+ if (BB == Start) {
+ // If there are multiple edges between Start and End, by definition they
+ // can't dominate anything.
+ if (IsDuplicateEdge++)
+ return false;
+ continue;
+ }
+
+ if (!dominates(End, BB))
+ return false;
+ }
+ return true;
+}
+
+bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const {
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ // A PHI in the end of the edge is dominated by it.
+ PHINode *PN = dyn_cast<PHINode>(UserInst);
+ if (PN && PN->getParent() == BBE.getEnd() &&
+ PN->getIncomingBlock(U) == BBE.getStart())
+ return true;
+
+ // Otherwise use the edge-dominates-block query, which
+ // handles the crazy critical edge cases properly.
+ const BasicBlock *UseBB;
+ if (PN)
+ UseBB = PN->getIncomingBlock(U);
+ else
+ UseBB = UserInst->getParent();
+ return dominates(BBE, UseBB);
+}
+
+bool DominatorTree::dominates(const Value *DefV, const Use &U) const {
+ const Instruction *Def = dyn_cast<Instruction>(DefV);
+ if (!Def) {
+ assert((isa<Argument>(DefV) || isa<Constant>(DefV)) &&
+ "Should be called with an instruction, argument or constant");
+ return true; // Arguments and constants dominate everything.
+ }
+
+ Instruction *UserInst = cast<Instruction>(U.getUser());
+ const BasicBlock *DefBB = Def->getParent();
+
+ // Determine the block in which the use happens. PHI nodes use
+ // their operands on edges; simulate this by thinking of the use
+ // happening at the end of the predecessor block.
+ const BasicBlock *UseBB;
+ if (PHINode *PN = dyn_cast<PHINode>(UserInst))
+ UseBB = PN->getIncomingBlock(U);
+ else
+ UseBB = UserInst->getParent();
+
+ // Any unreachable use is dominated, even if Def == User.
+ if (!isReachableFromEntry(UseBB))
+ return true;
+
+ // Unreachable definitions don't dominate anything.
+ if (!isReachableFromEntry(DefBB))
+ return false;
+
+ // Invoke instructions define their return values on the edges to their normal
+ // successors, so we have to handle them specially.
+ // Among other things, this means they don't dominate anything in
+ // their own block, except possibly a phi, so we don't need to
+ // walk the block in any case.
+ if (const InvokeInst *II = dyn_cast<InvokeInst>(Def)) {
+ BasicBlock *NormalDest = II->getNormalDest();
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, U);
+ }
+
+ // Callbr results are similarly only usable in the default destination.
+ if (const auto *CBI = dyn_cast<CallBrInst>(Def)) {
+ BasicBlock *NormalDest = CBI->getDefaultDest();
+ BasicBlockEdge E(DefBB, NormalDest);
+ return dominates(E, U);
+ }
+
+ // If the def and use are in different blocks, do a simple CFG dominator
+ // tree query.
+ if (DefBB != UseBB)
+ return dominates(DefBB, UseBB);
+
+ // Ok, def and use are in the same block. If the def is an invoke, it
+ // doesn't dominate anything in the block. If it's a PHI, it dominates
+ // everything in the block.
+ if (isa<PHINode>(UserInst))
+ return true;
+
+ return Def->comesBefore(UserInst);
+}
+
+bool DominatorTree::isReachableFromEntry(const Use &U) const {
+ Instruction *I = dyn_cast<Instruction>(U.getUser());
+
+ // ConstantExprs aren't really reachable from the entry block, but they
+ // don't need to be treated like unreachable code either.
+ if (!I) return true;
+
+ // PHI nodes use their operands on their incoming edges.
+ if (PHINode *PN = dyn_cast<PHINode>(I))
+ return isReachableFromEntry(PN->getIncomingBlock(U));
+
+ // Everything else uses their operands in their own block.
+ return isReachableFromEntry(I->getParent());
+}
+
+// Edge BBE1 dominates edge BBE2 if they match or BBE1 dominates start of BBE2.
+bool DominatorTree::dominates(const BasicBlockEdge &BBE1,
+ const BasicBlockEdge &BBE2) const {
+ if (BBE1.getStart() == BBE2.getStart() && BBE1.getEnd() == BBE2.getEnd())
+ return true;
+ return dominates(BBE1, BBE2.getStart());
+}
+
+//===----------------------------------------------------------------------===//
+// DominatorTreeAnalysis and related pass implementations
+//===----------------------------------------------------------------------===//
+//
+// This implements the DominatorTreeAnalysis which is used with the new pass
+// manager. It also implements some methods from utility passes.
+//
+//===----------------------------------------------------------------------===//
+
+DominatorTree DominatorTreeAnalysis::run(Function &F,
+ FunctionAnalysisManager &) {
+ DominatorTree DT;
+ DT.recalculate(F);
+ return DT;
+}
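+
+// Usage sketch (illustrative): a new-pass-manager pass asks its analysis
+// manager for the tree instead of recalculating it. MyPass is hypothetical.
+//
+//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
+//     DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
+//     // ... use DT ...
+//     return PreservedAnalyses::all();
+//   }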
+
+AnalysisKey DominatorTreeAnalysis::Key;
+
+DominatorTreePrinterPass::DominatorTreePrinterPass(raw_ostream &OS) : OS(OS) {}
+
+PreservedAnalyses DominatorTreePrinterPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ OS << "DominatorTree for function: " << F.getName() << "\n";
+ AM.getResult<DominatorTreeAnalysis>(F).print(OS);
+
+ return PreservedAnalyses::all();
+}
+
+PreservedAnalyses DominatorTreeVerifierPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
+ assert(DT.verify());
+ (void)DT;
+ return PreservedAnalyses::all();
+}
+
+//===----------------------------------------------------------------------===//
+// DominatorTreeWrapperPass Implementation
+//===----------------------------------------------------------------------===//
+//
+// The implementation details of the wrapper pass that holds a DominatorTree
+// suitable for use with the legacy pass manager.
+//
+//===----------------------------------------------------------------------===//
+
+char DominatorTreeWrapperPass::ID = 0;
+
+DominatorTreeWrapperPass::DominatorTreeWrapperPass() : FunctionPass(ID) {
+ initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry());
+}
+
+INITIALIZE_PASS(DominatorTreeWrapperPass, "domtree",
+ "Dominator Tree Construction", true, true)
+
+bool DominatorTreeWrapperPass::runOnFunction(Function &F) {
+ DT.recalculate(F);
+ return false;
+}
+
+void DominatorTreeWrapperPass::verifyAnalysis() const {
+ if (VerifyDomInfo)
+ assert(DT.verify(DominatorTree::VerificationLevel::Full));
+ else if (ExpensiveChecksEnabled)
+ assert(DT.verify(DominatorTree::VerificationLevel::Basic));
+}
+
+void DominatorTreeWrapperPass::print(raw_ostream &OS, const Module *) const {
+ DT.print(OS);
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/FPEnv.cpp b/contrib/llvm-project/llvm/lib/IR/FPEnv.cpp
new file mode 100644
index 000000000000..48ee84080e98
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/FPEnv.cpp
@@ -0,0 +1,130 @@
+//===-- FPEnv.cpp ---- FP Environment -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the implementations of entities that describe floating
+/// point environment.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/FPEnv.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+
+namespace llvm {
+
+Optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg) {
+ // For dynamic rounding mode, we use round to nearest but we will set the
+ // 'exact' SDNodeFlag so that the value will not be rounded.
+ return StringSwitch<Optional<RoundingMode>>(RoundingArg)
+ .Case("round.dynamic", RoundingMode::Dynamic)
+ .Case("round.tonearest", RoundingMode::NearestTiesToEven)
+ .Case("round.tonearestaway", RoundingMode::NearestTiesToAway)
+ .Case("round.downward", RoundingMode::TowardNegative)
+ .Case("round.upward", RoundingMode::TowardPositive)
+ .Case("round.towardzero", RoundingMode::TowardZero)
+ .Default(None);
+}
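+
+// The strings above name the rounding-mode metadata arguments of constrained
+// intrinsics, e.g. (illustrative IR):
+//
+//   %r = call double @llvm.experimental.constrained.fadd.f64(
+//            double %a, double %b,
+//            metadata !"round.dynamic", metadata !"fpexcept.strict")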
+
+Optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding) {
+ Optional<StringRef> RoundingStr = None;
+ switch (UseRounding) {
+ case RoundingMode::Dynamic:
+ RoundingStr = "round.dynamic";
+ break;
+ case RoundingMode::NearestTiesToEven:
+ RoundingStr = "round.tonearest";
+ break;
+ case RoundingMode::NearestTiesToAway:
+ RoundingStr = "round.tonearestaway";
+ break;
+ case RoundingMode::TowardNegative:
+ RoundingStr = "round.downward";
+ break;
+ case RoundingMode::TowardPositive:
+ RoundingStr = "round.upward";
+ break;
+ case RoundingMode::TowardZero:
+ RoundingStr = "round.towardzero";
+ break;
+ default:
+ break;
+ }
+ return RoundingStr;
+}
+
+Optional<fp::ExceptionBehavior>
+convertStrToExceptionBehavior(StringRef ExceptionArg) {
+ return StringSwitch<Optional<fp::ExceptionBehavior>>(ExceptionArg)
+ .Case("fpexcept.ignore", fp::ebIgnore)
+ .Case("fpexcept.maytrap", fp::ebMayTrap)
+ .Case("fpexcept.strict", fp::ebStrict)
+ .Default(None);
+}
+
+Optional<StringRef>
+convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept) {
+ Optional<StringRef> ExceptStr = None;
+ switch (UseExcept) {
+ case fp::ebStrict:
+ ExceptStr = "fpexcept.strict";
+ break;
+ case fp::ebIgnore:
+ ExceptStr = "fpexcept.ignore";
+ break;
+ case fp::ebMayTrap:
+ ExceptStr = "fpexcept.maytrap";
+ break;
+ }
+ return ExceptStr;
+}
+
+Intrinsic::ID getConstrainedIntrinsicID(const Instruction &Instr) {
+ Intrinsic::ID IID = Intrinsic::not_intrinsic;
+ switch (Instr.getOpcode()) {
+ case Instruction::FCmp:
+ // Unlike other instructions FCmp can be mapped to one of two intrinsic
+ // functions. We choose the non-signaling variant.
+ IID = Intrinsic::experimental_constrained_fcmp;
+ break;
+
+ // Instructions
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Instruction::NAME: \
+ IID = Intrinsic::INTRINSIC; \
+ break;
+#define FUNCTION(NAME, NARG, ROUND_MODE, INTRINSIC)
+#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)
+#include "llvm/IR/ConstrainedOps.def"
+
+ // Intrinsic calls.
+ case Instruction::Call:
+ if (auto *IntrinCall = dyn_cast<IntrinsicInst>(&Instr)) {
+ switch (IntrinCall->getIntrinsicID()) {
+#define FUNCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::NAME: \
+ IID = Intrinsic::INTRINSIC; \
+ break;
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)
+#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)
+#include "llvm/IR/ConstrainedOps.def"
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return IID;
+}
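+
+// Lookup sketch (illustrative): code converting a function to strictfp might
+// use this helper to find the replacement intrinsic for each FP instruction.
+//
+//   Intrinsic::ID IID = getConstrainedIntrinsicID(Inst);
+//   if (IID != Intrinsic::not_intrinsic)
+//     /* emit the matching llvm.experimental.constrained.* call */;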
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/IR/Function.cpp b/contrib/llvm-project/llvm/lib/IR/Function.cpp
new file mode 100644
index 000000000000..53df94366760
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Function.cpp
@@ -0,0 +1,2056 @@
+//===- Function.cpp - Implement the Global object classes -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Function class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Function.h"
+#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/AbstractCallSite.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/IntrinsicsARM.h"
+#include "llvm/IR/IntrinsicsBPF.h"
+#include "llvm/IR/IntrinsicsDirectX.h"
+#include "llvm/IR/IntrinsicsHexagon.h"
+#include "llvm/IR/IntrinsicsMips.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
+#include "llvm/IR/IntrinsicsPowerPC.h"
+#include "llvm/IR/IntrinsicsR600.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
+#include "llvm/IR/IntrinsicsS390.h"
+#include "llvm/IR/IntrinsicsVE.h"
+#include "llvm/IR/IntrinsicsWebAssembly.h"
+#include "llvm/IR/IntrinsicsX86.h"
+#include "llvm/IR/IntrinsicsXCore.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <string>
+
+using namespace llvm;
+using ProfileCount = Function::ProfileCount;
+
+// Explicit instantiations of SymbolTableListTraits since some of the methods
+// are not in the public header file...
+template class llvm::SymbolTableListTraits<BasicBlock>;
+
+static cl::opt<unsigned> NonGlobalValueMaxNameSize(
+ "non-global-value-max-name-size", cl::Hidden, cl::init(1024),
+ cl::desc("Maximum size for the name of non-global values."));
+
+//===----------------------------------------------------------------------===//
+// Argument Implementation
+//===----------------------------------------------------------------------===//
+
+Argument::Argument(Type *Ty, const Twine &Name, Function *Par, unsigned ArgNo)
+ : Value(Ty, Value::ArgumentVal), Parent(Par), ArgNo(ArgNo) {
+ setName(Name);
+}
+
+void Argument::setParent(Function *parent) {
+ Parent = parent;
+}
+
+bool Argument::hasNonNullAttr(bool AllowUndefOrPoison) const {
+ if (!getType()->isPointerTy()) return false;
+ if (getParent()->hasParamAttribute(getArgNo(), Attribute::NonNull) &&
+ (AllowUndefOrPoison ||
+ getParent()->hasParamAttribute(getArgNo(), Attribute::NoUndef)))
+ return true;
+ else if (getDereferenceableBytes() > 0 &&
+ !NullPointerIsDefined(getParent(),
+ getType()->getPointerAddressSpace()))
+ return true;
+ return false;
+}
+
+bool Argument::hasByValAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return hasAttribute(Attribute::ByVal);
+}
+
+bool Argument::hasByRefAttr() const {
+ if (!getType()->isPointerTy())
+ return false;
+ return hasAttribute(Attribute::ByRef);
+}
+
+bool Argument::hasSwiftSelfAttr() const {
+ return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftSelf);
+}
+
+bool Argument::hasSwiftErrorAttr() const {
+ return getParent()->hasParamAttribute(getArgNo(), Attribute::SwiftError);
+}
+
+bool Argument::hasInAllocaAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return hasAttribute(Attribute::InAlloca);
+}
+
+bool Argument::hasPreallocatedAttr() const {
+ if (!getType()->isPointerTy())
+ return false;
+ return hasAttribute(Attribute::Preallocated);
+}
+
+bool Argument::hasPassPointeeByValueCopyAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ AttributeList Attrs = getParent()->getAttributes();
+ return Attrs.hasParamAttr(getArgNo(), Attribute::ByVal) ||
+ Attrs.hasParamAttr(getArgNo(), Attribute::InAlloca) ||
+ Attrs.hasParamAttr(getArgNo(), Attribute::Preallocated);
+}
+
+bool Argument::hasPointeeInMemoryValueAttr() const {
+ if (!getType()->isPointerTy())
+ return false;
+ AttributeList Attrs = getParent()->getAttributes();
+ return Attrs.hasParamAttr(getArgNo(), Attribute::ByVal) ||
+ Attrs.hasParamAttr(getArgNo(), Attribute::StructRet) ||
+ Attrs.hasParamAttr(getArgNo(), Attribute::InAlloca) ||
+ Attrs.hasParamAttr(getArgNo(), Attribute::Preallocated) ||
+ Attrs.hasParamAttr(getArgNo(), Attribute::ByRef);
+}
+
+/// For a byval, sret, inalloca, or preallocated parameter, get the in-memory
+/// parameter type.
+static Type *getMemoryParamAllocType(AttributeSet ParamAttrs) {
+ // FIXME: All the type carrying attributes are mutually exclusive, so there
+ // should be a single query to get the stored type that handles any of them.
+ if (Type *ByValTy = ParamAttrs.getByValType())
+ return ByValTy;
+ if (Type *ByRefTy = ParamAttrs.getByRefType())
+ return ByRefTy;
+ if (Type *PreAllocTy = ParamAttrs.getPreallocatedType())
+ return PreAllocTy;
+ if (Type *InAllocaTy = ParamAttrs.getInAllocaType())
+ return InAllocaTy;
+ if (Type *SRetTy = ParamAttrs.getStructRetType())
+ return SRetTy;
+
+ return nullptr;
+}
+
+uint64_t Argument::getPassPointeeByValueCopySize(const DataLayout &DL) const {
+ AttributeSet ParamAttrs =
+ getParent()->getAttributes().getParamAttrs(getArgNo());
+ if (Type *MemTy = getMemoryParamAllocType(ParamAttrs))
+ return DL.getTypeAllocSize(MemTy);
+ return 0;
+}
+
+Type *Argument::getPointeeInMemoryValueType() const {
+ AttributeSet ParamAttrs =
+ getParent()->getAttributes().getParamAttrs(getArgNo());
+ return getMemoryParamAllocType(ParamAttrs);
+}
+
+uint64_t Argument::getParamAlignment() const {
+ assert(getType()->isPointerTy() && "Only pointers have alignments");
+ return getParent()->getParamAlignment(getArgNo());
+}
+
+MaybeAlign Argument::getParamAlign() const {
+ assert(getType()->isPointerTy() && "Only pointers have alignments");
+ return getParent()->getParamAlign(getArgNo());
+}
+
+MaybeAlign Argument::getParamStackAlign() const {
+ return getParent()->getParamStackAlign(getArgNo());
+}
+
+Type *Argument::getParamByValType() const {
+ assert(getType()->isPointerTy() && "Only pointers have byval types");
+ return getParent()->getParamByValType(getArgNo());
+}
+
+Type *Argument::getParamStructRetType() const {
+ assert(getType()->isPointerTy() && "Only pointers have sret types");
+ return getParent()->getParamStructRetType(getArgNo());
+}
+
+Type *Argument::getParamByRefType() const {
+ assert(getType()->isPointerTy() && "Only pointers have byref types");
+ return getParent()->getParamByRefType(getArgNo());
+}
+
+Type *Argument::getParamInAllocaType() const {
+ assert(getType()->isPointerTy() && "Only pointers have inalloca types");
+ return getParent()->getParamInAllocaType(getArgNo());
+}
+
+uint64_t Argument::getDereferenceableBytes() const {
+ assert(getType()->isPointerTy() &&
+ "Only pointers have dereferenceable bytes");
+ return getParent()->getParamDereferenceableBytes(getArgNo());
+}
+
+uint64_t Argument::getDereferenceableOrNullBytes() const {
+ assert(getType()->isPointerTy() &&
+ "Only pointers have dereferenceable bytes");
+ return getParent()->getParamDereferenceableOrNullBytes(getArgNo());
+}
+
+bool Argument::hasNestAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return hasAttribute(Attribute::Nest);
+}
+
+bool Argument::hasNoAliasAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return hasAttribute(Attribute::NoAlias);
+}
+
+bool Argument::hasNoCaptureAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return hasAttribute(Attribute::NoCapture);
+}
+
+bool Argument::hasNoFreeAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return hasAttribute(Attribute::NoFree);
+}
+
+bool Argument::hasStructRetAttr() const {
+ if (!getType()->isPointerTy()) return false;
+ return hasAttribute(Attribute::StructRet);
+}
+
+bool Argument::hasInRegAttr() const {
+ return hasAttribute(Attribute::InReg);
+}
+
+bool Argument::hasReturnedAttr() const {
+ return hasAttribute(Attribute::Returned);
+}
+
+bool Argument::hasZExtAttr() const {
+ return hasAttribute(Attribute::ZExt);
+}
+
+bool Argument::hasSExtAttr() const {
+ return hasAttribute(Attribute::SExt);
+}
+
+bool Argument::onlyReadsMemory() const {
+ AttributeList Attrs = getParent()->getAttributes();
+ return Attrs.hasParamAttr(getArgNo(), Attribute::ReadOnly) ||
+ Attrs.hasParamAttr(getArgNo(), Attribute::ReadNone);
+}
+
+void Argument::addAttrs(AttrBuilder &B) {
+ AttributeList AL = getParent()->getAttributes();
+ AL = AL.addParamAttributes(Parent->getContext(), getArgNo(), B);
+ getParent()->setAttributes(AL);
+}
+
+void Argument::addAttr(Attribute::AttrKind Kind) {
+ getParent()->addParamAttr(getArgNo(), Kind);
+}
+
+void Argument::addAttr(Attribute Attr) {
+ getParent()->addParamAttr(getArgNo(), Attr);
+}
+
+void Argument::removeAttr(Attribute::AttrKind Kind) {
+ getParent()->removeParamAttr(getArgNo(), Kind);
+}
+
+void Argument::removeAttrs(const AttributeMask &AM) {
+ AttributeList AL = getParent()->getAttributes();
+ AL = AL.removeParamAttributes(Parent->getContext(), getArgNo(), AM);
+ getParent()->setAttributes(AL);
+}
+
+bool Argument::hasAttribute(Attribute::AttrKind Kind) const {
+ return getParent()->hasParamAttribute(getArgNo(), Kind);
+}
+
+Attribute Argument::getAttribute(Attribute::AttrKind Kind) const {
+ return getParent()->getParamAttribute(getArgNo(), Kind);
+}
+
+//===----------------------------------------------------------------------===//
+// Helper Methods in Function
+//===----------------------------------------------------------------------===//
+
+LLVMContext &Function::getContext() const {
+ return getType()->getContext();
+}
+
+unsigned Function::getInstructionCount() const {
+ unsigned NumInstrs = 0;
+ for (const BasicBlock &BB : BasicBlocks)
+ NumInstrs += std::distance(BB.instructionsWithoutDebug().begin(),
+ BB.instructionsWithoutDebug().end());
+ return NumInstrs;
+}
+
+Function *Function::Create(FunctionType *Ty, LinkageTypes Linkage,
+ const Twine &N, Module &M) {
+ return Create(Ty, Linkage, M.getDataLayout().getProgramAddressSpace(), N, &M);
+}
+
+Function *Function::createWithDefaultAttr(FunctionType *Ty,
+ LinkageTypes Linkage,
+ unsigned AddrSpace, const Twine &N,
+ Module *M) {
+ auto *F = new Function(Ty, Linkage, AddrSpace, N, M);
+ AttrBuilder B(F->getContext());
+ UWTableKind UWTable = M->getUwtable();
+ if (UWTable != UWTableKind::None)
+ B.addUWTableAttr(UWTable);
+ switch (M->getFramePointer()) {
+ case FramePointerKind::None:
+ // 0 ("none") is the default.
+ break;
+ case FramePointerKind::NonLeaf:
+ B.addAttribute("frame-pointer", "non-leaf");
+ break;
+ case FramePointerKind::All:
+ B.addAttribute("frame-pointer", "all");
+ break;
+ }
+ F->addFnAttrs(B);
+ return F;
+}
+
+void Function::removeFromParent() {
+ getParent()->getFunctionList().remove(getIterator());
+}
+
+void Function::eraseFromParent() {
+ getParent()->getFunctionList().erase(getIterator());
+}
+
+//===----------------------------------------------------------------------===//
+// Function Implementation
+//===----------------------------------------------------------------------===//
+
+static unsigned computeAddrSpace(unsigned AddrSpace, Module *M) {
+ // If AS == -1 and we are passed a valid module pointer we place the function
+ // in the program address space. Otherwise we default to AS0.
+ if (AddrSpace == static_cast<unsigned>(-1))
+ return M ? M->getDataLayout().getProgramAddressSpace() : 0;
+ return AddrSpace;
+}
+
+Function::Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
+ const Twine &name, Module *ParentModule)
+ : GlobalObject(Ty, Value::FunctionVal,
+ OperandTraits<Function>::op_begin(this), 0, Linkage, name,
+ computeAddrSpace(AddrSpace, ParentModule)),
+ NumArgs(Ty->getNumParams()) {
+ assert(FunctionType::isValidReturnType(getReturnType()) &&
+ "invalid return type");
+ setGlobalObjectSubClassData(0);
+
+ // We only need a symbol table for a function if the context keeps value names
+ if (!getContext().shouldDiscardValueNames())
+ SymTab = std::make_unique<ValueSymbolTable>(NonGlobalValueMaxNameSize);
+
+ // If the function has arguments, mark them as lazily built.
+ if (Ty->getNumParams())
+ setValueSubclassData(1); // Set the "has lazy arguments" bit.
+
+ if (ParentModule)
+ ParentModule->getFunctionList().push_back(this);
+
+ HasLLVMReservedName = getName().startswith("llvm.");
+ // Ensure intrinsics have the right parameter attributes.
+ // Note, the IntID field will have been set in Value::setName if this function
+ // name is a valid intrinsic ID.
+ if (IntID)
+ setAttributes(Intrinsic::getAttributes(getContext(), IntID));
+}
+
+Function::~Function() {
+ dropAllReferences(); // After this it is safe to delete instructions.
+
+ // Delete all of the method arguments and unlink from symbol table...
+ if (Arguments)
+ clearArguments();
+
+ // Remove the function from the on-the-side GC table.
+ clearGC();
+}
+
+void Function::BuildLazyArguments() const {
+ // Create the arguments vector, all arguments start out unnamed.
+ auto *FT = getFunctionType();
+ if (NumArgs > 0) {
+ Arguments = std::allocator<Argument>().allocate(NumArgs);
+ for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+ Type *ArgTy = FT->getParamType(i);
+ assert(!ArgTy->isVoidTy() && "Cannot have void typed arguments!");
+ new (Arguments + i) Argument(ArgTy, "", const_cast<Function *>(this), i);
+ }
+ }
+
+ // Clear the lazy arguments bit.
+ unsigned SDC = getSubclassDataFromValue();
+ SDC &= ~(1 << 0);
+ const_cast<Function*>(this)->setValueSubclassData(SDC);
+ assert(!hasLazyArguments());
+}
+
+static MutableArrayRef<Argument> makeArgArray(Argument *Args, size_t Count) {
+ return MutableArrayRef<Argument>(Args, Count);
+}
+
+bool Function::isConstrainedFPIntrinsic() const {
+ switch (getIntrinsicID()) {
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC:
+#include "llvm/IR/ConstrainedOps.def"
+ return true;
+#undef INSTRUCTION
+ default:
+ return false;
+ }
+}
+
+void Function::clearArguments() {
+ for (Argument &A : makeArgArray(Arguments, NumArgs)) {
+ A.setName("");
+ A.~Argument();
+ }
+ std::allocator<Argument>().deallocate(Arguments, NumArgs);
+ Arguments = nullptr;
+}
+
+void Function::stealArgumentListFrom(Function &Src) {
+ assert(isDeclaration() && "Expected no references to current arguments");
+
+ // Drop the current arguments, if any, and set the lazy argument bit.
+ if (!hasLazyArguments()) {
+ assert(llvm::all_of(makeArgArray(Arguments, NumArgs),
+ [](const Argument &A) { return A.use_empty(); }) &&
+ "Expected arguments to be unused in declaration");
+ clearArguments();
+ setValueSubclassData(getSubclassDataFromValue() | (1 << 0));
+ }
+
+ // Nothing to steal if Src has lazy arguments.
+ if (Src.hasLazyArguments())
+ return;
+
+ // Steal arguments from Src, and fix the lazy argument bits.
+ assert(arg_size() == Src.arg_size());
+ Arguments = Src.Arguments;
+ Src.Arguments = nullptr;
+ for (Argument &A : makeArgArray(Arguments, NumArgs)) {
+ // FIXME: This does the work of transferNodesFromList inefficiently.
+ SmallString<128> Name;
+ if (A.hasName())
+ Name = A.getName();
+ if (!Name.empty())
+ A.setName("");
+ A.setParent(this);
+ if (!Name.empty())
+ A.setName(Name);
+ }
+
+ setValueSubclassData(getSubclassDataFromValue() & ~(1 << 0));
+ assert(!hasLazyArguments());
+ Src.setValueSubclassData(Src.getSubclassDataFromValue() | (1 << 0));
+}
+
+// dropAllReferences() - This function causes all the subinstructions to "let
+// go" of all references that they are maintaining. This allows one to
+// 'delete' a whole class at a time, even though there may be circular
+// references... first all references are dropped, and all use counts go to
+// zero. Then everything is deleted for real. Note that no operations are
+// valid on an object that has "dropped all references", except operator
+// delete.
+//
+void Function::dropAllReferences() {
+ setIsMaterializable(false);
+
+ for (BasicBlock &BB : *this)
+ BB.dropAllReferences();
+
+ // Delete all basic blocks. They are now unused, except possibly by
+ // blockaddresses, but BasicBlock's destructor takes care of those.
+ while (!BasicBlocks.empty())
+ BasicBlocks.begin()->eraseFromParent();
+
+ // Drop uses of any optional data (real or placeholder).
+ if (getNumOperands()) {
+ User::dropAllReferences();
+ setNumHungOffUseOperands(0);
+ setValueSubclassData(getSubclassDataFromValue() & ~0xe);
+ }
+
+ // Metadata is stored in a side-table.
+ clearMetadata();
+}
+
+void Function::addAttributeAtIndex(unsigned i, Attribute Attr) {
+ AttributeSets = AttributeSets.addAttributeAtIndex(getContext(), i, Attr);
+}
+
+void Function::addFnAttr(Attribute::AttrKind Kind) {
+ AttributeSets = AttributeSets.addFnAttribute(getContext(), Kind);
+}
+
+void Function::addFnAttr(StringRef Kind, StringRef Val) {
+ AttributeSets = AttributeSets.addFnAttribute(getContext(), Kind, Val);
+}
+
+void Function::addFnAttr(Attribute Attr) {
+ AttributeSets = AttributeSets.addFnAttribute(getContext(), Attr);
+}
+
+void Function::addFnAttrs(const AttrBuilder &Attrs) {
+ AttributeSets = AttributeSets.addFnAttributes(getContext(), Attrs);
+}
+
+void Function::addRetAttr(Attribute::AttrKind Kind) {
+ AttributeSets = AttributeSets.addRetAttribute(getContext(), Kind);
+}
+
+void Function::addRetAttr(Attribute Attr) {
+ AttributeSets = AttributeSets.addRetAttribute(getContext(), Attr);
+}
+
+void Function::addRetAttrs(const AttrBuilder &Attrs) {
+ AttributeSets = AttributeSets.addRetAttributes(getContext(), Attrs);
+}
+
+void Function::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ AttributeSets = AttributeSets.addParamAttribute(getContext(), ArgNo, Kind);
+}
+
+void Function::addParamAttr(unsigned ArgNo, Attribute Attr) {
+ AttributeSets = AttributeSets.addParamAttribute(getContext(), ArgNo, Attr);
+}
+
+void Function::addParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs) {
+ AttributeSets = AttributeSets.addParamAttributes(getContext(), ArgNo, Attrs);
+}
+
+void Function::removeAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
+ AttributeSets = AttributeSets.removeAttributeAtIndex(getContext(), i, Kind);
+}
+
+void Function::removeAttributeAtIndex(unsigned i, StringRef Kind) {
+ AttributeSets = AttributeSets.removeAttributeAtIndex(getContext(), i, Kind);
+}
+
+void Function::removeFnAttr(Attribute::AttrKind Kind) {
+ AttributeSets = AttributeSets.removeFnAttribute(getContext(), Kind);
+}
+
+void Function::removeFnAttr(StringRef Kind) {
+ AttributeSets = AttributeSets.removeFnAttribute(getContext(), Kind);
+}
+
+void Function::removeFnAttrs(const AttributeMask &AM) {
+ AttributeSets = AttributeSets.removeFnAttributes(getContext(), AM);
+}
+
+void Function::removeRetAttr(Attribute::AttrKind Kind) {
+ AttributeSets = AttributeSets.removeRetAttribute(getContext(), Kind);
+}
+
+void Function::removeRetAttr(StringRef Kind) {
+ AttributeSets = AttributeSets.removeRetAttribute(getContext(), Kind);
+}
+
+void Function::removeRetAttrs(const AttributeMask &Attrs) {
+ AttributeSets = AttributeSets.removeRetAttributes(getContext(), Attrs);
+}
+
+void Function::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
+ AttributeSets = AttributeSets.removeParamAttribute(getContext(), ArgNo, Kind);
+}
+
+void Function::removeParamAttr(unsigned ArgNo, StringRef Kind) {
+ AttributeSets = AttributeSets.removeParamAttribute(getContext(), ArgNo, Kind);
+}
+
+void Function::removeParamAttrs(unsigned ArgNo, const AttributeMask &Attrs) {
+ AttributeSets =
+ AttributeSets.removeParamAttributes(getContext(), ArgNo, Attrs);
+}
+
+void Function::addDereferenceableParamAttr(unsigned ArgNo, uint64_t Bytes) {
+ AttributeSets =
+ AttributeSets.addDereferenceableParamAttr(getContext(), ArgNo, Bytes);
+}
+
+bool Function::hasFnAttribute(Attribute::AttrKind Kind) const {
+ return AttributeSets.hasFnAttr(Kind);
+}
+
+bool Function::hasFnAttribute(StringRef Kind) const {
+ return AttributeSets.hasFnAttr(Kind);
+}
+
+bool Function::hasRetAttribute(Attribute::AttrKind Kind) const {
+ return AttributeSets.hasRetAttr(Kind);
+}
+
+bool Function::hasParamAttribute(unsigned ArgNo,
+ Attribute::AttrKind Kind) const {
+ return AttributeSets.hasParamAttr(ArgNo, Kind);
+}
+
+Attribute Function::getAttributeAtIndex(unsigned i,
+ Attribute::AttrKind Kind) const {
+ return AttributeSets.getAttributeAtIndex(i, Kind);
+}
+
+Attribute Function::getAttributeAtIndex(unsigned i, StringRef Kind) const {
+ return AttributeSets.getAttributeAtIndex(i, Kind);
+}
+
+Attribute Function::getFnAttribute(Attribute::AttrKind Kind) const {
+ return AttributeSets.getFnAttr(Kind);
+}
+
+Attribute Function::getFnAttribute(StringRef Kind) const {
+ return AttributeSets.getFnAttr(Kind);
+}
+
+/// Gets the specified attribute from the list of attributes.
+Attribute Function::getParamAttribute(unsigned ArgNo,
+ Attribute::AttrKind Kind) const {
+ return AttributeSets.getParamAttr(ArgNo, Kind);
+}
+
+void Function::addDereferenceableOrNullParamAttr(unsigned ArgNo,
+ uint64_t Bytes) {
+ AttributeSets = AttributeSets.addDereferenceableOrNullParamAttr(getContext(),
+ ArgNo, Bytes);
+}
+
+DenormalMode Function::getDenormalMode(const fltSemantics &FPType) const {
+ if (&FPType == &APFloat::IEEEsingle()) {
+ Attribute Attr = getFnAttribute("denormal-fp-math-f32");
+ StringRef Val = Attr.getValueAsString();
+ if (!Val.empty())
+ return parseDenormalFPAttribute(Val);
+
+ // If the f32 variant of the attribute isn't specified, try to use the
+ // generic one.
+ }
+
+ Attribute Attr = getFnAttribute("denormal-fp-math");
+ return parseDenormalFPAttribute(Attr.getValueAsString());
+}
+
+const std::string &Function::getGC() const {
+ assert(hasGC() && "Function has no collector");
+ return getContext().getGC(*this);
+}
+
+void Function::setGC(std::string Str) {
+ setValueSubclassDataBit(14, !Str.empty());
+ getContext().setGC(*this, std::move(Str));
+}
+
+void Function::clearGC() {
+ if (!hasGC())
+ return;
+ getContext().deleteGC(*this);
+ setValueSubclassDataBit(14, false);
+}
+
+bool Function::hasStackProtectorFnAttr() const {
+ return hasFnAttribute(Attribute::StackProtect) ||
+ hasFnAttribute(Attribute::StackProtectStrong) ||
+ hasFnAttribute(Attribute::StackProtectReq);
+}
+
+/// Copy all additional attributes (those not needed to create a Function) from
+/// the Function Src to this one.
+void Function::copyAttributesFrom(const Function *Src) {
+ GlobalObject::copyAttributesFrom(Src);
+ setCallingConv(Src->getCallingConv());
+ setAttributes(Src->getAttributes());
+ if (Src->hasGC())
+ setGC(Src->getGC());
+ else
+ clearGC();
+ if (Src->hasPersonalityFn())
+ setPersonalityFn(Src->getPersonalityFn());
+ if (Src->hasPrefixData())
+ setPrefixData(Src->getPrefixData());
+ if (Src->hasPrologueData())
+ setPrologueData(Src->getPrologueData());
+}
+
+/// Table of string intrinsic names indexed by enum value.
+static const char * const IntrinsicNameTable[] = {
+ "not_intrinsic",
+#define GET_INTRINSIC_NAME_TABLE
+#include "llvm/IR/IntrinsicImpl.inc"
+#undef GET_INTRINSIC_NAME_TABLE
+};
+
+/// Table of per-target intrinsic name tables.
+#define GET_INTRINSIC_TARGET_DATA
+#include "llvm/IR/IntrinsicImpl.inc"
+#undef GET_INTRINSIC_TARGET_DATA
+
+bool Function::isTargetIntrinsic(Intrinsic::ID IID) {
+ return IID > TargetInfos[0].Count;
+}
+
+bool Function::isTargetIntrinsic() const {
+ return isTargetIntrinsic(IntID);
+}
+
+/// Find the segment of \c IntrinsicNameTable for intrinsics with the same
+/// target as \c Name, or the generic table if \c Name is not target specific.
+///
+/// Returns the relevant slice of \c IntrinsicNameTable
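+///
+/// For example, for "llvm.aarch64.neon.saddlp" the target component is
+/// "aarch64", so the aarch64 sub-table is returned.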
+static ArrayRef<const char *> findTargetSubtable(StringRef Name) {
+ assert(Name.startswith("llvm."));
+
+ ArrayRef<IntrinsicTargetInfo> Targets(TargetInfos);
+ // Drop "llvm." and take the first dotted component. That will be the target
+ // if this is target specific.
+ StringRef Target = Name.drop_front(5).split('.').first;
+ auto It = partition_point(
+ Targets, [=](const IntrinsicTargetInfo &TI) { return TI.Name < Target; });
+ // We've either found the target or just fall back to the generic set, which
+ // is always first.
+ const auto &TI = It != Targets.end() && It->Name == Target ? *It : Targets[0];
+ return makeArrayRef(&IntrinsicNameTable[1] + TI.Offset, TI.Count);
+}
+
+/// This does the actual lookup of an intrinsic ID which
+/// matches the given function name.
+Intrinsic::ID Function::lookupIntrinsicID(StringRef Name) {
+ ArrayRef<const char *> NameTable = findTargetSubtable(Name);
+ int Idx = Intrinsic::lookupLLVMIntrinsicByName(NameTable, Name);
+ if (Idx == -1)
+ return Intrinsic::not_intrinsic;
+
+ // Intrinsic IDs correspond to the location in IntrinsicNameTable, but we have
+ // an index into a sub-table.
+ int Adjust = NameTable.data() - IntrinsicNameTable;
+ Intrinsic::ID ID = static_cast<Intrinsic::ID>(Idx + Adjust);
+
+ // If the intrinsic is not overloaded, require an exact match. If it is
+ // overloaded, require either exact or prefix match.
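+ // For example, "llvm.ctlz.v4i32" is accepted as a prefix match for the
+ // overloaded "llvm.ctlz" entry.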
+ const auto MatchSize = strlen(NameTable[Idx]);
+ assert(Name.size() >= MatchSize && "Expected either exact or prefix match");
+ bool IsExactMatch = Name.size() == MatchSize;
+ return IsExactMatch || Intrinsic::isOverloaded(ID) ? ID
+ : Intrinsic::not_intrinsic;
+}
+
+void Function::recalculateIntrinsicID() {
+ StringRef Name = getName();
+ if (!Name.startswith("llvm.")) {
+ HasLLVMReservedName = false;
+ IntID = Intrinsic::not_intrinsic;
+ return;
+ }
+ HasLLVMReservedName = true;
+ IntID = lookupIntrinsicID(Name);
+}
+
+/// Returns a stable mangling for the type specified for use in the name
+/// mangling scheme used by 'any' types in intrinsic signatures. The mangling
+/// of named types is simply their name. Manglings for unnamed types consist
+/// of a prefix ('p' for pointers, 'a' for arrays, 'f_' for functions)
+/// combined with the mangling of their component types. A vararg function
+/// type will have a suffix of 'vararg'. Since function types can contain
+/// other function types, we close a function type mangling with suffix 'f'
+/// which can't be confused with its prefix. This ensures we don't have
+/// collisions between two unrelated function types. Otherwise, you might
+/// parse ffXX as f(fXX) or f(fX)X. (X is a placeholder for any other type.)
+/// The HasUnnamedType boolean is set if an unnamed type was encountered,
+/// indicating that extra care must be taken to ensure a unique name.
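+/// For example, <4 x i32> mangles to "v4i32", <vscale x 2 x double> to
+/// "nxv2f64", and an i8* in address space 0 to "p0i8".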
+static std::string getMangledTypeStr(Type *Ty, bool &HasUnnamedType) {
+ std::string Result;
+ if (PointerType *PTyp = dyn_cast<PointerType>(Ty)) {
+ Result += "p" + utostr(PTyp->getAddressSpace());
+ // Opaque pointers don't carry pointee type information, so we mangle only
+ // the address space for them.
+ if (!PTyp->isOpaque())
+ Result += getMangledTypeStr(PTyp->getNonOpaquePointerElementType(),
+ HasUnnamedType);
+ } else if (ArrayType *ATyp = dyn_cast<ArrayType>(Ty)) {
+ Result += "a" + utostr(ATyp->getNumElements()) +
+ getMangledTypeStr(ATyp->getElementType(), HasUnnamedType);
+ } else if (StructType *STyp = dyn_cast<StructType>(Ty)) {
+ if (!STyp->isLiteral()) {
+ Result += "s_";
+ if (STyp->hasName())
+ Result += STyp->getName();
+ else
+ HasUnnamedType = true;
+ } else {
+ Result += "sl_";
+ for (auto Elem : STyp->elements())
+ Result += getMangledTypeStr(Elem, HasUnnamedType);
+ }
+ // Ensure nested structs are distinguishable.
+ Result += "s";
+ } else if (FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
+ Result += "f_" + getMangledTypeStr(FT->getReturnType(), HasUnnamedType);
+ for (size_t i = 0; i < FT->getNumParams(); i++)
+ Result += getMangledTypeStr(FT->getParamType(i), HasUnnamedType);
+ if (FT->isVarArg())
+ Result += "vararg";
+ // Ensure nested function types are distinguishable.
+ Result += "f";
+ } else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ ElementCount EC = VTy->getElementCount();
+ if (EC.isScalable())
+ Result += "nx";
+ Result += "v" + utostr(EC.getKnownMinValue()) +
+ getMangledTypeStr(VTy->getElementType(), HasUnnamedType);
+ } else if (Ty) {
+ switch (Ty->getTypeID()) {
+ default: llvm_unreachable("Unhandled type");
+ case Type::VoidTyID: Result += "isVoid"; break;
+ case Type::MetadataTyID: Result += "Metadata"; break;
+ case Type::HalfTyID: Result += "f16"; break;
+ case Type::BFloatTyID: Result += "bf16"; break;
+ case Type::FloatTyID: Result += "f32"; break;
+ case Type::DoubleTyID: Result += "f64"; break;
+ case Type::X86_FP80TyID: Result += "f80"; break;
+ case Type::FP128TyID: Result += "f128"; break;
+ case Type::PPC_FP128TyID: Result += "ppcf128"; break;
+ case Type::X86_MMXTyID: Result += "x86mmx"; break;
+ case Type::X86_AMXTyID: Result += "x86amx"; break;
+ case Type::IntegerTyID:
+ Result += "i" + utostr(cast<IntegerType>(Ty)->getBitWidth());
+ break;
+ }
+ }
+ return Result;
+}
+
+StringRef Intrinsic::getBaseName(ID id) {
+ assert(id < num_intrinsics && "Invalid intrinsic ID!");
+ return IntrinsicNameTable[id];
+}
+
+StringRef Intrinsic::getName(ID id) {
+ assert(id < num_intrinsics && "Invalid intrinsic ID!");
+ assert(!Intrinsic::isOverloaded(id) &&
+ "This version of getName does not support overloading");
+ return getBaseName(id);
+}
+
+static std::string getIntrinsicNameImpl(Intrinsic::ID Id, ArrayRef<Type *> Tys,
+ Module *M, FunctionType *FT,
+ bool EarlyModuleCheck) {
+
+ assert(Id < Intrinsic::num_intrinsics && "Invalid intrinsic ID!");
+ assert((Tys.empty() || Intrinsic::isOverloaded(Id)) &&
+ "This version of getName is for overloaded intrinsics only");
+ (void)EarlyModuleCheck;
+ assert((!EarlyModuleCheck || M ||
+ !any_of(Tys, [](Type *T) { return isa<PointerType>(T); })) &&
+ "Intrinsic overloading on pointer types need to provide a Module");
+ bool HasUnnamedType = false;
+ std::string Result(Intrinsic::getBaseName(Id));
+ for (Type *Ty : Tys)
+ Result += "." + getMangledTypeStr(Ty, HasUnnamedType);
+ if (HasUnnamedType) {
+ assert(M && "unnamed types need a module");
+ if (!FT)
+ FT = Intrinsic::getType(M->getContext(), Id, Tys);
+ else
+ assert((FT == Intrinsic::getType(M->getContext(), Id, Tys)) &&
+ "Provided FunctionType must match arguments");
+ return M->getUniqueIntrinsicName(Result, Id, FT);
+ }
+ return Result;
+}
+
+std::string Intrinsic::getName(ID Id, ArrayRef<Type *> Tys, Module *M,
+ FunctionType *FT) {
+ assert(M && "We need to have a Module");
+ return getIntrinsicNameImpl(Id, Tys, M, FT, true);
+}
+
+std::string Intrinsic::getNameNoUnnamedTypes(ID Id, ArrayRef<Type *> Tys) {
+ return getIntrinsicNameImpl(Id, Tys, nullptr, nullptr, false);
+}
+
+/// IIT_Info - These are enumerators that describe the entries returned by the
+/// getIntrinsicInfoTableEntries function.
+///
+/// NOTE: This must be kept in sync with the copy in TblGen/IntrinsicEmitter!
+enum IIT_Info {
+ // Common values should be encoded with 0-15.
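+ // (These fit in one 4-bit nibble of the packed IIT_Table encoding that
+ // getIntrinsicInfoTableEntries unpacks below.)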
+ IIT_Done = 0,
+ IIT_I1 = 1,
+ IIT_I8 = 2,
+ IIT_I16 = 3,
+ IIT_I32 = 4,
+ IIT_I64 = 5,
+ IIT_F16 = 6,
+ IIT_F32 = 7,
+ IIT_F64 = 8,
+ IIT_V2 = 9,
+ IIT_V4 = 10,
+ IIT_V8 = 11,
+ IIT_V16 = 12,
+ IIT_V32 = 13,
+ IIT_PTR = 14,
+ IIT_ARG = 15,
+
+ // Values from 16+ are only encodable with the inefficient encoding.
+ IIT_V64 = 16,
+ IIT_MMX = 17,
+ IIT_TOKEN = 18,
+ IIT_METADATA = 19,
+ IIT_EMPTYSTRUCT = 20,
+ IIT_STRUCT2 = 21,
+ IIT_STRUCT3 = 22,
+ IIT_STRUCT4 = 23,
+ IIT_STRUCT5 = 24,
+ IIT_EXTEND_ARG = 25,
+ IIT_TRUNC_ARG = 26,
+ IIT_ANYPTR = 27,
+ IIT_V1 = 28,
+ IIT_VARARG = 29,
+ IIT_HALF_VEC_ARG = 30,
+ IIT_SAME_VEC_WIDTH_ARG = 31,
+ IIT_PTR_TO_ARG = 32,
+ IIT_PTR_TO_ELT = 33,
+ IIT_VEC_OF_ANYPTRS_TO_ELT = 34,
+ IIT_I128 = 35,
+ IIT_V512 = 36,
+ IIT_V1024 = 37,
+ IIT_STRUCT6 = 38,
+ IIT_STRUCT7 = 39,
+ IIT_STRUCT8 = 40,
+ IIT_F128 = 41,
+ IIT_VEC_ELEMENT = 42,
+ IIT_SCALABLE_VEC = 43,
+ IIT_SUBDIVIDE2_ARG = 44,
+ IIT_SUBDIVIDE4_ARG = 45,
+ IIT_VEC_OF_BITCASTS_TO_INT = 46,
+ IIT_V128 = 47,
+ IIT_BF16 = 48,
+ IIT_STRUCT9 = 49,
+ IIT_V256 = 50,
+ IIT_AMX = 51,
+ IIT_PPCF128 = 52,
+ IIT_V3 = 53,
+ IIT_EXTERNREF = 54,
+ IIT_FUNCREF = 55,
+ IIT_ANYPTR_TO_ELT = 56,
+ IIT_I2 = 57,
+ IIT_I4 = 58,
+};
+
+static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
+ IIT_Info LastInfo,
+ SmallVectorImpl<Intrinsic::IITDescriptor> &OutputTable) {
+ using namespace Intrinsic;
+
+ bool IsScalableVector = (LastInfo == IIT_SCALABLE_VEC);
+
+ IIT_Info Info = IIT_Info(Infos[NextElt++]);
+ unsigned StructElts = 2;
+
+ switch (Info) {
+ case IIT_Done:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Void, 0));
+ return;
+ case IIT_VARARG:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::VarArg, 0));
+ return;
+ case IIT_MMX:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::MMX, 0));
+ return;
+ case IIT_AMX:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::AMX, 0));
+ return;
+ case IIT_TOKEN:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Token, 0));
+ return;
+ case IIT_METADATA:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Metadata, 0));
+ return;
+ case IIT_F16:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Half, 0));
+ return;
+ case IIT_BF16:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::BFloat, 0));
+ return;
+ case IIT_F32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Float, 0));
+ return;
+ case IIT_F64:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Double, 0));
+ return;
+ case IIT_F128:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Quad, 0));
+ return;
+ case IIT_PPCF128:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::PPCQuad, 0));
+ return;
+ case IIT_I1:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 1));
+ return;
+ case IIT_I2:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 2));
+ return;
+ case IIT_I4:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 4));
+ return;
+ case IIT_I8:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
+ return;
+ case IIT_I16:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 16));
+ return;
+ case IIT_I32:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 32));
+ return;
+ case IIT_I64:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 64));
+ return;
+ case IIT_I128:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 128));
+ return;
+ case IIT_V1:
+ OutputTable.push_back(IITDescriptor::getVector(1, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V2:
+ OutputTable.push_back(IITDescriptor::getVector(2, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V3:
+ OutputTable.push_back(IITDescriptor::getVector(3, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V4:
+ OutputTable.push_back(IITDescriptor::getVector(4, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V8:
+ OutputTable.push_back(IITDescriptor::getVector(8, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V16:
+ OutputTable.push_back(IITDescriptor::getVector(16, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V32:
+ OutputTable.push_back(IITDescriptor::getVector(32, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V64:
+ OutputTable.push_back(IITDescriptor::getVector(64, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V128:
+ OutputTable.push_back(IITDescriptor::getVector(128, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V256:
+ OutputTable.push_back(IITDescriptor::getVector(256, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V512:
+ OutputTable.push_back(IITDescriptor::getVector(512, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_V1024:
+ OutputTable.push_back(IITDescriptor::getVector(1024, IsScalableVector));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
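+ // WebAssembly reference types are encoded as pointers in the reserved
+ // address spaces 10 (externref) and 20 (funcref).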
+ case IIT_EXTERNREF:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 10));
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
+ return;
+ case IIT_FUNCREF:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 20));
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
+ return;
+ case IIT_PTR:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ case IIT_ANYPTR: { // [ANYPTR addrspace, subtype]
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
+ Infos[NextElt++]));
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ }
+ case IIT_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
+ return;
+ }
+ case IIT_EXTEND_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::ExtendArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_TRUNC_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::TruncArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_HALF_VEC_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::HalfVecArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_SAME_VEC_WIDTH_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::SameVecWidthArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_PTR_TO_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_PTR_TO_ELT: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToElt, ArgInfo));
+ return;
+ }
+ case IIT_ANYPTR_TO_ELT: {
+ unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ unsigned short RefNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(
+ IITDescriptor::get(IITDescriptor::AnyPtrToElt, ArgNo, RefNo));
+ return;
+ }
+ case IIT_VEC_OF_ANYPTRS_TO_ELT: {
+ unsigned short ArgNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ unsigned short RefNo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(
+ IITDescriptor::get(IITDescriptor::VecOfAnyPtrsToElt, ArgNo, RefNo));
+ return;
+ }
+ case IIT_EMPTYSTRUCT:
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
+ return;
+ case IIT_STRUCT9: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT8: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT7: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT6: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT5: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT4: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT3: ++StructElts; LLVM_FALLTHROUGH;
+ case IIT_STRUCT2: {
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, StructElts));
+
+ for (unsigned i = 0; i != StructElts; ++i)
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ }
+ case IIT_SUBDIVIDE2_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide2Argument,
+ ArgInfo));
+ return;
+ }
+ case IIT_SUBDIVIDE4_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide4Argument,
+ ArgInfo));
+ return;
+ }
+ case IIT_VEC_ELEMENT: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecElementArgument,
+ ArgInfo));
+ return;
+ }
+ case IIT_SCALABLE_VEC: {
+ DecodeIITType(NextElt, Infos, Info, OutputTable);
+ return;
+ }
+ case IIT_VEC_OF_BITCASTS_TO_INT: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecOfBitcastsToInt,
+ ArgInfo));
+ return;
+ }
+ }
+ llvm_unreachable("unhandled");
+}
+
+#define GET_INTRINSIC_GENERATOR_GLOBAL
+#include "llvm/IR/IntrinsicImpl.inc"
+#undef GET_INTRINSIC_GENERATOR_GLOBAL
+
+void Intrinsic::getIntrinsicInfoTableEntries(ID id,
+ SmallVectorImpl<IITDescriptor> &T){
+ // Check to see if the intrinsic's type was expressible by the table.
+ unsigned TableVal = IIT_Table[id-1];
+
+ // Decode the TableVal into an array of IITValues.
+ SmallVector<unsigned char, 8> IITValues;
+ ArrayRef<unsigned char> IITEntries;
+ unsigned NextElt = 0;
+ if ((TableVal >> 31) != 0) {
+ // This is an offset into the IIT_LongEncodingTable.
+ IITEntries = IIT_LongEncodingTable;
+
+ // Strip sentinel bit.
+ NextElt = (TableVal << 1) >> 1;
+ } else {
+ // Decode the TableVal into an array of IITValues. If the entry was encoded
+ // into a single word in the table itself, decode it now.
+ do {
+ IITValues.push_back(TableVal & 0xF);
+ TableVal >>= 4;
+ } while (TableVal);
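+ // For example, a (hypothetical) TableVal of 0x54 unpacks, low nibble
+ // first, to [IIT_I32, IIT_I64]: an intrinsic returning i32 with one i64
+ // parameter.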
+
+ IITEntries = IITValues;
+ NextElt = 0;
+ }
+
+ // Okay, decode the table into the output vector of IITDescriptors.
+ DecodeIITType(NextElt, IITEntries, IIT_Done, T);
+ while (NextElt != IITEntries.size() && IITEntries[NextElt] != 0)
+ DecodeIITType(NextElt, IITEntries, IIT_Done, T);
+}
+
+static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ ArrayRef<Type*> Tys, LLVMContext &Context) {
+ using namespace Intrinsic;
+
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+
+ switch (D.Kind) {
+ case IITDescriptor::Void: return Type::getVoidTy(Context);
+ case IITDescriptor::VarArg: return Type::getVoidTy(Context);
+ case IITDescriptor::MMX: return Type::getX86_MMXTy(Context);
+ case IITDescriptor::AMX: return Type::getX86_AMXTy(Context);
+ case IITDescriptor::Token: return Type::getTokenTy(Context);
+ case IITDescriptor::Metadata: return Type::getMetadataTy(Context);
+ case IITDescriptor::Half: return Type::getHalfTy(Context);
+ case IITDescriptor::BFloat: return Type::getBFloatTy(Context);
+ case IITDescriptor::Float: return Type::getFloatTy(Context);
+ case IITDescriptor::Double: return Type::getDoubleTy(Context);
+ case IITDescriptor::Quad: return Type::getFP128Ty(Context);
+ case IITDescriptor::PPCQuad: return Type::getPPC_FP128Ty(Context);
+
+ case IITDescriptor::Integer:
+ return IntegerType::get(Context, D.Integer_Width);
+ case IITDescriptor::Vector:
+ return VectorType::get(DecodeFixedType(Infos, Tys, Context),
+ D.Vector_Width);
+ case IITDescriptor::Pointer:
+ return PointerType::get(DecodeFixedType(Infos, Tys, Context),
+ D.Pointer_AddressSpace);
+ case IITDescriptor::Struct: {
+ SmallVector<Type *, 8> Elts;
+ for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
+ Elts.push_back(DecodeFixedType(Infos, Tys, Context));
+ return StructType::get(Context, Elts);
+ }
+ case IITDescriptor::Argument:
+ return Tys[D.getArgumentNumber()];
+ case IITDescriptor::ExtendArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return VectorType::getExtendedElementVectorType(VTy);
+
+ return IntegerType::get(Context, 2 * cast<IntegerType>(Ty)->getBitWidth());
+ }
+ case IITDescriptor::TruncArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return VectorType::getTruncatedElementVectorType(VTy);
+
+ IntegerType *ITy = cast<IntegerType>(Ty);
+ assert(ITy->getBitWidth() % 2 == 0);
+ return IntegerType::get(Context, ITy->getBitWidth() / 2);
+ }
+ case IITDescriptor::Subdivide2Argument:
+ case IITDescriptor::Subdivide4Argument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ assert(VTy && "Expected an argument of Vector Type");
+ int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
+ return VectorType::getSubdividedVectorType(VTy, SubDivs);
+ }
+ case IITDescriptor::HalfVecArgument:
+ return VectorType::getHalfElementsVectorType(cast<VectorType>(
+ Tys[D.getArgumentNumber()]));
+ case IITDescriptor::SameVecWidthArgument: {
+ Type *EltTy = DecodeFixedType(Infos, Tys, Context);
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (auto *VTy = dyn_cast<VectorType>(Ty))
+ return VectorType::get(EltTy, VTy->getElementCount());
+ return EltTy;
+ }
+ case IITDescriptor::PtrToArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ return PointerType::getUnqual(Ty);
+ }
+ case IITDescriptor::PtrToElt: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ if (!VTy)
+ llvm_unreachable("Expected an argument of Vector Type");
+ Type *EltTy = VTy->getElementType();
+ return PointerType::getUnqual(EltTy);
+ }
+ case IITDescriptor::VecElementArgument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return VTy->getElementType();
+ llvm_unreachable("Expected an argument of Vector Type");
+ }
+ case IITDescriptor::VecOfBitcastsToInt: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ assert(VTy && "Expected an argument of Vector Type");
+ return VectorType::getInteger(VTy);
+ }
+ case IITDescriptor::VecOfAnyPtrsToElt:
+ // Return the overloaded type (which determines the pointer's address space)
+ return Tys[D.getOverloadArgNumber()];
+ case IITDescriptor::AnyPtrToElt:
+ // Return the overloaded type (which determines the pointer's address space)
+ return Tys[D.getOverloadArgNumber()];
+ }
+ llvm_unreachable("unhandled");
+}
+
+FunctionType *Intrinsic::getType(LLVMContext &Context,
+ ID id, ArrayRef<Type*> Tys) {
+ SmallVector<IITDescriptor, 8> Table;
+ getIntrinsicInfoTableEntries(id, Table);
+
+ ArrayRef<IITDescriptor> TableRef = Table;
+ Type *ResultTy = DecodeFixedType(TableRef, Tys, Context);
+
+ SmallVector<Type*, 8> ArgTys;
+ while (!TableRef.empty())
+ ArgTys.push_back(DecodeFixedType(TableRef, Tys, Context));
+
+ // DecodeFixedType returns Void for both IITDescriptor::Void and
+ // IITDescriptor::VarArg. If we see a void type as the last argument type,
+ // this is a vararg intrinsic.
+ if (!ArgTys.empty() && ArgTys.back()->isVoidTy()) {
+ ArgTys.pop_back();
+ return FunctionType::get(ResultTy, ArgTys, true);
+ }
+ return FunctionType::get(ResultTy, ArgTys, false);
+}
+
+bool Intrinsic::isOverloaded(ID id) {
+#define GET_INTRINSIC_OVERLOAD_TABLE
+#include "llvm/IR/IntrinsicImpl.inc"
+#undef GET_INTRINSIC_OVERLOAD_TABLE
+}
+
+bool Intrinsic::isLeaf(ID id) {
+ switch (id) {
+ default:
+ return true;
+
+ case Intrinsic::experimental_gc_statepoint:
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+ return false;
+ }
+}
+
+/// This defines the "Intrinsic::getAttributes(ID id)" method.
+#define GET_INTRINSIC_ATTRIBUTES
+#include "llvm/IR/IntrinsicImpl.inc"
+#undef GET_INTRINSIC_ATTRIBUTES
+
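+/// A usage sketch (VecTy standing in for a hypothetical <4 x i32> type):
+///   Function *F = Intrinsic::getDeclaration(M, Intrinsic::ctlz, {VecTy});
+/// yields the declaration named "llvm.ctlz.v4i32".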
+Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
+ // There can never be multiple globals with the same name but different
+ // types, because intrinsics must be a specific type.
+ auto *FT = getType(M->getContext(), id, Tys);
+ return cast<Function>(
+ M->getOrInsertFunction(Tys.empty() ? getName(id)
+ : getName(id, Tys, M, FT),
+ getType(M->getContext(), id, Tys))
+ .getCallee());
+}
+
+// This defines the "Intrinsic::getIntrinsicForClangBuiltin()" method.
+#define GET_LLVM_INTRINSIC_FOR_CLANG_BUILTIN
+#include "llvm/IR/IntrinsicImpl.inc"
+#undef GET_LLVM_INTRINSIC_FOR_CLANG_BUILTIN
+
+// This defines the "Intrinsic::getIntrinsicForMSBuiltin()" method.
+#define GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
+#include "llvm/IR/IntrinsicImpl.inc"
+#undef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
+
+using DeferredIntrinsicMatchPair =
+ std::pair<Type *, ArrayRef<Intrinsic::IITDescriptor>>;
+
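+/// Match \p Ty against the current IIT descriptor. Note the inverted sense:
+/// returns false on a successful match and true on a mismatch, which the
+/// callers below rely on.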
+static bool matchIntrinsicType(
+ Type *Ty, ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ SmallVectorImpl<Type *> &ArgTys,
+ SmallVectorImpl<DeferredIntrinsicMatchPair> &DeferredChecks,
+ bool IsDeferredCheck) {
+ using namespace Intrinsic;
+
+ // If we ran out of descriptors, there are too many arguments.
+ if (Infos.empty()) return true;
+
+ // Do this before slicing off the 'front' part
+ auto InfosRef = Infos;
+ auto DeferCheck = [&DeferredChecks, &InfosRef](Type *T) {
+ DeferredChecks.emplace_back(T, InfosRef);
+ return false;
+ };
+
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+
+ switch (D.Kind) {
+ case IITDescriptor::Void: return !Ty->isVoidTy();
+ case IITDescriptor::VarArg: return true;
+ case IITDescriptor::MMX: return !Ty->isX86_MMXTy();
+ case IITDescriptor::AMX: return !Ty->isX86_AMXTy();
+ case IITDescriptor::Token: return !Ty->isTokenTy();
+ case IITDescriptor::Metadata: return !Ty->isMetadataTy();
+ case IITDescriptor::Half: return !Ty->isHalfTy();
+ case IITDescriptor::BFloat: return !Ty->isBFloatTy();
+ case IITDescriptor::Float: return !Ty->isFloatTy();
+ case IITDescriptor::Double: return !Ty->isDoubleTy();
+ case IITDescriptor::Quad: return !Ty->isFP128Ty();
+ case IITDescriptor::PPCQuad: return !Ty->isPPC_FP128Ty();
+ case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width);
+ case IITDescriptor::Vector: {
+ VectorType *VT = dyn_cast<VectorType>(Ty);
+ return !VT || VT->getElementCount() != D.Vector_Width ||
+ matchIntrinsicType(VT->getElementType(), Infos, ArgTys,
+ DeferredChecks, IsDeferredCheck);
+ }
+ case IITDescriptor::Pointer: {
+ PointerType *PT = dyn_cast<PointerType>(Ty);
+ if (!PT || PT->getAddressSpace() != D.Pointer_AddressSpace)
+ return true;
+ if (!PT->isOpaque()) {
+ /* Manually consume a pointer to empty struct descriptor, which is
+ * used for externref. We don't want to enforce that the struct is
+ * anonymous in this case. (This renders externref intrinsics
+ * non-unique, but this will go away with opaque pointers anyway.) */
+ if (Infos.front().Kind == IITDescriptor::Struct &&
+ Infos.front().Struct_NumElements == 0) {
+ Infos = Infos.slice(1);
+ return false;
+ }
+ return matchIntrinsicType(PT->getNonOpaquePointerElementType(), Infos,
+ ArgTys, DeferredChecks, IsDeferredCheck);
+ }
+ // Consume IIT descriptors relating to the pointer element type.
+ // FIXME: Intrinsic type matching of nested single value types or even
+ // aggregates doesn't work properly with opaque pointers but hopefully
+ // doesn't happen in practice.
+ while (Infos.front().Kind == IITDescriptor::Pointer ||
+ Infos.front().Kind == IITDescriptor::Vector)
+ Infos = Infos.slice(1);
+ assert((Infos.front().Kind != IITDescriptor::Argument ||
+ Infos.front().getArgumentKind() == IITDescriptor::AK_MatchType) &&
+ "Unsupported polymorphic pointer type with opaque pointer");
+ Infos = Infos.slice(1);
+ return false;
+ }
+
+ case IITDescriptor::Struct: {
+ StructType *ST = dyn_cast<StructType>(Ty);
+ if (!ST || !ST->isLiteral() || ST->isPacked() ||
+ ST->getNumElements() != D.Struct_NumElements)
+ return true;
+
+ for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
+ if (matchIntrinsicType(ST->getElementType(i), Infos, ArgTys,
+ DeferredChecks, IsDeferredCheck))
+ return true;
+ return false;
+ }
+
+ case IITDescriptor::Argument:
+ // If this is the second occurrence of an argument,
+ // verify that the later instance matches the previous instance.
+ if (D.getArgumentNumber() < ArgTys.size())
+ return Ty != ArgTys[D.getArgumentNumber()];
+
+ if (D.getArgumentNumber() > ArgTys.size() ||
+ D.getArgumentKind() == IITDescriptor::AK_MatchType)
+ return IsDeferredCheck || DeferCheck(Ty);
+
+ assert(D.getArgumentNumber() == ArgTys.size() && !IsDeferredCheck &&
+ "Table consistency error");
+ ArgTys.push_back(Ty);
+
+ switch (D.getArgumentKind()) {
+ case IITDescriptor::AK_Any: return false; // Success
+ case IITDescriptor::AK_AnyInteger: return !Ty->isIntOrIntVectorTy();
+ case IITDescriptor::AK_AnyFloat: return !Ty->isFPOrFPVectorTy();
+ case IITDescriptor::AK_AnyVector: return !isa<VectorType>(Ty);
+ case IITDescriptor::AK_AnyPointer: return !isa<PointerType>(Ty);
+ default: break;
+ }
+ llvm_unreachable("all argument kinds not covered");
+
+ case IITDescriptor::ExtendArgument: {
+ // If this is a forward reference, defer the check for later.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
+ NewTy = VectorType::getExtendedElementVectorType(VTy);
+ else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
+ NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth());
+ else
+ return true;
+
+ return Ty != NewTy;
+ }
+ case IITDescriptor::TruncArgument: {
+ // If this is a forward reference, defer the check for later.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (VectorType *VTy = dyn_cast<VectorType>(NewTy))
+ NewTy = VectorType::getTruncatedElementVectorType(VTy);
+ else if (IntegerType *ITy = dyn_cast<IntegerType>(NewTy))
+ NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2);
+ else
+ return true;
+
+ return Ty != NewTy;
+ }
+ case IITDescriptor::HalfVecArgument:
+ // If this is a forward reference, defer the check for later.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+ return !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
+ VectorType::getHalfElementsVectorType(
+ cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
+ case IITDescriptor::SameVecWidthArgument: {
+ if (D.getArgumentNumber() >= ArgTys.size()) {
+ // Defer check and subsequent check for the vector element type.
+ Infos = Infos.slice(1);
+ return IsDeferredCheck || DeferCheck(Ty);
+ }
+ auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
+ auto *ThisArgType = dyn_cast<VectorType>(Ty);
+ // Either both are vectors or neither is; element counts are compared
+ // below.
+ if ((ReferenceType != nullptr) != (ThisArgType != nullptr))
+ return true;
+ Type *EltTy = Ty;
+ if (ThisArgType) {
+ if (ReferenceType->getElementCount() !=
+ ThisArgType->getElementCount())
+ return true;
+ EltTy = ThisArgType->getElementType();
+ }
+ return matchIntrinsicType(EltTy, Infos, ArgTys, DeferredChecks,
+ IsDeferredCheck);
+ }
+ case IITDescriptor::PtrToArgument: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+ Type * ReferenceType = ArgTys[D.getArgumentNumber()];
+ PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
+ return (!ThisArgType ||
+ !ThisArgType->isOpaqueOrPointeeTypeMatches(ReferenceType));
+ }
+ case IITDescriptor::PtrToElt: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+ VectorType * ReferenceType =
+ dyn_cast<VectorType> (ArgTys[D.getArgumentNumber()]);
+ PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
+
+ if (!ThisArgType || !ReferenceType)
+ return true;
+ return !ThisArgType->isOpaqueOrPointeeTypeMatches(
+ ReferenceType->getElementType());
+ }
+ case IITDescriptor::AnyPtrToElt: {
+ unsigned RefArgNumber = D.getRefArgNumber();
+ if (RefArgNumber >= ArgTys.size()) {
+ if (IsDeferredCheck)
+ return true;
+ // If forward referencing, already add the pointer type and
+ // defer the checks for later.
+ ArgTys.push_back(Ty);
+ return DeferCheck(Ty);
+ }
+
+ if (!IsDeferredCheck) {
+ assert(D.getOverloadArgNumber() == ArgTys.size() &&
+ "Table consistency error");
+ ArgTys.push_back(Ty);
+ }
+
+ auto *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
+ auto *ThisArgType = dyn_cast<PointerType>(Ty);
+ if (!ThisArgType || !ReferenceType)
+ return true;
+ return !ThisArgType->isOpaqueOrPointeeTypeMatches(
+ ReferenceType->getElementType());
+ }
+ case IITDescriptor::VecOfAnyPtrsToElt: {
+ unsigned RefArgNumber = D.getRefArgNumber();
+ if (RefArgNumber >= ArgTys.size()) {
+ if (IsDeferredCheck)
+ return true;
+ // If forward referencing, already add the pointer-vector type and
+ // defer the checks for later.
+ ArgTys.push_back(Ty);
+ return DeferCheck(Ty);
+ }
+
+ if (!IsDeferredCheck){
+ assert(D.getOverloadArgNumber() == ArgTys.size() &&
+ "Table consistency error");
+ ArgTys.push_back(Ty);
+ }
+
+ // Verify the overloaded type "matches" the Ref type: Ty must be a vector
+ // with the same element count as Ref, composed of pointers to Ref's
+ // element type.
+ auto *ReferenceType = dyn_cast<VectorType>(ArgTys[RefArgNumber]);
+ auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
+ if (!ThisArgVecTy || !ReferenceType ||
+ (ReferenceType->getElementCount() != ThisArgVecTy->getElementCount()))
+ return true;
+ PointerType *ThisArgEltTy =
+ dyn_cast<PointerType>(ThisArgVecTy->getElementType());
+ if (!ThisArgEltTy)
+ return true;
+ return !ThisArgEltTy->isOpaqueOrPointeeTypeMatches(
+ ReferenceType->getElementType());
+ }
+ case IITDescriptor::VecElementArgument: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck ? true : DeferCheck(Ty);
+ auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
+ return !ReferenceType || Ty != ReferenceType->getElementType();
+ }
+ case IITDescriptor::Subdivide2Argument:
+ case IITDescriptor::Subdivide4Argument: {
+ // If this is a forward reference, defer the check for later.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (auto *VTy = dyn_cast<VectorType>(NewTy)) {
+ int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
+ NewTy = VectorType::getSubdividedVectorType(VTy, SubDivs);
+ return Ty != NewTy;
+ }
+ return true;
+ }
+ case IITDescriptor::VecOfBitcastsToInt: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+ auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
+ auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
+ if (!ThisArgVecTy || !ReferenceType)
+ return true;
+ return ThisArgVecTy != VectorType::getInteger(ReferenceType);
+ }
+ }
+ llvm_unreachable("unhandled");
+}
+
+Intrinsic::MatchIntrinsicTypesResult
+Intrinsic::matchIntrinsicSignature(FunctionType *FTy,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos,
+ SmallVectorImpl<Type *> &ArgTys) {
+ SmallVector<DeferredIntrinsicMatchPair, 2> DeferredChecks;
+ if (matchIntrinsicType(FTy->getReturnType(), Infos, ArgTys, DeferredChecks,
+ false))
+ return MatchIntrinsicTypes_NoMatchRet;
+
+ unsigned NumDeferredReturnChecks = DeferredChecks.size();
+
+ for (auto Ty : FTy->params())
+ if (matchIntrinsicType(Ty, Infos, ArgTys, DeferredChecks, false))
+ return MatchIntrinsicTypes_NoMatchArg;
+
+ for (unsigned I = 0, E = DeferredChecks.size(); I != E; ++I) {
+ DeferredIntrinsicMatchPair &Check = DeferredChecks[I];
+ if (matchIntrinsicType(Check.first, Check.second, ArgTys, DeferredChecks,
+ true))
+ return I < NumDeferredReturnChecks ? MatchIntrinsicTypes_NoMatchRet
+ : MatchIntrinsicTypes_NoMatchArg;
+ }
+
+ return MatchIntrinsicTypes_Match;
+}
+
+bool
+Intrinsic::matchIntrinsicVarArg(bool isVarArg,
+ ArrayRef<Intrinsic::IITDescriptor> &Infos) {
+ // If there are no descriptors left, then it can't be a vararg.
+ if (Infos.empty())
+ return isVarArg;
+
+ // There should be only one descriptor remaining at this point.
+ if (Infos.size() != 1)
+ return true;
+
+ // Verify the remaining descriptor.
+ IITDescriptor D = Infos.front();
+ Infos = Infos.slice(1);
+ if (D.Kind == IITDescriptor::VarArg)
+ return !isVarArg;
+
+ return true;
+}
+
+bool Intrinsic::getIntrinsicSignature(Function *F,
+ SmallVectorImpl<Type *> &ArgTys) {
+ Intrinsic::ID ID = F->getIntrinsicID();
+ if (!ID)
+ return false;
+
+ SmallVector<Intrinsic::IITDescriptor, 8> Table;
+ getIntrinsicInfoTableEntries(ID, Table);
+ ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
+
+ if (Intrinsic::matchIntrinsicSignature(F->getFunctionType(), TableRef,
+ ArgTys) !=
+ Intrinsic::MatchIntrinsicTypesResult::MatchIntrinsicTypes_Match) {
+ return false;
+ }
+ if (Intrinsic::matchIntrinsicVarArg(F->getFunctionType()->isVarArg(),
+ TableRef))
+ return false;
+ return true;
+}
+
+Optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
+ SmallVector<Type *, 4> ArgTys;
+ if (!getIntrinsicSignature(F, ArgTys))
+ return None;
+
+ Intrinsic::ID ID = F->getIntrinsicID();
+ StringRef Name = F->getName();
+ std::string WantedName =
+ Intrinsic::getName(ID, ArgTys, F->getParent(), F->getFunctionType());
+ if (Name == WantedName)
+ return None;
+
+ Function *NewDecl = [&] {
+ if (auto *ExistingGV = F->getParent()->getNamedValue(WantedName)) {
+ if (auto *ExistingF = dyn_cast<Function>(ExistingGV))
+ if (ExistingF->getFunctionType() == F->getFunctionType())
+ return ExistingF;
+
+ // The name already exists, but is not a function or has the wrong
+ // prototype. Make room for the new one by renaming the old version.
+ // Either this old version will be removed later on or the module is
+ // invalid and we'll get an error.
+ ExistingGV->setName(WantedName + ".renamed");
+ }
+ return Intrinsic::getDeclaration(F->getParent(), ID, ArgTys);
+ }();
+
+ NewDecl->setCallingConv(F->getCallingConv());
+ assert(NewDecl->getFunctionType() == F->getFunctionType() &&
+ "Shouldn't change the signature");
+ return NewDecl;
+}
+
+/// hasAddressTaken - returns true if there are any uses of this function
+/// other than direct calls or invokes to it. Optionally ignores callback
+/// uses, assume-like pointer annotation calls, and references in llvm.used
+/// and llvm.compiler.used variables.
+bool Function::hasAddressTaken(const User **PutOffender,
+ bool IgnoreCallbackUses,
+ bool IgnoreAssumeLikeCalls, bool IgnoreLLVMUsed,
+ bool IgnoreARCAttachedCall) const {
+ for (const Use &U : uses()) {
+ const User *FU = U.getUser();
+ if (isa<BlockAddress>(FU))
+ continue;
+
+ if (IgnoreCallbackUses) {
+ AbstractCallSite ACS(&U);
+ if (ACS && ACS.isCallbackCall())
+ continue;
+ }
+
+ const auto *Call = dyn_cast<CallBase>(FU);
+ if (!Call) {
+ if (IgnoreAssumeLikeCalls) {
+ if (const auto *FI = dyn_cast<Instruction>(FU)) {
+ if (FI->isCast() && !FI->user_empty() &&
+ llvm::all_of(FU->users(), [](const User *U) {
+ if (const auto *I = dyn_cast<IntrinsicInst>(U))
+ return I->isAssumeLikeIntrinsic();
+ return false;
+ }))
+ continue;
+ }
+ }
+ if (IgnoreLLVMUsed && !FU->user_empty()) {
+ const User *FUU = FU;
+ if (isa<BitCastOperator>(FU) && FU->hasOneUse() &&
+ !FU->user_begin()->user_empty())
+ FUU = *FU->user_begin();
+ if (llvm::all_of(FUU->users(), [](const User *U) {
+ if (const auto *GV = dyn_cast<GlobalVariable>(U))
+ return GV->hasName() &&
+ (GV->getName().equals("llvm.compiler.used") ||
+ GV->getName().equals("llvm.used"));
+ return false;
+ }))
+ continue;
+ }
+ if (PutOffender)
+ *PutOffender = FU;
+ return true;
+ }
+ if (!Call->isCallee(&U) || Call->getFunctionType() != getFunctionType()) {
+ if (IgnoreARCAttachedCall &&
+ Call->isOperandBundleOfType(LLVMContext::OB_clang_arc_attachedcall,
+ U.getOperandNo()))
+ continue;
+
+ if (PutOffender)
+ *PutOffender = FU;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Function::isDefTriviallyDead() const {
+ // Check the linkage
+ if (!hasLinkOnceLinkage() && !hasLocalLinkage() &&
+ !hasAvailableExternallyLinkage())
+ return false;
+
+ // Check if the function is used by anything other than a blockaddress.
+ for (const User *U : users())
+ if (!isa<BlockAddress>(U))
+ return false;
+
+ return true;
+}
+
+/// callsFunctionThatReturnsTwice - Return true if the function has a call to
+/// setjmp or other function that gcc recognizes as "returning twice".
+bool Function::callsFunctionThatReturnsTwice() const {
+ for (const Instruction &I : instructions(this))
+ if (const auto *Call = dyn_cast<CallBase>(&I))
+ if (Call->hasFnAttr(Attribute::ReturnsTwice))
+ return true;
+
+ return false;
+}
+
+Constant *Function::getPersonalityFn() const {
+ assert(hasPersonalityFn() && getNumOperands());
+ return cast<Constant>(Op<0>());
+}
+
+void Function::setPersonalityFn(Constant *Fn) {
+ setHungoffOperand<0>(Fn);
+ setValueSubclassDataBit(3, Fn != nullptr);
+}
+
+Constant *Function::getPrefixData() const {
+ assert(hasPrefixData() && getNumOperands());
+ return cast<Constant>(Op<1>());
+}
+
+void Function::setPrefixData(Constant *PrefixData) {
+ setHungoffOperand<1>(PrefixData);
+ setValueSubclassDataBit(1, PrefixData != nullptr);
+}
+
+Constant *Function::getPrologueData() const {
+ assert(hasPrologueData() && getNumOperands());
+ return cast<Constant>(Op<2>());
+}
+
+void Function::setPrologueData(Constant *PrologueData) {
+ setHungoffOperand<2>(PrologueData);
+ setValueSubclassDataBit(2, PrologueData != nullptr);
+}
+
+void Function::allocHungoffUselist() {
+ // If we've already allocated a uselist, stop here.
+ if (getNumOperands())
+ return;
+
+ allocHungoffUses(3, /*IsPhi=*/ false);
+ setNumHungOffUseOperands(3);
+
+ // Initialize the uselist with placeholder operands to allow traversal.
+ auto *CPN = ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0));
+ Op<0>().set(CPN);
+ Op<1>().set(CPN);
+ Op<2>().set(CPN);
+}
+
+template <int Idx>
+void Function::setHungoffOperand(Constant *C) {
+ if (C) {
+ allocHungoffUselist();
+ Op<Idx>().set(C);
+ } else if (getNumOperands()) {
+ Op<Idx>().set(
+ ConstantPointerNull::get(Type::getInt1PtrTy(getContext(), 0)));
+ }
+}
+
+void Function::setValueSubclassDataBit(unsigned Bit, bool On) {
+ assert(Bit < 16 && "SubclassData contains only 16 bits");
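+ // Bits used in this file: 1 = prefix data, 2 = prologue data,
+ // 3 = personality function, 14 = GC name (see the setters above).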
+ if (On)
+ setValueSubclassData(getSubclassDataFromValue() | (1 << Bit));
+ else
+ setValueSubclassData(getSubclassDataFromValue() & ~(1 << Bit));
+}
+
+void Function::setEntryCount(ProfileCount Count,
+ const DenseSet<GlobalValue::GUID> *S) {
+#if !defined(NDEBUG)
+ auto PrevCount = getEntryCount();
+ assert(!PrevCount || PrevCount->getType() == Count.getType());
+#endif
+
+ auto ImportGUIDs = getImportGUIDs();
+ if (S == nullptr && ImportGUIDs.size())
+ S = &ImportGUIDs;
+
+ MDBuilder MDB(getContext());
+ setMetadata(
+ LLVMContext::MD_prof,
+ MDB.createFunctionEntryCount(Count.getCount(), Count.isSynthetic(), S));
+}
+
+void Function::setEntryCount(uint64_t Count, Function::ProfileCountType Type,
+ const DenseSet<GlobalValue::GUID> *Imports) {
+ setEntryCount(ProfileCount(Count, Type), Imports);
+}
+
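+// The entry count lives in !prof metadata shaped like, e.g.:
+//   !{!"function_entry_count", i64 100, i64 <imported GUID>, ...}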
+Optional<ProfileCount> Function::getEntryCount(bool AllowSynthetic) const {
+ MDNode *MD = getMetadata(LLVMContext::MD_prof);
+ if (MD && MD->getOperand(0))
+ if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0))) {
+ if (MDS->getString().equals("function_entry_count")) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(1));
+ uint64_t Count = CI->getValue().getZExtValue();
+ // A value of -1 is used for SamplePGO when there were no samples.
+ // Treat this the same as unknown.
+ if (Count == (uint64_t)-1)
+ return None;
+ return ProfileCount(Count, PCT_Real);
+ } else if (AllowSynthetic &&
+ MDS->getString().equals("synthetic_function_entry_count")) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(1));
+ uint64_t Count = CI->getValue().getZExtValue();
+ return ProfileCount(Count, PCT_Synthetic);
+ }
+ }
+ return None;
+}
+
+DenseSet<GlobalValue::GUID> Function::getImportGUIDs() const {
+ DenseSet<GlobalValue::GUID> R;
+ if (MDNode *MD = getMetadata(LLVMContext::MD_prof))
+ if (MDString *MDS = dyn_cast<MDString>(MD->getOperand(0)))
+ if (MDS->getString().equals("function_entry_count"))
+ for (unsigned i = 2; i < MD->getNumOperands(); i++)
+ R.insert(mdconst::extract<ConstantInt>(MD->getOperand(i))
+ ->getValue()
+ .getZExtValue());
+ return R;
+}
+
+void Function::setSectionPrefix(StringRef Prefix) {
+ MDBuilder MDB(getContext());
+ setMetadata(LLVMContext::MD_section_prefix,
+ MDB.createFunctionSectionPrefix(Prefix));
+}
+
+Optional<StringRef> Function::getSectionPrefix() const {
+ if (MDNode *MD = getMetadata(LLVMContext::MD_section_prefix)) {
+ assert(cast<MDString>(MD->getOperand(0))
+ ->getString()
+ .equals("function_section_prefix") &&
+ "Metadata not match");
+ return cast<MDString>(MD->getOperand(1))->getString();
+ }
+ return None;
+}
+
+bool Function::nullPointerIsDefined() const {
+ return hasFnAttribute(Attribute::NullPointerIsValid);
+}
+
+bool llvm::NullPointerIsDefined(const Function *F, unsigned AS) {
+ if (F && F->nullPointerIsDefined())
+ return true;
+
+ if (AS != 0)
+ return true;
+
+ return false;
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/GCStrategy.cpp b/contrib/llvm-project/llvm/lib/IR/GCStrategy.cpp
new file mode 100644
index 000000000000..f3bc5b74f8fd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/GCStrategy.cpp
@@ -0,0 +1,38 @@
+//===- GCStrategy.cpp - Garbage Collector Description ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the policy object GCStrategy which describes the
+// behavior of a given garbage collector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/GCStrategy.h"
+
+using namespace llvm;
+
+LLVM_INSTANTIATE_REGISTRY(GCRegistry)
+
+GCStrategy::GCStrategy() = default;
+
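+// A lookup sketch: getGCStrategy("statepoint-example") returns the builtin
+// statepoint strategy, provided BuiltinGCs was linked in and initialized.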
+std::unique_ptr<GCStrategy> llvm::getGCStrategy(const StringRef Name) {
+ for (auto &S : GCRegistry::entries())
+ if (S.getName() == Name)
+ return S.instantiate();
+
+ if (GCRegistry::begin() == GCRegistry::end()) {
+ // In normal operation, the registry should not be empty. There should
+ // be the builtin GCs if nothing else. The most likely scenario here is
+ // that we got here without running the initializers used by the Registry
+ // itself and its registration mechanism.
+ const std::string error =
+ std::string("unsupported GC: ") + Name.str() +
+ " (did you remember to link and initialize the library?)";
+ report_fatal_error(error);
+ } else
+ report_fatal_error(std::string("unsupported GC: ") + Name.str());
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/GVMaterializer.cpp b/contrib/llvm-project/llvm/lib/IR/GVMaterializer.cpp
new file mode 100644
index 000000000000..dc3b0e0fc236
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/GVMaterializer.cpp
@@ -0,0 +1,17 @@
+//===-- GVMaterializer.cpp - Base implementation for GV materializers -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Minimal implementation of the abstract interface for materializing
+// GlobalValues.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/GVMaterializer.h"
+using namespace llvm;
+
+GVMaterializer::~GVMaterializer() = default;
diff --git a/contrib/llvm-project/llvm/lib/IR/Globals.cpp b/contrib/llvm-project/llvm/lib/IR/Globals.cpp
new file mode 100644
index 000000000000..3265050261c8
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Globals.cpp
@@ -0,0 +1,581 @@
+//===-- Globals.cpp - Implement the GlobalValue & GlobalVariable class ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the GlobalValue & GlobalVariable classes for the IR
+// library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// GlobalValue Class
+//===----------------------------------------------------------------------===//
+
+// GlobalValue should be a Constant, plus a type, a module, some flags, and an
+// intrinsic ID. Add an assert to prevent people from accidentally growing
+// GlobalValue while adding flags.
+static_assert(sizeof(GlobalValue) ==
+ sizeof(Constant) + 2 * sizeof(void *) + 2 * sizeof(unsigned),
+ "unexpected GlobalValue size growth");
+
+// GlobalObject adds a comdat.
+static_assert(sizeof(GlobalObject) == sizeof(GlobalValue) + sizeof(void *),
+ "unexpected GlobalObject size growth");
+
+bool GlobalValue::isMaterializable() const {
+ if (const Function *F = dyn_cast<Function>(this))
+ return F->isMaterializable();
+ return false;
+}
+Error GlobalValue::materialize() {
+ return getParent()->materialize(this);
+}
+
+/// Override destroyConstantImpl to make sure it doesn't get called on
+/// GlobalValue's because they shouldn't be treated like other constants.
+void GlobalValue::destroyConstantImpl() {
+ llvm_unreachable("You can't GV->destroyConstantImpl()!");
+}
+
+Value *GlobalValue::handleOperandChangeImpl(Value *From, Value *To) {
+ llvm_unreachable("Unsupported class for handleOperandChange()!");
+}
+
+/// copyAttributesFrom - copy all additional attributes (those not needed to
+/// create a GlobalValue) from the GlobalValue Src to this one.
+void GlobalValue::copyAttributesFrom(const GlobalValue *Src) {
+ setVisibility(Src->getVisibility());
+ setUnnamedAddr(Src->getUnnamedAddr());
+ setThreadLocalMode(Src->getThreadLocalMode());
+ setDLLStorageClass(Src->getDLLStorageClass());
+ setDSOLocal(Src->isDSOLocal());
+ setPartition(Src->getPartition());
+ if (Src->hasSanitizerMetadata())
+ setSanitizerMetadata(Src->getSanitizerMetadata());
+ else
+ removeSanitizerMetadata();
+}
+
+void GlobalValue::removeFromParent() {
+ switch (getValueID()) {
+#define HANDLE_GLOBAL_VALUE(NAME) \
+ case Value::NAME##Val: \
+ return static_cast<NAME *>(this)->removeFromParent();
+#include "llvm/IR/Value.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a global");
+}
+
+void GlobalValue::eraseFromParent() {
+ switch (getValueID()) {
+#define HANDLE_GLOBAL_VALUE(NAME) \
+ case Value::NAME##Val: \
+ return static_cast<NAME *>(this)->eraseFromParent();
+#include "llvm/IR/Value.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a global");
+}
+
+GlobalObject::~GlobalObject() { setComdat(nullptr); }
+
+bool GlobalValue::isInterposable() const {
+ if (isInterposableLinkage(getLinkage()))
+ return true;
+ return getParent() && getParent()->getSemanticInterposition() &&
+ !isDSOLocal();
+}
+
+bool GlobalValue::canBenefitFromLocalAlias() const {
+ // See AsmPrinter::getSymbolPreferLocal(). For a deduplicate comdat kind,
+ // references to a discarded local symbol from outside the group are not
+ // allowed, so avoid the local alias.
+ auto isDeduplicateComdat = [](const Comdat *C) {
+ return C && C->getSelectionKind() != Comdat::NoDeduplicate;
+ };
+ return hasDefaultVisibility() &&
+ GlobalObject::isExternalLinkage(getLinkage()) && !isDeclaration() &&
+ !isa<GlobalIFunc>(this) && !isDeduplicateComdat(getComdat());
+}
+
+unsigned GlobalValue::getAddressSpace() const {
+ PointerType *PtrTy = getType();
+ return PtrTy->getAddressSpace();
+}
+
+void GlobalObject::setAlignment(MaybeAlign Align) {
+ assert((!Align || *Align <= MaximumAlignment) &&
+ "Alignment is greater than MaximumAlignment!");
+ unsigned AlignmentData = encode(Align);
+ unsigned OldData = getGlobalValueSubClassData();
+ setGlobalValueSubClassData((OldData & ~AlignmentMask) | AlignmentData);
+ assert(MaybeAlign(getAlignment()) == Align &&
+ "Alignment representation error!");
+}
+
+void GlobalObject::copyAttributesFrom(const GlobalObject *Src) {
+ GlobalValue::copyAttributesFrom(Src);
+ setAlignment(Src->getAlign());
+ setSection(Src->getSection());
+}
+
+std::string GlobalValue::getGlobalIdentifier(StringRef Name,
+ GlobalValue::LinkageTypes Linkage,
+ StringRef FileName) {
+
+ // Value names may be prefixed with a binary '1' to indicate
+ // that the backend should not modify the symbols due to any platform
+ // naming convention. Do not include that '1' in the PGO profile name.
+ if (Name[0] == '\1')
+ Name = Name.substr(1);
+
+ std::string NewName = std::string(Name);
+ if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
+ // For local symbols, prepend the main file name to distinguish them.
+ // Do not include the full path in the file name since there's no guarantee
+ // that it will stay the same, e.g., if the files are checked out from
+ // version control in different locations.
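+ // For example, a local symbol "foo" defined in "bar.c" becomes "bar.c:foo".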
+ if (FileName.empty())
+ NewName = NewName.insert(0, "<unknown>:");
+ else
+ NewName = NewName.insert(0, FileName.str() + ":");
+ }
+ return NewName;
+}
+
+std::string GlobalValue::getGlobalIdentifier() const {
+ return getGlobalIdentifier(getName(), getLinkage(),
+ getParent()->getSourceFileName());
+}
+
+StringRef GlobalValue::getSection() const {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getAliaseeObject())
+ return GO->getSection();
+ return "";
+ }
+ return cast<GlobalObject>(this)->getSection();
+}
+
+const Comdat *GlobalValue::getComdat() const {
+ if (auto *GA = dyn_cast<GlobalAlias>(this)) {
+ // In general we cannot compute this at the IR level, but we try.
+ if (const GlobalObject *GO = GA->getAliaseeObject())
+ return const_cast<GlobalObject *>(GO)->getComdat();
+ return nullptr;
+ }
+ // ifunc and its resolver are separate things so don't use resolver comdat.
+ if (isa<GlobalIFunc>(this))
+ return nullptr;
+ return cast<GlobalObject>(this)->getComdat();
+}
+
+void GlobalObject::setComdat(Comdat *C) {
+ if (ObjComdat)
+ ObjComdat->removeUser(this);
+ ObjComdat = C;
+ if (C)
+ C->addUser(this);
+}
+
+StringRef GlobalValue::getPartition() const {
+ if (!hasPartition())
+ return "";
+ return getContext().pImpl->GlobalValuePartitions[this];
+}
+
+void GlobalValue::setPartition(StringRef S) {
+ // Do nothing if we're clearing the partition and it is already empty.
+ if (!hasPartition() && S.empty())
+ return;
+
+ // Get or create a stable partition name string and put it in the table in the
+ // context.
+ if (!S.empty())
+ S = getContext().pImpl->Saver.save(S);
+ getContext().pImpl->GlobalValuePartitions[this] = S;
+
+ // Update the HasPartition field. Setting the partition to the empty string
+ // means this global no longer has a partition.
+ HasPartition = !S.empty();
+}
+
+using SanitizerMetadata = GlobalValue::SanitizerMetadata;
+const SanitizerMetadata &GlobalValue::getSanitizerMetadata() const {
+ assert(hasSanitizerMetadata());
+ assert(getContext().pImpl->GlobalValueSanitizerMetadata.count(this));
+ return getContext().pImpl->GlobalValueSanitizerMetadata[this];
+}
+
+void GlobalValue::setSanitizerMetadata(SanitizerMetadata Meta) {
+ getContext().pImpl->GlobalValueSanitizerMetadata[this] = Meta;
+ HasSanitizerMetadata = true;
+}
+
+void GlobalValue::removeSanitizerMetadata() {
+ DenseMap<const GlobalValue *, SanitizerMetadata> &MetadataMap =
+ getContext().pImpl->GlobalValueSanitizerMetadata;
+ MetadataMap.erase(this);
+ HasSanitizerMetadata = false;
+}
+
+StringRef GlobalObject::getSectionImpl() const {
+ assert(hasSection());
+ return getContext().pImpl->GlobalObjectSections[this];
+}
+
+void GlobalObject::setSection(StringRef S) {
+ // Do nothing if we're clearing the section and it is already empty.
+ if (!hasSection() && S.empty())
+ return;
+
+ // Get or create a stable section name string and put it in the table in the
+ // context.
+ if (!S.empty())
+ S = getContext().pImpl->Saver.save(S);
+ getContext().pImpl->GlobalObjectSections[this] = S;
+
+ // Update the HasSectionHashEntryBit. Setting the section to the empty string
+ // means this global no longer has a section.
+ setGlobalObjectFlag(HasSectionHashEntryBit, !S.empty());
+}
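+
+// Note: setPartition and setSection above share one pattern: the string is
+// stored out of line in a table on the LLVMContext, and the value itself
+// only carries a "has entry" bit, so globals without a section or partition
+// pay no per-value storage cost.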
+
+bool GlobalValue::isDeclaration() const {
+ // Globals are definitions if they have an initializer.
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this))
+ return GV->getNumOperands() == 0;
+
+ // Functions are definitions if they have a body.
+ if (const Function *F = dyn_cast<Function>(this))
+ return F->empty() && !F->isMaterializable();
+
+ // Aliases and ifuncs are always definitions.
+ assert(isa<GlobalAlias>(this) || isa<GlobalIFunc>(this));
+ return false;
+}
+
+bool GlobalObject::canIncreaseAlignment() const {
+  // First, we can only increase the alignment of a global if it
+  // is a strong definition.
+ if (!isStrongDefinitionForLinker())
+ return false;
+
+  // It also has to either not have a section defined, or not have an
+  // alignment specified. (If it is assigned a section, the global
+ // could be densely packed with other objects in the section, and
+ // increasing the alignment could cause padding issues.)
+ if (hasSection() && getAlign())
+ return false;
+
+ // On ELF platforms, we're further restricted in that we can't
+ // increase the alignment of any variable which might be emitted
+ // into a shared library, and which is exported. If the main
+ // executable accesses a variable found in a shared-lib, the main
+ // exe actually allocates memory for and exports the symbol ITSELF,
+ // overriding the symbol found in the library. That is, at link
+ // time, the observed alignment of the variable is copied into the
+ // executable binary. (A COPY relocation is also generated, to copy
+ // the initial data from the shadowed variable in the shared-lib
+ // into the location in the main binary, before running code.)
+ //
+ // And thus, even though you might think you are defining the
+ // global, and allocating the memory for the global in your object
+ // file, and thus should be able to set the alignment arbitrarily,
+ // that's not actually true. Doing so can cause an ABI breakage; an
+ // executable might have already been built with the previous
+ // alignment of the variable, and then assuming an increased
+ // alignment will be incorrect.
+
+ // Conservatively assume ELF if there's no parent pointer.
+ bool isELF =
+ (!Parent || Triple(Parent->getTargetTriple()).isOSBinFormatELF());
+ if (isELF && !isDSOLocal())
+ return false;
+
+ return true;
+}
+
+static const GlobalObject *
+findBaseObject(const Constant *C, DenseSet<const GlobalAlias *> &Aliases) {
+ if (auto *GO = dyn_cast<GlobalObject>(C))
+ return GO;
+ if (auto *GA = dyn_cast<GlobalAlias>(C))
+ if (Aliases.insert(GA).second)
+ return findBaseObject(GA->getOperand(0), Aliases);
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
+ switch (CE->getOpcode()) {
+ case Instruction::Add: {
+ auto *LHS = findBaseObject(CE->getOperand(0), Aliases);
+ auto *RHS = findBaseObject(CE->getOperand(1), Aliases);
+ if (LHS && RHS)
+ return nullptr;
+ return LHS ? LHS : RHS;
+ }
+ case Instruction::Sub: {
+ if (findBaseObject(CE->getOperand(1), Aliases))
+ return nullptr;
+ return findBaseObject(CE->getOperand(0), Aliases);
+ }
+ case Instruction::IntToPtr:
+ case Instruction::PtrToInt:
+ case Instruction::BitCast:
+ case Instruction::GetElementPtr:
+ return findBaseObject(CE->getOperand(0), Aliases);
+ default:
+ break;
+ }
+ }
+ return nullptr;
+}
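+
+// Sketch of how resolution behaves: given the IR
+//
+//   @g = global i32 0
+//   @a = alias i32, i32* @g
+//   @b = alias i32, i32* @a
+//
+// findBaseObject resolves @b through @a to @g. A cyclic alias chain yields
+// nullptr, because the Aliases set refuses to visit an alias twice, as does
+// an Add whose operands both resolve to global objects.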
+
+const GlobalObject *GlobalValue::getAliaseeObject() const {
+ DenseSet<const GlobalAlias *> Aliases;
+ return findBaseObject(this, Aliases);
+}
+
+bool GlobalValue::isAbsoluteSymbolRef() const {
+ auto *GO = dyn_cast<GlobalObject>(this);
+ if (!GO)
+ return false;
+
+ return GO->getMetadata(LLVMContext::MD_absolute_symbol);
+}
+
+Optional<ConstantRange> GlobalValue::getAbsoluteSymbolRange() const {
+ auto *GO = dyn_cast<GlobalObject>(this);
+ if (!GO)
+ return None;
+
+ MDNode *MD = GO->getMetadata(LLVMContext::MD_absolute_symbol);
+ if (!MD)
+ return None;
+
+ return getConstantRangeFromMetadata(*MD);
+}
+
+bool GlobalValue::canBeOmittedFromSymbolTable() const {
+ if (!hasLinkOnceODRLinkage())
+ return false;
+
+ // We assume that anyone who sets global unnamed_addr on a non-constant
+ // knows what they're doing.
+ if (hasGlobalUnnamedAddr())
+ return true;
+
+  // If it is a non-constant variable, it needs to be uniqued across shared
+  // objects.
+ if (auto *Var = dyn_cast<GlobalVariable>(this))
+ if (!Var->isConstant())
+ return false;
+
+ return hasAtLeastLocalUnnamedAddr();
+}
+
+//===----------------------------------------------------------------------===//
+// GlobalVariable Implementation
+//===----------------------------------------------------------------------===//
+
+GlobalVariable::GlobalVariable(Type *Ty, bool constant, LinkageTypes Link,
+ Constant *InitVal, const Twine &Name,
+ ThreadLocalMode TLMode, unsigned AddressSpace,
+ bool isExternallyInitialized)
+ : GlobalObject(Ty, Value::GlobalVariableVal,
+ OperandTraits<GlobalVariable>::op_begin(this),
+ InitVal != nullptr, Link, Name, AddressSpace),
+ isConstantGlobal(constant),
+ isExternallyInitializedConstant(isExternallyInitialized) {
+ assert(!Ty->isFunctionTy() && PointerType::isValidElementType(Ty) &&
+ "invalid type for global variable");
+ setThreadLocalMode(TLMode);
+ if (InitVal) {
+ assert(InitVal->getType() == Ty &&
+ "Initializer should be the same type as the GlobalVariable!");
+ Op<0>() = InitVal;
+ }
+}
+
+GlobalVariable::GlobalVariable(Module &M, Type *Ty, bool constant,
+ LinkageTypes Link, Constant *InitVal,
+ const Twine &Name, GlobalVariable *Before,
+ ThreadLocalMode TLMode,
+ Optional<unsigned> AddressSpace,
+ bool isExternallyInitialized)
+ : GlobalObject(Ty, Value::GlobalVariableVal,
+ OperandTraits<GlobalVariable>::op_begin(this),
+ InitVal != nullptr, Link, Name,
+ AddressSpace
+ ? *AddressSpace
+ : M.getDataLayout().getDefaultGlobalsAddressSpace()),
+ isConstantGlobal(constant),
+ isExternallyInitializedConstant(isExternallyInitialized) {
+ assert(!Ty->isFunctionTy() && PointerType::isValidElementType(Ty) &&
+ "invalid type for global variable");
+ setThreadLocalMode(TLMode);
+ if (InitVal) {
+ assert(InitVal->getType() == Ty &&
+ "Initializer should be the same type as the GlobalVariable!");
+ Op<0>() = InitVal;
+ }
+
+ if (Before)
+ Before->getParent()->getGlobalList().insert(Before->getIterator(), this);
+ else
+ M.getGlobalList().push_back(this);
+}
+
+void GlobalVariable::removeFromParent() {
+ getParent()->getGlobalList().remove(getIterator());
+}
+
+void GlobalVariable::eraseFromParent() {
+ getParent()->getGlobalList().erase(getIterator());
+}
+
+void GlobalVariable::setInitializer(Constant *InitVal) {
+ if (!InitVal) {
+ if (hasInitializer()) {
+      // Note: the operand count is used to compute the offset of the operand,
+      // so the order here matters. Clearing the operand and then clearing the
+      // operand count ensures we have the correct offset to the operand.
+ Op<0>().set(nullptr);
+ setGlobalVariableNumOperands(0);
+ }
+ } else {
+ assert(InitVal->getType() == getValueType() &&
+ "Initializer type must match GlobalVariable type");
+    // Note: the operand count is used to compute the offset of the operand,
+    // so the order here matters. We need to set the operand count to 1 first
+    // so that we get the correct offset to the first operand when we set it.
+ if (!hasInitializer())
+ setGlobalVariableNumOperands(1);
+ Op<0>().set(InitVal);
+ }
+}
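+
+// Usage sketch (GV and Int32Ty assumed in scope): setInitializer toggles a
+// variable between definition and declaration:
+//
+//   GV->setInitializer(ConstantInt::get(Int32Ty, 42)); // now a definition
+//   GV->setInitializer(nullptr);                       // a declaration again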
+
+/// Copy all additional attributes (those not needed to create a GlobalVariable)
+/// from the GlobalVariable Src to this one.
+void GlobalVariable::copyAttributesFrom(const GlobalVariable *Src) {
+ GlobalObject::copyAttributesFrom(Src);
+ setExternallyInitialized(Src->isExternallyInitialized());
+ setAttributes(Src->getAttributes());
+}
+
+void GlobalVariable::dropAllReferences() {
+ User::dropAllReferences();
+ clearMetadata();
+}
+
+//===----------------------------------------------------------------------===//
+// GlobalAlias Implementation
+//===----------------------------------------------------------------------===//
+
+GlobalAlias::GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Link,
+ const Twine &Name, Constant *Aliasee,
+ Module *ParentModule)
+ : GlobalValue(Ty, Value::GlobalAliasVal, &Op<0>(), 1, Link, Name,
+ AddressSpace) {
+ setAliasee(Aliasee);
+ if (ParentModule)
+ ParentModule->getAliasList().push_back(this);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Link, const Twine &Name,
+ Constant *Aliasee, Module *ParentModule) {
+ return new GlobalAlias(Ty, AddressSpace, Link, Name, Aliasee, ParentModule);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ Module *Parent) {
+ return create(Ty, AddressSpace, Linkage, Name, nullptr, Parent);
+}
+
+GlobalAlias *GlobalAlias::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ GlobalValue *Aliasee) {
+ return create(Ty, AddressSpace, Linkage, Name, Aliasee, Aliasee->getParent());
+}
+
+GlobalAlias *GlobalAlias::create(LinkageTypes Link, const Twine &Name,
+ GlobalValue *Aliasee) {
+ return create(Aliasee->getValueType(), Aliasee->getAddressSpace(), Link, Name,
+ Aliasee);
+}
+
+GlobalAlias *GlobalAlias::create(const Twine &Name, GlobalValue *Aliasee) {
+ return create(Aliasee->getLinkage(), Name, Aliasee);
+}
+
+void GlobalAlias::removeFromParent() {
+ getParent()->getAliasList().remove(getIterator());
+}
+
+void GlobalAlias::eraseFromParent() {
+ getParent()->getAliasList().erase(getIterator());
+}
+
+void GlobalAlias::setAliasee(Constant *Aliasee) {
+ assert((!Aliasee || Aliasee->getType() == getType()) &&
+ "Alias and aliasee types should match!");
+ Op<0>().set(Aliasee);
+}
+
+const GlobalObject *GlobalAlias::getAliaseeObject() const {
+ DenseSet<const GlobalAlias *> Aliases;
+ return findBaseObject(getOperand(0), Aliases);
+}
+
+//===----------------------------------------------------------------------===//
+// GlobalIFunc Implementation
+//===----------------------------------------------------------------------===//
+
+GlobalIFunc::GlobalIFunc(Type *Ty, unsigned AddressSpace, LinkageTypes Link,
+ const Twine &Name, Constant *Resolver,
+ Module *ParentModule)
+ : GlobalObject(Ty, Value::GlobalIFuncVal, &Op<0>(), 1, Link, Name,
+ AddressSpace) {
+ setResolver(Resolver);
+ if (ParentModule)
+ ParentModule->getIFuncList().push_back(this);
+}
+
+GlobalIFunc *GlobalIFunc::create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Link, const Twine &Name,
+ Constant *Resolver, Module *ParentModule) {
+ return new GlobalIFunc(Ty, AddressSpace, Link, Name, Resolver, ParentModule);
+}
+
+void GlobalIFunc::removeFromParent() {
+ getParent()->getIFuncList().remove(getIterator());
+}
+
+void GlobalIFunc::eraseFromParent() {
+ getParent()->getIFuncList().erase(getIterator());
+}
+
+const Function *GlobalIFunc::getResolverFunction() const {
+ DenseSet<const GlobalAlias *> Aliases;
+ return dyn_cast<Function>(findBaseObject(getResolver(), Aliases));
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp b/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp
new file mode 100644
index 000000000000..d0c622fe2389
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/IRBuilder.cpp
@@ -0,0 +1,1305 @@
+//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IRBuilder class, which is used as a convenient way
+// to create LLVM instructions with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/NoFolder.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+using namespace llvm;
+
+/// CreateGlobalString - Make a new global variable whose initializer is an
+/// array of i8 filled in with the nul-terminated string value specified. If
+/// Name is specified, it is the name of the global variable created.
+GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
+ const Twine &Name,
+ unsigned AddressSpace,
+ Module *M) {
+ Constant *StrConstant = ConstantDataArray::getString(Context, Str);
+ if (!M)
+ M = BB->getParent()->getParent();
+ auto *GV = new GlobalVariable(
+ *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
+ StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
+ GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
+ GV->setAlignment(Align(1));
+ return GV;
+}
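+
+// Usage sketch (B is an assumed IRBuilder<> positioned inside a function):
+//
+//   GlobalVariable *GV = B.CreateGlobalString("hi", ".str");
+//   // @.str = private unnamed_addr constant [3 x i8] c"hi\00", align 1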
+
+Type *IRBuilderBase::getCurrentFunctionReturnType() const {
+ assert(BB && BB->getParent() && "No current function!");
+ return BB->getParent()->getReturnType();
+}
+
+Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
+ auto *PT = cast<PointerType>(Ptr->getType());
+ if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
+ return Ptr;
+
+ // Otherwise, we need to insert a bitcast.
+ return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
+}
+
+DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
+ for (auto &KV : MetadataToCopy)
+ if (KV.first == LLVMContext::MD_dbg)
+ return {cast<DILocation>(KV.second)};
+
+ return {};
+}
+void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
+ for (const auto &KV : MetadataToCopy)
+ if (KV.first == LLVMContext::MD_dbg) {
+ I->setDebugLoc(DebugLoc(KV.second));
+ return;
+ }
+}
+
+static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
+ IRBuilderBase *Builder,
+ const Twine &Name = "",
+ Instruction *FMFSource = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = {}) {
+ CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
+ if (FMFSource)
+ CI->copyFastMathFlags(FMFSource);
+ return CI;
+}
+
+Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
+ assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
+ if (cast<ConstantInt>(Scaling)->isZero())
+ return Scaling;
+ Module *M = GetInsertBlock()->getParent()->getParent();
+ Function *TheFn =
+ Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
+ CallInst *CI = createCallHelper(TheFn, {}, this, Name);
+ return cast<ConstantInt>(Scaling)->getSExtValue() == 1
+ ? CI
+ : CreateMul(CI, Scaling);
+}
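+
+// Sketch: CreateVScale(B.getInt64(4)) emits
+//
+//   %vs = call i64 @llvm.vscale.i64()
+//   %n  = mul i64 %vs, 4
+//
+// a scaling of 1 returns the vscale call itself, and a scaling of 0 folds
+// to the constant zero without emitting a call.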
+
+Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
+ Type *STy = DstType->getScalarType();
+ if (isa<ScalableVectorType>(DstType)) {
+ Type *StepVecType = DstType;
+ // TODO: We expect this special case (element type < 8 bits) to be
+ // temporary - once the intrinsic properly supports < 8 bits this code
+ // can be removed.
+ if (STy->getScalarSizeInBits() < 8)
+ StepVecType =
+ VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
+ Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
+ {StepVecType}, {}, nullptr, Name);
+ if (StepVecType != DstType)
+ Res = CreateTrunc(Res, DstType);
+ return Res;
+ }
+
+ unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
+
+  // Create a vector of the consecutive integers 0, 1, ..., NumEls - 1.
+ SmallVector<Constant *, 8> Indices;
+ for (unsigned i = 0; i < NumEls; ++i)
+ Indices.push_back(ConstantInt::get(STy, i));
+
+  // Return the consecutive indices as a constant vector.
+ return ConstantVector::get(Indices);
+}
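+
+// Sketch: for a fixed <4 x i32> destination this folds to the constant
+// <i32 0, i32 1, i32 2, i32 3>; scalable destinations instead call
+// @llvm.experimental.stepvector, widening sub-i8 element types to i8 and
+// truncating the result back.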
+
+CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
+ MaybeAlign Align, bool isVolatile,
+ MDNode *TBAATag, MDNode *ScopeTag,
+ MDNode *NoAliasTag) {
+ Ptr = getCastedInt8PtrValue(Ptr);
+ Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
+ Type *Tys[] = { Ptr->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ if (Align)
+ cast<MemSetInst>(CI)->setDestAlignment(*Align);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
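+
+// Usage sketch (B is an assumed IRBuilder<>, P a pointer value): zeroing 16
+// bytes at P with 8-byte destination alignment:
+//
+//   B.CreateMemSet(P, B.getInt8(0), B.getInt64(16), MaybeAlign(8));
+//
+// emits a call to @llvm.memset.p0i8.i64 and then attaches the alignment as
+// a parameter attribute on the destination.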
+
+CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
+ Value *Val, Value *Size,
+ bool IsVolatile, MDNode *TBAATag,
+ MDNode *ScopeTag,
+ MDNode *NoAliasTag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
+ Type *Tys[] = {Dst->getType(), Size->getType()};
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ if (DstAlign)
+ cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
+ Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
+ MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
+
+ Ptr = getCastedInt8PtrValue(Ptr);
+ Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
+ Type *Tys[] = {Ptr->getType(), Size->getType()};
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(
+ M, Intrinsic::memset_element_unordered_atomic, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateMemTransferInst(
+ Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
+ MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
+ MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
+ Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ auto* MCI = cast<MemTransferInst>(CI);
+ if (DstAlign)
+ MCI->setDestAlignment(*DstAlign);
+ if (SrcAlign)
+ MCI->setSourceAlignment(*SrcAlign);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ // Set the TBAA Struct info if present.
+ if (TBAAStructTag)
+ CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateMemCpyInline(
+ Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
+ Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
+ MDNode *ScopeTag, MDNode *NoAliasTag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
+ Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
+ Function *F = BB->getParent();
+ Module *M = F->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ auto *MCI = cast<MemCpyInlineInst>(CI);
+ if (DstAlign)
+ MCI->setDestAlignment(*DstAlign);
+ if (SrcAlign)
+ MCI->setSourceAlignment(*SrcAlign);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ // Set the TBAA Struct info if present.
+ if (TBAAStructTag)
+ MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+ if (ScopeTag)
+ MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
+ Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
+ uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
+ MDNode *ScopeTag, MDNode *NoAliasTag) {
+ assert(DstAlign >= ElementSize &&
+ "Pointer alignment must be at least element size");
+ assert(SrcAlign >= ElementSize &&
+ "Pointer alignment must be at least element size");
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
+ Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(
+ M, Intrinsic::memcpy_element_unordered_atomic, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ // Set the alignment of the pointer args.
+ auto *AMCI = cast<AtomicMemCpyInst>(CI);
+ AMCI->setDestAlignment(DstAlign);
+ AMCI->setSourceAlignment(SrcAlign);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ // Set the TBAA Struct info if present.
+ if (TBAAStructTag)
+ CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
+ Value *Src, MaybeAlign SrcAlign,
+ Value *Size, bool isVolatile,
+ MDNode *TBAATag, MDNode *ScopeTag,
+ MDNode *NoAliasTag) {
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
+ Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ auto *MMI = cast<MemMoveInst>(CI);
+ if (DstAlign)
+ MMI->setDestAlignment(*DstAlign);
+ if (SrcAlign)
+ MMI->setSourceAlignment(*SrcAlign);
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
+ Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
+ uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
+ MDNode *ScopeTag, MDNode *NoAliasTag) {
+ assert(DstAlign >= ElementSize &&
+ "Pointer alignment must be at least element size");
+ assert(SrcAlign >= ElementSize &&
+ "Pointer alignment must be at least element size");
+ Dst = getCastedInt8PtrValue(Dst);
+ Src = getCastedInt8PtrValue(Src);
+
+ Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
+ Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(
+ M, Intrinsic::memmove_element_unordered_atomic, Tys);
+
+ CallInst *CI = createCallHelper(TheFn, Ops, this);
+
+ // Set the alignment of the pointer args.
+ CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
+ CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
+
+ // Set the TBAA info if present.
+ if (TBAATag)
+ CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+
+ // Set the TBAA Struct info if present.
+ if (TBAAStructTag)
+ CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
+
+ if (ScopeTag)
+ CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
+
+ if (NoAliasTag)
+ CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
+
+ return CI;
+}
+
+static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
+ Value *Src) {
+ Module *M = Builder->GetInsertBlock()->getParent()->getParent();
+ Value *Ops[] = {Src};
+ Type *Tys[] = { Src->getType() };
+ auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
+ return createCallHelper(Decl, Ops, Builder);
+}
+
+CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
+ Module *M = GetInsertBlock()->getParent()->getParent();
+ Value *Ops[] = {Acc, Src};
+ auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
+ {Src->getType()});
+ return createCallHelper(Decl, Ops, this);
+}
+
+CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
+ Module *M = GetInsertBlock()->getParent()->getParent();
+ Value *Ops[] = {Acc, Src};
+ auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
+ {Src->getType()});
+ return createCallHelper(Decl, Ops, this);
+}
+
+CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src);
+}
+
+CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src);
+}
+
+CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src);
+}
+
+CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src);
+}
+
+CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src);
+}
+
+CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
+ auto ID =
+ IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
+ return getReductionIntrinsic(this, ID, Src);
+}
+
+CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
+ auto ID =
+ IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
+ return getReductionIntrinsic(this, ID, Src);
+}
+
+CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
+}
+
+CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
+ return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
+}
+
+CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "lifetime.start only applies to pointers.");
+ Ptr = getCastedInt8PtrValue(Ptr);
+ if (!Size)
+ Size = getInt64(-1);
+ else
+ assert(Size->getType() == getInt64Ty() &&
+ "lifetime.start requires the size to be an i64");
+ Value *Ops[] = { Size, Ptr };
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn =
+ Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
+ return createCallHelper(TheFn, Ops, this);
+}
+
+CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "lifetime.end only applies to pointers.");
+ Ptr = getCastedInt8PtrValue(Ptr);
+ if (!Size)
+ Size = getInt64(-1);
+ else
+ assert(Size->getType() == getInt64Ty() &&
+ "lifetime.end requires the size to be an i64");
+ Value *Ops[] = { Size, Ptr };
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn =
+ Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
+ return createCallHelper(TheFn, Ops, this);
+}
+
+CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
+
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "invariant.start only applies to pointers.");
+ Ptr = getCastedInt8PtrValue(Ptr);
+ if (!Size)
+ Size = getInt64(-1);
+ else
+ assert(Size->getType() == getInt64Ty() &&
+ "invariant.start requires the size to be an i64");
+
+ Value *Ops[] = {Size, Ptr};
+ // Fill in the single overloaded type: memory object type.
+ Type *ObjectPtr[1] = {Ptr->getType()};
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn =
+ Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
+ return createCallHelper(TheFn, Ops, this);
+}
+
+CallInst *
+IRBuilderBase::CreateAssumption(Value *Cond,
+ ArrayRef<OperandBundleDef> OpBundles) {
+ assert(Cond->getType() == getInt1Ty() &&
+ "an assumption condition must be of type i1");
+
+ Value *Ops[] = { Cond };
+ Module *M = BB->getParent()->getParent();
+ Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
+ return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
+}
+
+Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
+ Module *M = BB->getModule();
+ auto *FnIntrinsic = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_noalias_scope_decl, {});
+ return createCallHelper(FnIntrinsic, {Scope}, this);
+}
+
+/// Create a call to a Masked Load intrinsic.
+/// \p Ty - vector type to load
+/// \p Ptr - base pointer for the load
+/// \p Alignment - alignment of the source location
+/// \p Mask - vector of booleans which indicates what vector lanes should
+/// be accessed in memory
+/// \p PassThru - pass-through value that is used to fill the masked-off lanes
+/// of the result
+/// \p Name - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
+ Value *Mask, Value *PassThru,
+ const Twine &Name) {
+ auto *PtrTy = cast<PointerType>(Ptr->getType());
+ assert(Ty->isVectorTy() && "Type should be vector");
+ assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
+ assert(Mask && "Mask should not be all-ones (null)");
+ if (!PassThru)
+ PassThru = UndefValue::get(Ty);
+ Type *OverloadedTypes[] = { Ty, PtrTy };
+ Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
+ return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
+ OverloadedTypes, Name);
+}
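+
+// Usage sketch (B, Ptr and Mask assumed in scope):
+//
+//   auto *VecTy = FixedVectorType::get(B.getFloatTy(), 4);
+//   Value *V = B.CreateMaskedLoad(VecTy, Ptr, Align(16), Mask);
+//
+// Lanes disabled by Mask are filled from PassThru, which defaults to undef
+// here since no pass-through value was supplied.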
+
+/// Create a call to a Masked Store intrinsic.
+/// \p Val - data to be stored,
+/// \p Ptr - base pointer for the store
+/// \p Alignment - alignment of the destination location
+/// \p Mask - vector of booleans which indicates what vector lanes should
+/// be accessed in memory
+CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
+ Align Alignment, Value *Mask) {
+ auto *PtrTy = cast<PointerType>(Ptr->getType());
+ Type *DataTy = Val->getType();
+ assert(DataTy->isVectorTy() && "Val should be a vector");
+ assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
+ assert(Mask && "Mask should not be all-ones (null)");
+ Type *OverloadedTypes[] = { DataTy, PtrTy };
+ Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
+ return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
+}
+
+/// Create a call to a Masked intrinsic, with given intrinsic Id,
+/// an array of operands - Ops, and an array of overloaded types -
+/// OverloadedTypes.
+CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
+ ArrayRef<Value *> Ops,
+ ArrayRef<Type *> OverloadedTypes,
+ const Twine &Name) {
+ Module *M = BB->getParent()->getParent();
+ Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
+ return createCallHelper(TheFn, Ops, this, Name);
+}
+
+/// Create a call to a Masked Gather intrinsic.
+/// \p Ty - vector type to gather
+/// \p Ptrs - vector of pointers for loading
+/// \p Align - alignment for one element
+/// \p Mask - vector of booleans which indicates what vector lanes should
+/// be accessed in memory
+/// \p PassThru - pass-through value that is used to fill the masked-off lanes
+/// of the result
+/// \p Name - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
+ Align Alignment, Value *Mask,
+ Value *PassThru,
+ const Twine &Name) {
+ auto *VecTy = cast<VectorType>(Ty);
+ ElementCount NumElts = VecTy->getElementCount();
+ auto *PtrsTy = cast<VectorType>(Ptrs->getType());
+ assert(cast<PointerType>(PtrsTy->getElementType())
+ ->isOpaqueOrPointeeTypeMatches(
+ cast<VectorType>(Ty)->getElementType()) &&
+ "Element type mismatch");
+ assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
+
+ if (!Mask)
+ Mask = Constant::getAllOnesValue(
+ VectorType::get(Type::getInt1Ty(Context), NumElts));
+
+ if (!PassThru)
+ PassThru = UndefValue::get(Ty);
+
+ Type *OverloadedTypes[] = {Ty, PtrsTy};
+ Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
+
+ // We specify only one type when we create this intrinsic. Types of other
+ // arguments are derived from this type.
+ return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
+ Name);
+}
+
+/// Create a call to a Masked Scatter intrinsic.
+/// \p Data - data to be stored,
+/// \p Ptrs - the vector of pointers, where the \p Data elements should be
+/// stored
+/// \p Align - alignment for one element
+/// \p Mask - vector of booleans which indicates what vector lanes should
+/// be accessed in memory
+CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
+ Align Alignment, Value *Mask) {
+ auto *PtrsTy = cast<VectorType>(Ptrs->getType());
+ auto *DataTy = cast<VectorType>(Data->getType());
+ ElementCount NumElts = PtrsTy->getElementCount();
+
+#ifndef NDEBUG
+ auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
+ assert(NumElts == DataTy->getElementCount() &&
+ PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
+ "Incompatible pointer and data types");
+#endif
+
+ if (!Mask)
+ Mask = Constant::getAllOnesValue(
+ VectorType::get(Type::getInt1Ty(Context), NumElts));
+
+ Type *OverloadedTypes[] = {DataTy, PtrsTy};
+ Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
+
+ // We specify only one type when we create this intrinsic. Types of other
+ // arguments are derived from this type.
+ return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
+}
+
+template <typename T0>
+static std::vector<Value *>
+getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
+ std::vector<Value *> Args;
+ Args.push_back(B.getInt64(ID));
+ Args.push_back(B.getInt32(NumPatchBytes));
+ Args.push_back(ActualCallee);
+ Args.push_back(B.getInt32(CallArgs.size()));
+ Args.push_back(B.getInt32(Flags));
+ llvm::append_range(Args, CallArgs);
+ // GC Transition and Deopt args are now always handled via operand bundle.
+ // They will be removed from the signature of gc.statepoint shortly.
+ Args.push_back(B.getInt32(0));
+ Args.push_back(B.getInt32(0));
+ // GC args are now encoded in the gc-live operand bundle
+ return Args;
+}
+
+template<typename T1, typename T2, typename T3>
+static std::vector<OperandBundleDef>
+getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
+ Optional<ArrayRef<T2>> DeoptArgs,
+ ArrayRef<T3> GCArgs) {
+ std::vector<OperandBundleDef> Rval;
+ if (DeoptArgs) {
+ SmallVector<Value*, 16> DeoptValues;
+ llvm::append_range(DeoptValues, *DeoptArgs);
+ Rval.emplace_back("deopt", DeoptValues);
+ }
+ if (TransitionArgs) {
+ SmallVector<Value*, 16> TransitionValues;
+ llvm::append_range(TransitionValues, *TransitionArgs);
+ Rval.emplace_back("gc-transition", TransitionValues);
+ }
+ if (GCArgs.size()) {
+ SmallVector<Value*, 16> LiveValues;
+ llvm::append_range(LiveValues, GCArgs);
+ Rval.emplace_back("gc-live", LiveValues);
+ }
+ return Rval;
+}
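+
+// Sketch of the statepoint operand layout these two helpers build:
+//
+//   gc.statepoint(i64 ID, i32 NumPatchBytes, callee, i32 NumCallArgs,
+//                 i32 Flags, <call args...>, i32 0, i32 0)
+//       [ "deopt"(...), "gc-transition"(...), "gc-live"(...) ]
+//
+// The two trailing zeros are the legacy transition/deopt argument counts
+// mentioned above; the real values travel in the operand bundles.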
+
+template <typename T0, typename T1, typename T2, typename T3>
+static CallInst *CreateGCStatepointCallCommon(
+ IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
+ FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
+ Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
+ ArrayRef<T3> GCArgs, const Twine &Name) {
+ Module *M = Builder->GetInsertBlock()->getParent()->getParent();
+  // Fill in the one generic typed argument (the function is also vararg).
+ Function *FnStatepoint =
+ Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
+ {ActualCallee.getCallee()->getType()});
+
+ std::vector<Value *> Args = getStatepointArgs(
+ *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);
+
+ CallInst *CI = Builder->CreateCall(
+ FnStatepoint, Args,
+ getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
+ CI->addParamAttr(2,
+ Attribute::get(Builder->getContext(), Attribute::ElementType,
+ ActualCallee.getFunctionType()));
+ return CI;
+}
+
+CallInst *IRBuilderBase::CreateGCStatepointCall(
+ uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
+ ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
+ CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCStatepointCall(
+ uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
+ uint32_t Flags, ArrayRef<Value *> CallArgs,
+ Optional<ArrayRef<Use>> TransitionArgs, Optional<ArrayRef<Use>> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
+ this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
+ DeoptArgs, GCArgs, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCStatepointCall(
+ uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
+ ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
+ CallArgs, None, DeoptArgs, GCArgs, Name);
+}
+
+template <typename T0, typename T1, typename T2, typename T3>
+static InvokeInst *CreateGCStatepointInvokeCommon(
+ IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
+ FunctionCallee ActualInvokee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
+ Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
+ ArrayRef<T3> GCArgs, const Twine &Name) {
+ Module *M = Builder->GetInsertBlock()->getParent()->getParent();
+  // Fill in the one generic typed argument (the function is also vararg).
+ Function *FnStatepoint =
+ Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
+ {ActualInvokee.getCallee()->getType()});
+
+ std::vector<Value *> Args =
+ getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
+ Flags, InvokeArgs);
+
+ InvokeInst *II = Builder->CreateInvoke(
+ FnStatepoint, NormalDest, UnwindDest, Args,
+ getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
+ II->addParamAttr(2,
+ Attribute::get(Builder->getContext(), Attribute::ElementType,
+ ActualInvokee.getFunctionType()));
+ return II;
+}
+
+InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
+ uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest,
+ ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name) {
+ return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
+ uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
+ DeoptArgs, GCArgs, Name);
+}
+
+InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
+ uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
+ ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
+ Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
+ const Twine &Name) {
+ return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
+ this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
+ InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
+}
+
+InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
+ uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
+ Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
+ const Twine &Name) {
+ return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
+ this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
+ uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
+ Name);
+}
+
+CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
+ Type *ResultType, const Twine &Name) {
+ Intrinsic::ID ID = Intrinsic::experimental_gc_result;
+ Module *M = BB->getParent()->getParent();
+ Type *Types[] = {ResultType};
+ Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
+
+ Value *Args[] = {Statepoint};
+ return createCallHelper(FnGCResult, Args, this, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
+ int BaseOffset, int DerivedOffset,
+ Type *ResultType, const Twine &Name) {
+ Module *M = BB->getParent()->getParent();
+ Type *Types[] = {ResultType};
+ Function *FnGCRelocate =
+ Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
+
+ Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
+ return createCallHelper(FnGCRelocate, Args, this, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
+ const Twine &Name) {
+ Module *M = BB->getParent()->getParent();
+ Type *PtrTy = DerivedPtr->getType();
+ Function *FnGCFindBase = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
+ return createCallHelper(FnGCFindBase, {DerivedPtr}, this, Name);
+}
+
+CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
+ const Twine &Name) {
+ Module *M = BB->getParent()->getParent();
+ Type *PtrTy = DerivedPtr->getType();
+ Function *FnGCGetOffset = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
+ return createCallHelper(FnGCGetOffset, {DerivedPtr}, this, Name);
+}
+
+CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
+ Instruction *FMFSource,
+ const Twine &Name) {
+ Module *M = BB->getModule();
+ Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
+ return createCallHelper(Fn, {V}, this, Name, FMFSource);
+}
+
+CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
+ Value *RHS,
+ Instruction *FMFSource,
+ const Twine &Name) {
+ Module *M = BB->getModule();
+ Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
+ return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
+}
+
+CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
+ ArrayRef<Type *> Types,
+ ArrayRef<Value *> Args,
+ Instruction *FMFSource,
+ const Twine &Name) {
+ Module *M = BB->getModule();
+ Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
+ return createCallHelper(Fn, Args, this, Name, FMFSource);
+}
+
+CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
+ Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
+ const Twine &Name, MDNode *FPMathTag,
+ Optional<RoundingMode> Rounding,
+ Optional<fp::ExceptionBehavior> Except) {
+ Value *RoundingV = getConstrainedFPRounding(Rounding);
+ Value *ExceptV = getConstrainedFPExcept(Except);
+
+ FastMathFlags UseFMF = FMF;
+ if (FMFSource)
+ UseFMF = FMFSource->getFastMathFlags();
+
+ CallInst *C = CreateIntrinsic(ID, {L->getType()},
+ {L, R, RoundingV, ExceptV}, nullptr, Name);
+ setConstrainedFPCallAttr(C);
+ setFPAttrs(C, FPMathTag, UseFMF);
+ return C;
+}
+
+Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
+ const Twine &Name, MDNode *FPMathTag) {
+ if (Instruction::isBinaryOp(Opc)) {
+ assert(Ops.size() == 2 && "Invalid number of operands!");
+ return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
+ Ops[0], Ops[1], Name, FPMathTag);
+ }
+ if (Instruction::isUnaryOp(Opc)) {
+ assert(Ops.size() == 1 && "Invalid number of operands!");
+ return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
+ Ops[0], Name, FPMathTag);
+ }
+ llvm_unreachable("Unexpected opcode!");
+}
+
+CallInst *IRBuilderBase::CreateConstrainedFPCast(
+ Intrinsic::ID ID, Value *V, Type *DestTy,
+ Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
+ Optional<RoundingMode> Rounding,
+ Optional<fp::ExceptionBehavior> Except) {
+ Value *ExceptV = getConstrainedFPExcept(Except);
+
+ FastMathFlags UseFMF = FMF;
+ if (FMFSource)
+ UseFMF = FMFSource->getFastMathFlags();
+
+ CallInst *C;
+ bool HasRoundingMD = false;
+ switch (ID) {
+ default:
+ break;
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC: \
+ HasRoundingMD = ROUND_MODE; \
+ break;
+#include "llvm/IR/ConstrainedOps.def"
+ }
+ if (HasRoundingMD) {
+ Value *RoundingV = getConstrainedFPRounding(Rounding);
+ C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
+ nullptr, Name);
+ } else
+ C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
+ Name);
+
+ setConstrainedFPCallAttr(C);
+
+ if (isa<FPMathOperator>(C))
+ setFPAttrs(C, FPMathTag, UseFMF);
+ return C;
+}
+
+Value *IRBuilderBase::CreateFCmpHelper(
+ CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
+ MDNode *FPMathTag, bool IsSignaling) {
+ if (IsFPConstrained) {
+ auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
+ : Intrinsic::experimental_constrained_fcmp;
+ return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
+ }
+
+ if (auto *LC = dyn_cast<Constant>(LHS))
+ if (auto *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateFCmp(P, LC, RC), Name);
+ return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
+}
+
+CallInst *IRBuilderBase::CreateConstrainedFPCmp(
+ Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
+ const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
+ Value *PredicateV = getConstrainedFPPredicate(P);
+ Value *ExceptV = getConstrainedFPExcept(Except);
+
+ CallInst *C = CreateIntrinsic(ID, {L->getType()},
+ {L, R, PredicateV, ExceptV}, nullptr, Name);
+ setConstrainedFPCallAttr(C);
+ return C;
+}
+
+CallInst *IRBuilderBase::CreateConstrainedFPCall(
+ Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
+ Optional<RoundingMode> Rounding,
+ Optional<fp::ExceptionBehavior> Except) {
+ llvm::SmallVector<Value *, 6> UseArgs;
+
+ append_range(UseArgs, Args);
+ bool HasRoundingMD = false;
+ switch (Callee->getIntrinsicID()) {
+ default:
+ break;
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC: \
+ HasRoundingMD = ROUND_MODE; \
+ break;
+#include "llvm/IR/ConstrainedOps.def"
+ }
+ if (HasRoundingMD)
+ UseArgs.push_back(getConstrainedFPRounding(Rounding));
+ UseArgs.push_back(getConstrainedFPExcept(Except));
+
+ CallInst *C = CreateCall(Callee, UseArgs, Name);
+ setConstrainedFPCallAttr(C);
+ return C;
+}
+
+Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
+ const Twine &Name, Instruction *MDFrom) {
+ if (auto *V = Folder.FoldSelect(C, True, False))
+ return V;
+
+ SelectInst *Sel = SelectInst::Create(C, True, False);
+ if (MDFrom) {
+ MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
+ MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
+ Sel = addBranchMetadata(Sel, Prof, Unpred);
+ }
+ if (isa<FPMathOperator>(Sel))
+ setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
+ return Insert(Sel, Name);
+}
+
+Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
+ const Twine &Name) {
+ assert(LHS->getType() == RHS->getType() &&
+ "Pointer subtraction operand types must match!");
+ assert(cast<PointerType>(LHS->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
+ "Pointer type must match element type");
+ Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
+ Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
+ Value *Difference = CreateSub(LHS_int, RHS_int);
+ return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
+ Name);
+}
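+
+// Sketch: for i32 elements this conceptually emits
+//
+//   %l = ptrtoint i32* %LHS to i64
+//   %r = ptrtoint i32* %RHS to i64
+//   %d = sub i64 %l, %r
+//   %n = sdiv exact i64 %d, 4   ; sizeof(i32) via ConstantExpr::getSizeOf
+//
+// i.e. the pointer difference measured in elements, matching C pointer
+// subtraction.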
+
+Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "launder.invariant.group only applies to pointers.");
+ // FIXME: we could potentially avoid casts to/from i8*.
+ auto *PtrType = Ptr->getType();
+ auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
+ if (PtrType != Int8PtrTy)
+ Ptr = CreateBitCast(Ptr, Int8PtrTy);
+ Module *M = BB->getParent()->getParent();
+ Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
+ M, Intrinsic::launder_invariant_group, {Int8PtrTy});
+
+ assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
+ FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
+ Int8PtrTy &&
+ "LaunderInvariantGroup should take and return the same type");
+
+ CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
+
+ if (PtrType != Int8PtrTy)
+ return CreateBitCast(Fn, PtrType);
+ return Fn;
+}
+
+Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
+ assert(isa<PointerType>(Ptr->getType()) &&
+ "strip.invariant.group only applies to pointers.");
+
+ // FIXME: we could potentially avoid casts to/from i8*.
+ auto *PtrType = Ptr->getType();
+ auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
+ if (PtrType != Int8PtrTy)
+ Ptr = CreateBitCast(Ptr, Int8PtrTy);
+ Module *M = BB->getParent()->getParent();
+ Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
+ M, Intrinsic::strip_invariant_group, {Int8PtrTy});
+
+ assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
+ FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
+ Int8PtrTy &&
+ "StripInvariantGroup should take and return the same type");
+
+ CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
+
+ if (PtrType != Int8PtrTy)
+ return CreateBitCast(Fn, PtrType);
+ return Fn;
+}
+
+Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
+ auto *Ty = cast<VectorType>(V->getType());
+ if (isa<ScalableVectorType>(Ty)) {
+ Module *M = BB->getParent()->getParent();
+ Function *F = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_vector_reverse, Ty);
+ return Insert(CallInst::Create(F, V), Name);
+ }
+  // Keep the original behaviour for fixed vectors.
+ SmallVector<int, 8> ShuffleMask;
+ int NumElts = Ty->getElementCount().getKnownMinValue();
+ for (int i = 0; i < NumElts; ++i)
+ ShuffleMask.push_back(NumElts - i - 1);
+ return CreateShuffleVector(V, ShuffleMask, Name);
+}
+
+Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
+ const Twine &Name) {
+ assert(isa<VectorType>(V1->getType()) && "Unexpected type");
+ assert(V1->getType() == V2->getType() &&
+ "Splice expects matching operand types!");
+
+ if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
+ Module *M = BB->getParent()->getParent();
+ Function *F = Intrinsic::getDeclaration(
+ M, Intrinsic::experimental_vector_splice, VTy);
+
+ Value *Ops[] = {V1, V2, getInt32(Imm)};
+ return Insert(CallInst::Create(F, Ops), Name);
+ }
+
+ unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
+ assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
+ "Invalid immediate for vector splice!");
+
+  // Keep the original behaviour for fixed vectors.
+ unsigned Idx = (NumElts + Imm) % NumElts;
+ SmallVector<int, 8> Mask;
+ for (unsigned I = 0; I < NumElts; ++I)
+ Mask.push_back(Idx + I);
+
+ return CreateShuffleVector(V1, V2, Mask);
+}
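+
+// Sketch: splicing two fixed <4 x i32> vectors with Imm = 1 uses the
+// shuffle mask <1, 2, 3, 4>, i.e. the last three elements of V1 followed by
+// the first element of V2; a negative Imm takes -Imm trailing elements of
+// V1 instead.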
+
+Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
+ const Twine &Name) {
+ auto EC = ElementCount::getFixed(NumElts);
+ return CreateVectorSplat(EC, V, Name);
+}
+
+Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
+ const Twine &Name) {
+ assert(EC.isNonZero() && "Cannot splat to an empty vector!");
+
+ // First insert it into a poison vector so we can shuffle it.
+ Type *I32Ty = getInt32Ty();
+ Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
+ V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
+ Name + ".splatinsert");
+
+ // Shuffle the value across the desired number of elements.
+ SmallVector<int, 16> Zeros;
+ Zeros.resize(EC.getKnownMinValue());
+ return CreateShuffleVector(V, Zeros, Name + ".splat");
+}
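+
+// Sketch: splatting an i32 %x across <4 x i32> emits
+//
+//   %ins   = insertelement <4 x i32> poison, i32 %x, i32 0
+//   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
+//                          <4 x i32> zeroinitializer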
+
+Value *IRBuilderBase::CreateExtractInteger(
+ const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
+ uint64_t Offset, const Twine &Name) {
+ auto *IntTy = cast<IntegerType>(From->getType());
+ assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
+ DL.getTypeStoreSize(IntTy) &&
+ "Element extends past full value");
+ uint64_t ShAmt = 8 * Offset;
+ Value *V = From;
+ if (DL.isBigEndian())
+ ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
+ DL.getTypeStoreSize(ExtractedTy) - Offset);
+ if (ShAmt) {
+ V = CreateLShr(V, ShAmt, Name + ".shift");
+ }
+ assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
+ "Cannot extract to a larger integer!");
+ if (ExtractedTy != IntTy) {
+ V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
+ }
+ return V;
+}
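+
+// Worked example (sketch): extracting the i16 at byte offset 2 from an i64
+// on a little-endian target shifts by 8 * 2 = 16 bits and truncates:
+//
+//   %s = lshr i64 %From, 16
+//   %t = trunc i64 %s to i16
+//
+// On a big-endian target the shift amount is 8 * (8 - 2 - 2) = 32 instead.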
+
+Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
+ Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
+ MDNode *DbgInfo) {
+ auto *BaseType = Base->getType();
+ assert(isa<PointerType>(BaseType) &&
+ "Invalid Base ptr type for preserve.array.access.index.");
+ assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
+ "Pointer element type mismatch");
+
+ Value *LastIndexV = getInt32(LastIndex);
+ Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ SmallVector<Value *, 4> IdxList(Dimension, Zero);
+ IdxList.push_back(LastIndexV);
+
+ Type *ResultType =
+ GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);
+
+ Module *M = BB->getParent()->getParent();
+ Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
+ M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
+
+ Value *DimV = getInt32(Dimension);
+ CallInst *Fn =
+ CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
+ Fn->addParamAttr(
+ 0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
+ if (DbgInfo)
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+
+ return Fn;
+}
+
+Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
+ Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
+ assert(isa<PointerType>(Base->getType()) &&
+ "Invalid Base ptr type for preserve.union.access.index.");
+ auto *BaseType = Base->getType();
+
+ Module *M = BB->getParent()->getParent();
+ Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
+ M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
+
+ Value *DIIndex = getInt32(FieldIndex);
+ CallInst *Fn =
+ CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
+ if (DbgInfo)
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+
+ return Fn;
+}
+
+Value *IRBuilderBase::CreatePreserveStructAccessIndex(
+ Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
+ MDNode *DbgInfo) {
+ auto *BaseType = Base->getType();
+ assert(isa<PointerType>(BaseType) &&
+ "Invalid Base ptr type for preserve.struct.access.index.");
+ assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
+ "Pointer element type mismatch");
+
+ Value *GEPIndex = getInt32(Index);
+ Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Type *ResultType =
+ GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});
+
+ Module *M = BB->getParent()->getParent();
+ Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
+ M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
+
+ Value *DIIndex = getInt32(FieldIndex);
+ CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
+ {Base, GEPIndex, DIIndex});
+ Fn->addParamAttr(
+ 0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
+ if (DbgInfo)
+ Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
+
+ return Fn;
+}
+
+CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
+ Value *PtrValue,
+ Value *AlignValue,
+ Value *OffsetValue) {
+ SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
+ if (OffsetValue)
+ Vals.push_back(OffsetValue);
+ OperandBundleDefT<Value *> AlignOpB("align", Vals);
+ return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
+}
+
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
+ Value *PtrValue,
+ unsigned Alignment,
+ Value *OffsetValue) {
+ assert(isa<PointerType>(PtrValue->getType()) &&
+ "trying to create an alignment assumption on a non-pointer?");
+ assert(Alignment != 0 && "Invalid Alignment");
+ auto *PtrTy = cast<PointerType>(PtrValue->getType());
+ Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
+ Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
+ return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
+}
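+
+// Illustrative sketch, not part of the original file: asserting that %P is
+// 16-byte aligned produces an assume carrying an "align" operand bundle,
+//   call void @llvm.assume(i1 true) [ "align"(i8* %P, i64 16) ]
+// rather than the old ptrtoint/and/icmp expansion of the predicate.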
+
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
+ Value *PtrValue,
+ Value *Alignment,
+ Value *OffsetValue) {
+ assert(isa<PointerType>(PtrValue->getType()) &&
+ "trying to create an alignment assumption on a non-pointer?");
+ return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
+}
+
+IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
+IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
+IRBuilderFolder::~IRBuilderFolder() = default;
+void ConstantFolder::anchor() {}
+void NoFolder::anchor() {}
diff --git a/contrib/llvm-project/llvm/lib/IR/IRPrintingPasses.cpp b/contrib/llvm-project/llvm/lib/IR/IRPrintingPasses.cpp
new file mode 100644
index 000000000000..8d6fe1eb6134
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/IRPrintingPasses.cpp
@@ -0,0 +1,140 @@
+//===--- IRPrintingPasses.cpp - Module and Function printing passes -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// PrintModulePass and PrintFunctionPass implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PrintPasses.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+PrintModulePass::PrintModulePass() : OS(dbgs()) {}
+PrintModulePass::PrintModulePass(raw_ostream &OS, const std::string &Banner,
+ bool ShouldPreserveUseListOrder)
+ : OS(OS), Banner(Banner),
+ ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {}
+
+PreservedAnalyses PrintModulePass::run(Module &M, ModuleAnalysisManager &) {
+ if (llvm::isFunctionInPrintList("*")) {
+ if (!Banner.empty())
+ OS << Banner << "\n";
+ M.print(OS, nullptr, ShouldPreserveUseListOrder);
+ } else {
+ bool BannerPrinted = false;
+ for (const auto &F : M.functions()) {
+ if (llvm::isFunctionInPrintList(F.getName())) {
+ if (!BannerPrinted && !Banner.empty()) {
+ OS << Banner << "\n";
+ BannerPrinted = true;
+ }
+ F.print(OS);
+ }
+ }
+ }
+ return PreservedAnalyses::all();
+}
+
+PrintFunctionPass::PrintFunctionPass() : OS(dbgs()) {}
+PrintFunctionPass::PrintFunctionPass(raw_ostream &OS, const std::string &Banner)
+ : OS(OS), Banner(Banner) {}
+
+PreservedAnalyses PrintFunctionPass::run(Function &F,
+ FunctionAnalysisManager &) {
+ if (isFunctionInPrintList(F.getName())) {
+ if (forcePrintModuleIR())
+ OS << Banner << " (function: " << F.getName() << ")\n" << *F.getParent();
+ else
+ OS << Banner << '\n' << static_cast<Value &>(F);
+ }
+ return PreservedAnalyses::all();
+}
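+
+// Illustrative note, not part of the original file: isFunctionInPrintList is
+// driven by -filter-print-funcs and forcePrintModuleIR by -print-module-scope,
+// so with the latter the output begins with a banner such as
+//   *** IR Dump *** (function: foo)
+// followed by the whole module rather than just the function body.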
+
+namespace {
+
+class PrintModulePassWrapper : public ModulePass {
+ PrintModulePass P;
+
+public:
+ static char ID;
+ PrintModulePassWrapper() : ModulePass(ID) {}
+ PrintModulePassWrapper(raw_ostream &OS, const std::string &Banner,
+ bool ShouldPreserveUseListOrder)
+ : ModulePass(ID), P(OS, Banner, ShouldPreserveUseListOrder) {}
+
+ bool runOnModule(Module &M) override {
+ ModuleAnalysisManager DummyMAM;
+ P.run(M, DummyMAM);
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ StringRef getPassName() const override { return "Print Module IR"; }
+};
+
+class PrintFunctionPassWrapper : public FunctionPass {
+ PrintFunctionPass P;
+
+public:
+ static char ID;
+ PrintFunctionPassWrapper() : FunctionPass(ID) {}
+ PrintFunctionPassWrapper(raw_ostream &OS, const std::string &Banner)
+ : FunctionPass(ID), P(OS, Banner) {}
+
+ // This pass just prints a banner followed by the function as it's processed.
+ bool runOnFunction(Function &F) override {
+ FunctionAnalysisManager DummyFAM;
+ P.run(F, DummyFAM);
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ StringRef getPassName() const override { return "Print Function IR"; }
+};
+
+} // end anonymous namespace
+
+char PrintModulePassWrapper::ID = 0;
+INITIALIZE_PASS(PrintModulePassWrapper, "print-module",
+ "Print module to stderr", false, true)
+char PrintFunctionPassWrapper::ID = 0;
+INITIALIZE_PASS(PrintFunctionPassWrapper, "print-function",
+ "Print function to stderr", false, true)
+
+ModulePass *llvm::createPrintModulePass(llvm::raw_ostream &OS,
+ const std::string &Banner,
+ bool ShouldPreserveUseListOrder) {
+ return new PrintModulePassWrapper(OS, Banner, ShouldPreserveUseListOrder);
+}
+
+FunctionPass *llvm::createPrintFunctionPass(llvm::raw_ostream &OS,
+ const std::string &Banner) {
+ return new PrintFunctionPassWrapper(OS, Banner);
+}
+
+bool llvm::isIRPrintingPass(Pass *P) {
+ const char *PID = (const char*)P->getPassID();
+
+ return (PID == &PrintModulePassWrapper::ID) ||
+ (PID == &PrintFunctionPassWrapper::ID);
+}
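+
+// Illustrative usage, not part of the original file: with the legacy pass
+// manager, dumping the module between stages might look like
+//   legacy::PassManager PM;
+//   PM.add(createPrintModulePass(errs(), "; module before codegen"));
+//   PM.run(M);
+// while new-PM pipelines add PrintModulePass(errs()) to a ModulePassManager
+// directly.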
diff --git a/contrib/llvm-project/llvm/lib/IR/InlineAsm.cpp b/contrib/llvm-project/llvm/lib/IR/InlineAsm.cpp
new file mode 100644
index 000000000000..c75b1aa7c1d6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/InlineAsm.cpp
@@ -0,0 +1,316 @@
+//===- InlineAsm.cpp - Implement the InlineAsm class ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the InlineAsm class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/InlineAsm.h"
+#include "ConstantsContext.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Errc.h"
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <cstdlib>
+
+using namespace llvm;
+
+InlineAsm::InlineAsm(FunctionType *FTy, const std::string &asmString,
+ const std::string &constraints, bool hasSideEffects,
+ bool isAlignStack, AsmDialect asmDialect, bool canThrow)
+ : Value(PointerType::getUnqual(FTy), Value::InlineAsmVal),
+ AsmString(asmString), Constraints(constraints), FTy(FTy),
+ HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack),
+ Dialect(asmDialect), CanThrow(canThrow) {
+#ifndef NDEBUG
+ // Do various checks on the constraint string and type.
+ cantFail(verify(getFunctionType(), constraints));
+#endif
+}
+
+InlineAsm *InlineAsm::get(FunctionType *FTy, StringRef AsmString,
+ StringRef Constraints, bool hasSideEffects,
+ bool isAlignStack, AsmDialect asmDialect,
+ bool canThrow) {
+ InlineAsmKeyType Key(AsmString, Constraints, FTy, hasSideEffects,
+ isAlignStack, asmDialect, canThrow);
+ LLVMContextImpl *pImpl = FTy->getContext().pImpl;
+ return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(FTy), Key);
+}
+
+void InlineAsm::destroyConstant() {
+ getType()->getContext().pImpl->InlineAsms.remove(this);
+ delete this;
+}
+
+FunctionType *InlineAsm::getFunctionType() const {
+ return FTy;
+}
+
+/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
+/// fields in this structure. If the constraint string is not understood,
+/// return true, otherwise return false.
+bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
+ InlineAsm::ConstraintInfoVector &ConstraintsSoFar) {
+ StringRef::iterator I = Str.begin(), E = Str.end();
+ unsigned multipleAlternativeCount = Str.count('|') + 1;
+ unsigned multipleAlternativeIndex = 0;
+ ConstraintCodeVector *pCodes = &Codes;
+
+ // Initialize
+ isMultipleAlternative = multipleAlternativeCount > 1;
+ if (isMultipleAlternative) {
+ multipleAlternatives.resize(multipleAlternativeCount);
+ pCodes = &multipleAlternatives[0].Codes;
+ }
+ Type = isInput;
+ isEarlyClobber = false;
+ MatchingInput = -1;
+ isCommutative = false;
+ isIndirect = false;
+ currentAlternativeIndex = 0;
+
+ // Parse prefixes.
+ if (*I == '~') {
+ Type = isClobber;
+ ++I;
+
+ // '{' must immediately follow '~'.
+ if (I != E && *I != '{')
+ return true;
+ } else if (*I == '=') {
+ ++I;
+ Type = isOutput;
+ }
+
+ if (*I == '*') {
+ isIndirect = true;
+ ++I;
+ }
+
+ if (I == E) return true; // Just a prefix, like "==" or "~".
+
+ // Parse the modifiers.
+ bool DoneWithModifiers = false;
+ while (!DoneWithModifiers) {
+ switch (*I) {
+ default:
+ DoneWithModifiers = true;
+ break;
+ case '&': // Early clobber.
+ if (Type != isOutput || // Cannot early clobber anything but output.
+ isEarlyClobber) // Reject &&&&&&
+ return true;
+ isEarlyClobber = true;
+ break;
+ case '%': // Commutative.
+ if (Type == isClobber || // Cannot commute clobbers.
+ isCommutative) // Reject %%%%%
+ return true;
+ isCommutative = true;
+ break;
+ case '#': // Comment.
+ case '*': // Register preferencing.
+ return true; // Not supported.
+ }
+
+ if (!DoneWithModifiers) {
+ ++I;
+ if (I == E) return true; // Just prefixes and modifiers!
+ }
+ }
+
+ // Parse the various constraints.
+ while (I != E) {
+ if (*I == '{') { // Physical register reference.
+ // Find the end of the register name.
+ StringRef::iterator ConstraintEnd = std::find(I+1, E, '}');
+ if (ConstraintEnd == E) return true; // "{foo"
+ pCodes->push_back(std::string(StringRef(I, ConstraintEnd + 1 - I)));
+ I = ConstraintEnd+1;
+ } else if (isdigit(static_cast<unsigned char>(*I))) { // Matching Constraint
+ // Maximal munch numbers.
+ StringRef::iterator NumStart = I;
+ while (I != E && isdigit(static_cast<unsigned char>(*I)))
+ ++I;
+ pCodes->push_back(std::string(StringRef(NumStart, I - NumStart)));
+ unsigned N = atoi(pCodes->back().c_str());
+ // Check that this is a valid matching constraint!
+ if (N >= ConstraintsSoFar.size() || ConstraintsSoFar[N].Type != isOutput ||
+ Type != isInput)
+ return true; // Invalid constraint number.
+
+ // If Operand N already has a matching input, reject this. An output
+ // can't be constrained to the same value as multiple inputs.
+ if (isMultipleAlternative) {
+ if (multipleAlternativeIndex >=
+ ConstraintsSoFar[N].multipleAlternatives.size())
+ return true;
+ InlineAsm::SubConstraintInfo &scInfo =
+ ConstraintsSoFar[N].multipleAlternatives[multipleAlternativeIndex];
+ if (scInfo.MatchingInput != -1)
+ return true;
+ // Note that operand #n has a matching input.
+ scInfo.MatchingInput = ConstraintsSoFar.size();
+ assert(scInfo.MatchingInput >= 0);
+ } else {
+ if (ConstraintsSoFar[N].hasMatchingInput() &&
+ (size_t)ConstraintsSoFar[N].MatchingInput !=
+ ConstraintsSoFar.size())
+ return true;
+ // Note that operand #n has a matching input.
+ ConstraintsSoFar[N].MatchingInput = ConstraintsSoFar.size();
+ assert(ConstraintsSoFar[N].MatchingInput >= 0);
+ }
+ } else if (*I == '|') {
+ multipleAlternativeIndex++;
+ pCodes = &multipleAlternatives[multipleAlternativeIndex].Codes;
+ ++I;
+ } else if (*I == '^') {
+ // Multi-letter constraint
+ // FIXME: For now assuming these are 2-character constraints.
+ pCodes->push_back(std::string(StringRef(I + 1, 2)));
+ I += 3;
+ } else if (*I == '@') {
+ // Multi-letter constraint
+ ++I;
+ unsigned char C = static_cast<unsigned char>(*I);
+ assert(isdigit(C) && "Expected a digit!");
+ int N = C - '0';
+ assert(N > 0 && "Found a zero letter constraint!");
+ ++I;
+ pCodes->push_back(std::string(StringRef(I, N)));
+ I += N;
+ } else {
+ // Single letter constraint.
+ pCodes->push_back(std::string(StringRef(I, 1)));
+ ++I;
+ }
+ }
+
+ return false;
+}
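+
+// Illustrative sketch, not part of the original file: the constraint string
+// "=r,r,~{memory}" parses into three ConstraintInfos: an isOutput with code
+// "r", an isInput with code "r", and an isClobber with code "{memory}". A
+// matching constraint such as the "0" in "=r,0" instead records the input's
+// index in the output's MatchingInput field.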
+
+/// selectAlternative - Point this constraint to the alternative constraint
+/// indicated by the index.
+void InlineAsm::ConstraintInfo::selectAlternative(unsigned index) {
+ if (index < multipleAlternatives.size()) {
+ currentAlternativeIndex = index;
+ InlineAsm::SubConstraintInfo &scInfo =
+ multipleAlternatives[currentAlternativeIndex];
+ MatchingInput = scInfo.MatchingInput;
+ Codes = scInfo.Codes;
+ }
+}
+
+InlineAsm::ConstraintInfoVector
+InlineAsm::ParseConstraints(StringRef Constraints) {
+ ConstraintInfoVector Result;
+
+ // Scan the constraints string.
+ for (StringRef::iterator I = Constraints.begin(),
+ E = Constraints.end(); I != E; ) {
+ ConstraintInfo Info;
+
+ // Find the end of this constraint.
+ StringRef::iterator ConstraintEnd = std::find(I, E, ',');
+
+ if (ConstraintEnd == I || // Empty constraint like ",,"
+ Info.Parse(StringRef(I, ConstraintEnd-I), Result)) {
+ Result.clear(); // Erroneous constraint?
+ break;
+ }
+
+ Result.push_back(Info);
+
+ // ConstraintEnd may be either the next comma or the end of the string. In
+ // the former case, we skip the comma.
+ I = ConstraintEnd;
+ if (I != E) {
+ ++I;
+ if (I == E) {
+ Result.clear();
+ break;
+ } // don't allow "xyz,"
+ }
+ }
+
+ return Result;
+}
+
+static Error makeStringError(const char *Msg) {
+ return createStringError(errc::invalid_argument, Msg);
+}
+
+Error InlineAsm::verify(FunctionType *Ty, StringRef ConstStr) {
+ if (Ty->isVarArg())
+ return makeStringError("inline asm cannot be variadic");
+
+ ConstraintInfoVector Constraints = ParseConstraints(ConstStr);
+
+ // Error parsing constraints.
+ if (Constraints.empty() && !ConstStr.empty())
+ return makeStringError("failed to parse constraints");
+
+ unsigned NumOutputs = 0, NumInputs = 0, NumClobbers = 0;
+ unsigned NumIndirect = 0;
+
+ for (const ConstraintInfo &Constraint : Constraints) {
+ switch (Constraint.Type) {
+ case InlineAsm::isOutput:
+ if ((NumInputs-NumIndirect) != 0 || NumClobbers != 0)
+ return makeStringError("output constraint occurs after input "
+ "or clobber constraint");
+
+ if (!Constraint.isIndirect) {
+ ++NumOutputs;
+ break;
+ }
+ ++NumIndirect;
+ LLVM_FALLTHROUGH; // We fall through for Indirect Outputs.
+ case InlineAsm::isInput:
+ if (NumClobbers)
+ return makeStringError("input constraint occurs after clobber "
+ "constraint");
+ ++NumInputs;
+ break;
+ case InlineAsm::isClobber:
+ ++NumClobbers;
+ break;
+ }
+ }
+
+ switch (NumOutputs) {
+ case 0:
+ if (!Ty->getReturnType()->isVoidTy())
+ return makeStringError("inline asm without outputs must return void");
+ break;
+ case 1:
+ if (Ty->getReturnType()->isStructTy())
+ return makeStringError("inline asm with one output cannot return struct");
+ break;
+ default:
+ StructType *STy = dyn_cast<StructType>(Ty->getReturnType());
+ if (!STy || STy->getNumElements() != NumOutputs)
+ return makeStringError("number of output constraints does not match "
+ "number of return struct elements");
+ break;
+ }
+
+ if (Ty->getNumParams() != NumInputs)
+ return makeStringError("number of input constraints does not match number "
+ "of parameters");
+ return Error::success();
+}
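+
+// Illustrative usage, not part of the original file: building a verified,
+// side-effecting asm blob, assuming an LLVMContext Ctx:
+//   FunctionType *FTy =
+//       FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
+//   if (Error E = InlineAsm::verify(FTy, ""))
+//     report_fatal_error(std::move(E));
+//   InlineAsm *IA = InlineAsm::get(FTy, "nop", "", /*hasSideEffects=*/true);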
diff --git a/contrib/llvm-project/llvm/lib/IR/Instruction.cpp b/contrib/llvm-project/llvm/lib/IR/Instruction.cpp
new file mode 100644
index 000000000000..bf76c89f26ca
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Instruction.cpp
@@ -0,0 +1,878 @@
+//===-- Instruction.cpp - Implement the Instruction class -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Instruction class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Instruction.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+using namespace llvm;
+
+Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
+ Instruction *InsertBefore)
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
+
+ // If requested, insert this instruction into a basic block...
+ if (InsertBefore) {
+ BasicBlock *BB = InsertBefore->getParent();
+ assert(BB && "Instruction to insert before is not in a basic block!");
+ BB->getInstList().insert(InsertBefore->getIterator(), this);
+ }
+}
+
+Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
+ BasicBlock *InsertAtEnd)
+ : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {
+
+ // append this instruction into the basic block
+ assert(InsertAtEnd && "Basic block to append to may not be NULL!");
+ InsertAtEnd->getInstList().push_back(this);
+}
+
+Instruction::~Instruction() {
+ assert(!Parent && "Instruction still linked in the program!");
+
+ // Replace any extant metadata uses of this instruction with undef to
+ // preserve debug info accuracy. Some alternatives include:
+ // - Treat Instruction like any other Value, and point its extant metadata
+ // uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
+ // trivially dead (i.e. fair game for deletion in many passes), leading to
+ // stale dbg.values being in effect for too long.
+ // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
+ // correct. OTOH results in wasted work in some common cases (e.g. when all
+ // instructions in a BasicBlock are deleted).
+ if (isUsedByMetadata())
+ ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));
+}
+
+
+void Instruction::setParent(BasicBlock *P) {
+ Parent = P;
+}
+
+const Module *Instruction::getModule() const {
+ return getParent()->getModule();
+}
+
+const Function *Instruction::getFunction() const {
+ return getParent()->getParent();
+}
+
+void Instruction::removeFromParent() {
+ getParent()->getInstList().remove(getIterator());
+}
+
+iplist<Instruction>::iterator Instruction::eraseFromParent() {
+ return getParent()->getInstList().erase(getIterator());
+}
+
+/// Insert an unlinked instruction into a basic block immediately before the
+/// specified instruction.
+void Instruction::insertBefore(Instruction *InsertPos) {
+ InsertPos->getParent()->getInstList().insert(InsertPos->getIterator(), this);
+}
+
+/// Insert an unlinked instruction into a basic block immediately after the
+/// specified instruction.
+void Instruction::insertAfter(Instruction *InsertPos) {
+ InsertPos->getParent()->getInstList().insertAfter(InsertPos->getIterator(),
+ this);
+}
+
+/// Unlink this instruction from its current basic block and insert it into the
+/// basic block that MovePos lives in, right before MovePos.
+void Instruction::moveBefore(Instruction *MovePos) {
+ moveBefore(*MovePos->getParent(), MovePos->getIterator());
+}
+
+void Instruction::moveAfter(Instruction *MovePos) {
+ moveBefore(*MovePos->getParent(), ++MovePos->getIterator());
+}
+
+void Instruction::moveBefore(BasicBlock &BB,
+ SymbolTableList<Instruction>::iterator I) {
+ assert(I == BB.end() || I->getParent() == &BB);
+ BB.getInstList().splice(I, getParent()->getInstList(), getIterator());
+}
+
+bool Instruction::comesBefore(const Instruction *Other) const {
+ assert(Parent && Other->Parent &&
+ "instructions without BB parents have no order");
+ assert(Parent == Other->Parent && "cross-BB instruction order comparison");
+ if (!Parent->isInstrOrderValid())
+ Parent->renumberInstructions();
+ return Order < Other->Order;
+}
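+
+// Illustrative note, not part of the original file: because the ordering is
+// cached per block, a sweep of queries such as
+//   if (Def->comesBefore(UseInst)) { ... }
+// is amortized O(1) per query; only the first query after a block mutation
+// pays for the linear renumberInstructions() walk.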
+
+bool Instruction::isOnlyUserOfAnyOperand() {
+ return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
+}
+
+void Instruction::setHasNoUnsignedWrap(bool b) {
+ cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(b);
+}
+
+void Instruction::setHasNoSignedWrap(bool b) {
+ cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(b);
+}
+
+void Instruction::setIsExact(bool b) {
+ cast<PossiblyExactOperator>(this)->setIsExact(b);
+}
+
+bool Instruction::hasNoUnsignedWrap() const {
+ return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
+}
+
+bool Instruction::hasNoSignedWrap() const {
+ return cast<OverflowingBinaryOperator>(this)->hasNoSignedWrap();
+}
+
+bool Instruction::hasPoisonGeneratingFlags() const {
+ return cast<Operator>(this)->hasPoisonGeneratingFlags();
+}
+
+void Instruction::dropPoisonGeneratingFlags() {
+ switch (getOpcode()) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::Shl:
+ cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
+ cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
+ break;
+
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::AShr:
+ case Instruction::LShr:
+ cast<PossiblyExactOperator>(this)->setIsExact(false);
+ break;
+
+ case Instruction::GetElementPtr:
+ cast<GetElementPtrInst>(this)->setIsInBounds(false);
+ break;
+ }
+ if (isa<FPMathOperator>(this)) {
+ setHasNoNaNs(false);
+ setHasNoInfs(false);
+ }
+
+ assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
+}
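+
+// Illustrative sketch, not part of the original file: a pass speculating
+//   %a = add nsw i32 %x, %y
+// above its guarding branch must call dropPoisonGeneratingFlags() first,
+// leaving a plain "add i32 %x, %y", since nsw could turn the hoisted value
+// into poison on paths the guard would have excluded.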
+
+void Instruction::dropUndefImplyingAttrsAndUnknownMetadata(
+ ArrayRef<unsigned> KnownIDs) {
+ dropUnknownNonDebugMetadata(KnownIDs);
+ auto *CB = dyn_cast<CallBase>(this);
+ if (!CB)
+ return;
+ // For call instructions, we also need to drop parameter and return
+ // attributes that can cause UB if the call is moved to a location where
+ // the attribute is not valid.
+ AttributeList AL = CB->getAttributes();
+ if (AL.isEmpty())
+ return;
+ AttributeMask UBImplyingAttributes =
+ AttributeFuncs::getUBImplyingAttributes();
+ for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
+ CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
+ CB->removeRetAttrs(UBImplyingAttributes);
+}
+
+bool Instruction::isExact() const {
+ return cast<PossiblyExactOperator>(this)->isExact();
+}
+
+void Instruction::setFast(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setFast(B);
+}
+
+void Instruction::setHasAllowReassoc(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasAllowReassoc(B);
+}
+
+void Instruction::setHasNoNaNs(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasNoNaNs(B);
+}
+
+void Instruction::setHasNoInfs(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasNoInfs(B);
+}
+
+void Instruction::setHasNoSignedZeros(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
+}
+
+void Instruction::setHasAllowReciprocal(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
+}
+
+void Instruction::setHasAllowContract(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasAllowContract(B);
+}
+
+void Instruction::setHasApproxFunc(bool B) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setHasApproxFunc(B);
+}
+
+void Instruction::setFastMathFlags(FastMathFlags FMF) {
+ assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->setFastMathFlags(FMF);
+}
+
+void Instruction::copyFastMathFlags(FastMathFlags FMF) {
+ assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
+ cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
+}
+
+bool Instruction::isFast() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->isFast();
+}
+
+bool Instruction::hasAllowReassoc() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasAllowReassoc();
+}
+
+bool Instruction::hasNoNaNs() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasNoNaNs();
+}
+
+bool Instruction::hasNoInfs() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasNoInfs();
+}
+
+bool Instruction::hasNoSignedZeros() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasNoSignedZeros();
+}
+
+bool Instruction::hasAllowReciprocal() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasAllowReciprocal();
+}
+
+bool Instruction::hasAllowContract() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasAllowContract();
+}
+
+bool Instruction::hasApproxFunc() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->hasApproxFunc();
+}
+
+FastMathFlags Instruction::getFastMathFlags() const {
+ assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
+ return cast<FPMathOperator>(this)->getFastMathFlags();
+}
+
+void Instruction::copyFastMathFlags(const Instruction *I) {
+ copyFastMathFlags(I->getFastMathFlags());
+}
+
+void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
+ // Copy the wrapping flags.
+ if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
+ if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
+ setHasNoSignedWrap(OB->hasNoSignedWrap());
+ setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
+ }
+ }
+
+ // Copy the exact flag.
+ if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
+ if (isa<PossiblyExactOperator>(this))
+ setIsExact(PE->isExact());
+
+ // Copy the fast-math flags.
+ if (auto *FP = dyn_cast<FPMathOperator>(V))
+ if (isa<FPMathOperator>(this))
+ copyFastMathFlags(FP->getFastMathFlags());
+
+ if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
+ if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
+ DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());
+}
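+
+// Illustrative sketch, not part of the original file: when rebuilding
+//   %a = add nuw nsw i32 %x, %y
+// as a new instruction NewI, calling NewI->copyIRFlags(OldI) transfers the
+// nuw/nsw bits (and exact, fast-math, or inbounds flags where applicable) so
+// the replacement loses no information.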
+
+void Instruction::andIRFlags(const Value *V) {
+ if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
+ if (isa<OverflowingBinaryOperator>(this)) {
+ setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
+ setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
+ }
+ }
+
+ if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
+ if (isa<PossiblyExactOperator>(this))
+ setIsExact(isExact() && PE->isExact());
+
+ if (auto *FP = dyn_cast<FPMathOperator>(V)) {
+ if (isa<FPMathOperator>(this)) {
+ FastMathFlags FM = getFastMathFlags();
+ FM &= FP->getFastMathFlags();
+ copyFastMathFlags(FM);
+ }
+ }
+
+ if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
+ if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
+ DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());
+}
+
+const char *Instruction::getOpcodeName(unsigned OpCode) {
+ switch (OpCode) {
+ // Terminators
+ case Ret: return "ret";
+ case Br: return "br";
+ case Switch: return "switch";
+ case IndirectBr: return "indirectbr";
+ case Invoke: return "invoke";
+ case Resume: return "resume";
+ case Unreachable: return "unreachable";
+ case CleanupRet: return "cleanupret";
+ case CatchRet: return "catchret";
+ case CatchPad: return "catchpad";
+ case CatchSwitch: return "catchswitch";
+ case CallBr: return "callbr";
+
+ // Standard unary operators...
+ case FNeg: return "fneg";
+
+ // Standard binary operators...
+ case Add: return "add";
+ case FAdd: return "fadd";
+ case Sub: return "sub";
+ case FSub: return "fsub";
+ case Mul: return "mul";
+ case FMul: return "fmul";
+ case UDiv: return "udiv";
+ case SDiv: return "sdiv";
+ case FDiv: return "fdiv";
+ case URem: return "urem";
+ case SRem: return "srem";
+ case FRem: return "frem";
+
+ // Logical operators...
+ case And: return "and";
+ case Or : return "or";
+ case Xor: return "xor";
+
+ // Memory instructions...
+ case Alloca: return "alloca";
+ case Load: return "load";
+ case Store: return "store";
+ case AtomicCmpXchg: return "cmpxchg";
+ case AtomicRMW: return "atomicrmw";
+ case Fence: return "fence";
+ case GetElementPtr: return "getelementptr";
+
+ // Convert instructions...
+ case Trunc: return "trunc";
+ case ZExt: return "zext";
+ case SExt: return "sext";
+ case FPTrunc: return "fptrunc";
+ case FPExt: return "fpext";
+ case FPToUI: return "fptoui";
+ case FPToSI: return "fptosi";
+ case UIToFP: return "uitofp";
+ case SIToFP: return "sitofp";
+ case IntToPtr: return "inttoptr";
+ case PtrToInt: return "ptrtoint";
+ case BitCast: return "bitcast";
+ case AddrSpaceCast: return "addrspacecast";
+
+ // Other instructions...
+ case ICmp: return "icmp";
+ case FCmp: return "fcmp";
+ case PHI: return "phi";
+ case Select: return "select";
+ case Call: return "call";
+ case Shl: return "shl";
+ case LShr: return "lshr";
+ case AShr: return "ashr";
+ case VAArg: return "va_arg";
+ case ExtractElement: return "extractelement";
+ case InsertElement: return "insertelement";
+ case ShuffleVector: return "shufflevector";
+ case ExtractValue: return "extractvalue";
+ case InsertValue: return "insertvalue";
+ case LandingPad: return "landingpad";
+ case CleanupPad: return "cleanuppad";
+ case Freeze: return "freeze";
+
+ default: return "<Invalid operator> ";
+ }
+}
+
+/// Return true if both instructions have the same special state. This must be
+/// kept in sync with FunctionComparator::cmpOperations in
+/// lib/Transforms/IPO/MergeFunctions.cpp.
+static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
+ bool IgnoreAlignment = false) {
+ assert(I1->getOpcode() == I2->getOpcode() &&
+ "Can not compare special state of different instructions");
+
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
+ return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
+ (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
+ IgnoreAlignment);
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
+ return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
+ (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
+ LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
+ LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
+ if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
+ return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
+ (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
+ SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
+ SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
+ if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
+ return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
+ if (const CallInst *CI = dyn_cast<CallInst>(I1))
+ return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
+ CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
+ CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
+ if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
+ return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
+ CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
+ if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
+ return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
+ CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
+ CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
+ if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
+ return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
+ if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
+ return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
+ if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
+ return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
+ FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
+ if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
+ return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+ CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
+ CXI->getSuccessOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
+ CXI->getFailureOrdering() ==
+ cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
+ CXI->getSyncScopeID() ==
+ cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
+ if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
+ return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
+ RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+ RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
+ RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
+ if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
+ return SVI->getShuffleMask() ==
+ cast<ShuffleVectorInst>(I2)->getShuffleMask();
+ if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
+ return GEP->getSourceElementType() ==
+ cast<GetElementPtrInst>(I2)->getSourceElementType();
+
+ return true;
+}
+
+bool Instruction::isIdenticalTo(const Instruction *I) const {
+ return isIdenticalToWhenDefined(I) &&
+ SubclassOptionalData == I->SubclassOptionalData;
+}
+
+bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
+ if (getOpcode() != I->getOpcode() ||
+ getNumOperands() != I->getNumOperands() ||
+ getType() != I->getType())
+ return false;
+
+ // If both instructions have no operands, they are identical.
+ if (getNumOperands() == 0 && I->getNumOperands() == 0)
+ return haveSameSpecialState(this, I);
+
+ // We have two instructions of identical opcode and #operands. Check to see
+ // if all operands are the same.
+ if (!std::equal(op_begin(), op_end(), I->op_begin()))
+ return false;
+
+ // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
+ if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
+ const PHINode *otherPHI = cast<PHINode>(I);
+ return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
+ otherPHI->block_begin());
+ }
+
+ return haveSameSpecialState(this, I);
+}
+
+// Keep this in sync with FunctionComparator::cmpOperations in
+// lib/Transforms/IPO/MergeFunctions.cpp.
+bool Instruction::isSameOperationAs(const Instruction *I,
+ unsigned flags) const {
+ bool IgnoreAlignment = flags & CompareIgnoringAlignment;
+ bool UseScalarTypes = flags & CompareUsingScalarTypes;
+
+ if (getOpcode() != I->getOpcode() ||
+ getNumOperands() != I->getNumOperands() ||
+ (UseScalarTypes ?
+ getType()->getScalarType() != I->getType()->getScalarType() :
+ getType() != I->getType()))
+ return false;
+
+ // We have two instructions of identical opcode and #operands. Check to see
+ // if all operands are the same type
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ if (UseScalarTypes ?
+ getOperand(i)->getType()->getScalarType() !=
+ I->getOperand(i)->getType()->getScalarType() :
+ getOperand(i)->getType() != I->getOperand(i)->getType())
+ return false;
+
+ return haveSameSpecialState(this, I, IgnoreAlignment);
+}
+
+bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
+ for (const Use &U : uses()) {
+ // PHI nodes use values in the corresponding predecessor block. For other
+ // instructions, just check to see whether the parent of the use matches up.
+ const Instruction *I = cast<Instruction>(U.getUser());
+ const PHINode *PN = dyn_cast<PHINode>(I);
+ if (!PN) {
+ if (I->getParent() != BB)
+ return true;
+ continue;
+ }
+
+ if (PN->getIncomingBlock(U) != BB)
+ return true;
+ }
+ return false;
+}
+
+bool Instruction::mayReadFromMemory() const {
+ switch (getOpcode()) {
+ default: return false;
+ case Instruction::VAArg:
+ case Instruction::Load:
+ case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::CatchPad:
+ case Instruction::CatchRet:
+ return true;
+ case Instruction::Call:
+ case Instruction::Invoke:
+ case Instruction::CallBr:
+ return !cast<CallBase>(this)->onlyWritesMemory();
+ case Instruction::Store:
+ return !cast<StoreInst>(this)->isUnordered();
+ }
+}
+
+bool Instruction::mayWriteToMemory() const {
+ switch (getOpcode()) {
+ default: return false;
+ case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
+ case Instruction::Store:
+ case Instruction::VAArg:
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::CatchPad:
+ case Instruction::CatchRet:
+ return true;
+ case Instruction::Call:
+ case Instruction::Invoke:
+ case Instruction::CallBr:
+ return !cast<CallBase>(this)->onlyReadsMemory();
+ case Instruction::Load:
+ return !cast<LoadInst>(this)->isUnordered();
+ }
+}
+
+bool Instruction::isAtomic() const {
+ switch (getOpcode()) {
+ default:
+ return false;
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::Fence:
+ return true;
+ case Instruction::Load:
+ return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
+ case Instruction::Store:
+ return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
+ }
+}
+
+bool Instruction::hasAtomicLoad() const {
+ assert(isAtomic());
+ switch (getOpcode()) {
+ default:
+ return false;
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::Load:
+ return true;
+ }
+}
+
+bool Instruction::hasAtomicStore() const {
+ assert(isAtomic());
+ switch (getOpcode()) {
+ default:
+ return false;
+ case Instruction::AtomicCmpXchg:
+ case Instruction::AtomicRMW:
+ case Instruction::Store:
+ return true;
+ }
+}
+
+bool Instruction::isVolatile() const {
+ switch (getOpcode()) {
+ default:
+ return false;
+ case Instruction::AtomicRMW:
+ return cast<AtomicRMWInst>(this)->isVolatile();
+ case Instruction::Store:
+ return cast<StoreInst>(this)->isVolatile();
+ case Instruction::Load:
+ return cast<LoadInst>(this)->isVolatile();
+ case Instruction::AtomicCmpXchg:
+ return cast<AtomicCmpXchgInst>(this)->isVolatile();
+ case Instruction::Call:
+ case Instruction::Invoke:
+ // There are a very limited number of intrinsics with volatile flags.
+ if (auto *II = dyn_cast<IntrinsicInst>(this)) {
+ if (auto *MI = dyn_cast<MemIntrinsic>(II))
+ return MI->isVolatile();
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::matrix_column_major_load:
+ return cast<ConstantInt>(II->getArgOperand(2))->isOne();
+ case Intrinsic::matrix_column_major_store:
+ return cast<ConstantInt>(II->getArgOperand(3))->isOne();
+ }
+ }
+ return false;
+ }
+}
+
+bool Instruction::mayThrow() const {
+ if (const CallInst *CI = dyn_cast<CallInst>(this))
+ return !CI->doesNotThrow();
+ if (const auto *CRI = dyn_cast<CleanupReturnInst>(this))
+ return CRI->unwindsToCaller();
+ if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(this))
+ return CatchSwitch->unwindsToCaller();
+ return isa<ResumeInst>(this);
+}
+
+bool Instruction::mayHaveSideEffects() const {
+ return mayWriteToMemory() || mayThrow() || !willReturn();
+}
+
+bool Instruction::isSafeToRemove() const {
+ return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
+ !this->isTerminator() && !this->isEHPad();
+}
+
+bool Instruction::willReturn() const {
+ // Volatile store isn't guaranteed to return; see LangRef.
+ if (auto *SI = dyn_cast<StoreInst>(this))
+ return !SI->isVolatile();
+
+ if (const auto *CB = dyn_cast<CallBase>(this))
+ // FIXME: Temporarily assume that all side-effect free intrinsics will
+ // return. Remove this workaround once all intrinsics are appropriately
+ // annotated.
+ return CB->hasFnAttr(Attribute::WillReturn) ||
+ (isa<IntrinsicInst>(CB) && CB->onlyReadsMemory());
+ return true;
+}
+
+bool Instruction::isLifetimeStartOrEnd() const {
+ auto *II = dyn_cast<IntrinsicInst>(this);
+ if (!II)
+ return false;
+ Intrinsic::ID ID = II->getIntrinsicID();
+ return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
+}
+
+bool Instruction::isLaunderOrStripInvariantGroup() const {
+ auto *II = dyn_cast<IntrinsicInst>(this);
+ if (!II)
+ return false;
+ Intrinsic::ID ID = II->getIntrinsicID();
+ return ID == Intrinsic::launder_invariant_group ||
+ ID == Intrinsic::strip_invariant_group;
+}
+
+bool Instruction::isDebugOrPseudoInst() const {
+ return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
+}
+
+const Instruction *
+Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
+ for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
+ if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
+ return I;
+ return nullptr;
+}
+
+const Instruction *
+Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
+ for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
+ if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
+ return I;
+ return nullptr;
+}
+
+bool Instruction::isAssociative() const {
+ unsigned Opcode = getOpcode();
+ if (isAssociative(Opcode))
+ return true;
+
+ switch (Opcode) {
+ case FMul:
+ case FAdd:
+ return cast<FPMathOperator>(this)->hasAllowReassoc() &&
+ cast<FPMathOperator>(this)->hasNoSignedZeros();
+ default:
+ return false;
+ }
+}
+
+bool Instruction::isCommutative() const {
+ if (auto *II = dyn_cast<IntrinsicInst>(this))
+ return II->isCommutative();
+ // TODO: Should allow icmp/fcmp?
+ return isCommutative(getOpcode());
+}
+
+unsigned Instruction::getNumSuccessors() const {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<const CLASS *>(this)->getNumSuccessors();
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
+}
+
+BasicBlock *Instruction::getSuccessor(unsigned idx) const {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<const CLASS *>(this)->getSuccessor(idx);
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
+}
+
+void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<CLASS *>(this)->setSuccessor(idx, B);
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
+}
+
+void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
+ for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
+ Idx != NumSuccessors; ++Idx)
+ if (getSuccessor(Idx) == OldBB)
+ setSuccessor(Idx, NewBB);
+}
+
+Instruction *Instruction::cloneImpl() const {
+ llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
+}
+
+void Instruction::swapProfMetadata() {
+ MDNode *ProfileData = getMetadata(LLVMContext::MD_prof);
+ if (!ProfileData || ProfileData->getNumOperands() != 3 ||
+ !isa<MDString>(ProfileData->getOperand(0)))
+ return;
+
+ MDString *MDName = cast<MDString>(ProfileData->getOperand(0));
+ if (MDName->getString() != "branch_weights")
+ return;
+
+ // The first operand is the name. Swap the order of the weights and build a
+ // new MDNode.
+ Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
+ ProfileData->getOperand(1)};
+ setMetadata(LLVMContext::MD_prof,
+ MDNode::get(ProfileData->getContext(), Ops));
+}
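+
+// Illustrative sketch, not part of the original file: swapping the weights of
+// an inverted conditional branch turns
+//   !0 = !{!"branch_weights", i32 2000, i32 16}
+// into
+//   !0 = !{!"branch_weights", i32 16, i32 2000}
+// so the profile data still lines up with the (now swapped) successors.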
+
+void Instruction::copyMetadata(const Instruction &SrcInst,
+ ArrayRef<unsigned> WL) {
+ if (!SrcInst.hasMetadata())
+ return;
+
+ DenseSet<unsigned> WLS;
+ for (unsigned M : WL)
+ WLS.insert(M);
+
+ // Otherwise, enumerate and copy over metadata from the old instruction to the
+ // new one.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
+ SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
+ for (const auto &MD : TheMDs) {
+ if (WL.empty() || WLS.count(MD.first))
+ setMetadata(MD.first, MD.second);
+ }
+ if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
+ setDebugLoc(SrcInst.getDebugLoc());
+}
+
+Instruction *Instruction::clone() const {
+ Instruction *New = nullptr;
+ switch (getOpcode()) {
+ default:
+ llvm_unreachable("Unhandled Opcode.");
+#define HANDLE_INST(num, opc, clas) \
+ case Instruction::opc: \
+ New = cast<clas>(this)->cloneImpl(); \
+ break;
+#include "llvm/IR/Instruction.def"
+#undef HANDLE_INST
+ }
+
+ New->SubclassOptionalData = SubclassOptionalData;
+ New->copyMetadata(*this);
+ return New;
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/Instructions.cpp b/contrib/llvm-project/llvm/lib/IR/Instructions.cpp
new file mode 100644
index 000000000000..b333f40f3ce9
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Instructions.cpp
@@ -0,0 +1,4846 @@
+//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements all of the non-inline methods for the LLVM instruction
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Instructions.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TypeSize.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+using namespace llvm;
+
+static cl::opt<bool> DisableI2pP2iOpt(
+ "disable-i2p-p2i-opt", cl::init(false),
+ cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
+
+//===----------------------------------------------------------------------===//
+// AllocaInst Class
+//===----------------------------------------------------------------------===//
+
+Optional<TypeSize>
+AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
+ TypeSize Size = DL.getTypeAllocSizeInBits(getAllocatedType());
+ if (isArrayAllocation()) {
+ auto *C = dyn_cast<ConstantInt>(getArraySize());
+ if (!C)
+ return None;
+ assert(!Size.isScalable() && "Array elements cannot have a scalable size");
+ Size *= C->getZExtValue();
+ }
+ return Size;
+}
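+
+// Illustrative sketch, not part of the original file: for
+//   %buf = alloca i32, i64 8
+// this returns a fixed TypeSize of 8 * 32 = 256 bits, and None when the
+// array size operand is not a ConstantInt.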
+
+//===----------------------------------------------------------------------===//
+// SelectInst Class
+//===----------------------------------------------------------------------===//
+
+/// areInvalidOperands - Return a string if the specified operands are invalid
+/// for a select operation, otherwise return null.
+const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
+ if (Op1->getType() != Op2->getType())
+ return "both values to select must have same type";
+
+ if (Op1->getType()->isTokenTy())
+ return "select values cannot have token type";
+
+ if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
+ // Vector select.
+ if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
+ return "vector select condition element type must be i1";
+ VectorType *ET = dyn_cast<VectorType>(Op1->getType());
+ if (!ET)
+ return "selected values for vector select must be vectors";
+ if (ET->getElementCount() != VT->getElementCount())
+ return "vector select requires selected vectors to have "
+ "the same vector length as select condition";
+ } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
+ return "select condition must be i1 or <n x i1>";
+ }
+ return nullptr;
+}
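+
+// Illustrative sketch, not part of the original file: a well-formed vector
+// select pairs an <N x i1> condition with value vectors of the same length,
+//   %r = select <4 x i1> %m, <4 x float> %a, <4 x float> %b
+// and any mismatch yields one of the diagnostic strings above.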
+
+//===----------------------------------------------------------------------===//
+// PHINode Class
+//===----------------------------------------------------------------------===//
+
+PHINode::PHINode(const PHINode &PN)
+ : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
+ ReservedSpace(PN.getNumOperands()) {
+ allocHungoffUses(PN.getNumOperands());
+ std::copy(PN.op_begin(), PN.op_end(), op_begin());
+ std::copy(PN.block_begin(), PN.block_end(), block_begin());
+ SubclassOptionalData = PN.SubclassOptionalData;
+}
+
+// removeIncomingValue - Remove an incoming value. This is useful if a
+// predecessor basic block is deleted.
+Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
+ Value *Removed = getIncomingValue(Idx);
+
+ // Move everything after this operand down.
+ //
+ // FIXME: we could just swap with the end of the list, then erase. However,
+ // clients might not expect this to happen. The code as it is thrashes the
+ // use/def lists, which is kinda lame.
+ std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
+ std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
+
+ // Nuke the last value.
+ Op<-1>().set(nullptr);
+ setNumHungOffUseOperands(getNumOperands() - 1);
+
+ // If the PHI node is dead, because it has zero entries, nuke it now.
+ if (getNumOperands() == 0 && DeletePHIIfEmpty) {
+ // If anyone is using this PHI, make them use a dummy value instead...
+ replaceAllUsesWith(PoisonValue::get(getType()));
+ eraseFromParent();
+ }
+ return Removed;
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, increasing the number of operands by 1.5x.
+///
+void PHINode::growOperands() {
+ unsigned e = getNumOperands();
+ unsigned NumOps = e + e / 2;
+ if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
+
+ ReservedSpace = NumOps;
+ growHungoffUses(ReservedSpace, /* IsPhi */ true);
+}
+
+/// hasConstantValue - If the specified PHI node always merges together the same
+/// value, return the value, otherwise return null.
+Value *PHINode::hasConstantValue() const {
+ // Exploit the fact that phi nodes always have at least one entry.
+ Value *ConstantValue = getIncomingValue(0);
+ for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
+ if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
+ if (ConstantValue != this)
+ return nullptr; // Incoming values not all the same.
+ // The case where the first value is this PHI.
+ ConstantValue = getIncomingValue(i);
+ }
+ if (ConstantValue == this)
+ return UndefValue::get(getType());
+ return ConstantValue;
+}
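+
+// Illustrative sketch, not part of the original file: for
+//   %p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ], [ %p, %latch ]
+// this returns %v; incoming values equal to the PHI itself are ignored.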
+
+/// hasConstantOrUndefValue - Whether the specified PHI node always merges
+/// together the same value, assuming that undefs result in the same value as
+/// non-undefs.
+/// Unlike \ref hasConstantValue, this does not return a value because the
+/// unique non-undef incoming value need not dominate the PHI node.
+bool PHINode::hasConstantOrUndefValue() const {
+ Value *ConstantValue = nullptr;
+ for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
+ Value *Incoming = getIncomingValue(i);
+ if (Incoming != this && !isa<UndefValue>(Incoming)) {
+ if (ConstantValue && ConstantValue != Incoming)
+ return false;
+ ConstantValue = Incoming;
+ }
+ }
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// LandingPadInst Implementation
+//===----------------------------------------------------------------------===//
+
+LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, Instruction *InsertBefore)
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
+ init(NumReservedValues, NameStr);
+}
+
+LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
+ init(NumReservedValues, NameStr);
+}
+
+LandingPadInst::LandingPadInst(const LandingPadInst &LP)
+ : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
+ LP.getNumOperands()),
+ ReservedSpace(LP.getNumOperands()) {
+ allocHungoffUses(LP.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = LP.getOperandList();
+ for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
+ OL[I] = InOL[I];
+
+ setCleanup(LP.isCleanup());
+}
+
+LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
+ const Twine &NameStr,
+ Instruction *InsertBefore) {
+ return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
+}
+
+LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
+}
+
+void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
+ ReservedSpace = NumReservedValues;
+ setNumHungOffUseOperands(0);
+ allocHungoffUses(ReservedSpace);
+ setName(NameStr);
+ setCleanup(false);
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, doubling the number of operands.
+void LandingPadInst::growOperands(unsigned Size) {
+ unsigned e = getNumOperands();
+ if (ReservedSpace >= e + Size) return;
+ ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
+ growHungoffUses(ReservedSpace);
+}
+
+void LandingPadInst::addClause(Constant *Val) {
+ unsigned OpNo = getNumOperands();
+ growOperands(1);
+ assert(OpNo < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(getNumOperands() + 1);
+ getOperandList()[OpNo] = Val;
+}
+
+//===----------------------------------------------------------------------===//
+// CallBase Implementation
+//===----------------------------------------------------------------------===//
+
+CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
+ Instruction *InsertPt) {
+ switch (CB->getOpcode()) {
+ case Instruction::Call:
+ return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
+ case Instruction::Invoke:
+ return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
+ case Instruction::CallBr:
+ return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
+ default:
+ llvm_unreachable("Unknown CallBase sub-class!");
+ }
+}
+
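+/// Create a clone of \p CI with the single bundle \p OpB added, dropping any
+/// existing bundle with the same tag first. For example (illustrative),
+/// re-adding a "deopt" bundle replaces the old "deopt" bundle rather than
+/// producing a duplicate.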
+CallBase *CallBase::Create(CallBase *CI, OperandBundleDef OpB,
+ Instruction *InsertPt) {
+ SmallVector<OperandBundleDef, 2> OpDefs;
+ for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
+ auto ChildOB = CI->getOperandBundleAt(i);
+ if (ChildOB.getTagName() != OpB.getTag())
+ OpDefs.emplace_back(ChildOB);
+ }
+ OpDefs.emplace_back(OpB);
+ return CallBase::Create(CI, OpDefs, InsertPt);
+}
+
+Function *CallBase::getCaller() { return getParent()->getParent(); }
+
+unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
+ assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
+ return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
+}
+
+bool CallBase::isIndirectCall() const {
+ const Value *V = getCalledOperand();
+ if (isa<Function>(V) || isa<Constant>(V))
+ return false;
+ return !isInlineAsm();
+}
+
+/// Tests if this call site must be tail call optimized. Only a CallInst can
+/// be tail call optimized.
+bool CallBase::isMustTailCall() const {
+ if (auto *CI = dyn_cast<CallInst>(this))
+ return CI->isMustTailCall();
+ return false;
+}
+
+/// Tests if this call site is marked as a tail call.
+bool CallBase::isTailCall() const {
+ if (auto *CI = dyn_cast<CallInst>(this))
+ return CI->isTailCall();
+ return false;
+}
+
+Intrinsic::ID CallBase::getIntrinsicID() const {
+ if (auto *F = getCalledFunction())
+ return F->getIntrinsicID();
+ return Intrinsic::not_intrinsic;
+}
+
+bool CallBase::isReturnNonNull() const {
+ if (hasRetAttr(Attribute::NonNull))
+ return true;
+
+ if (getRetDereferenceableBytes() > 0 &&
+ !NullPointerIsDefined(getCaller(), getType()->getPointerAddressSpace()))
+ return true;
+
+ return false;
+}
+
+Value *CallBase::getArgOperandWithAttribute(Attribute::AttrKind Kind) const {
+ unsigned Index;
+
+ if (Attrs.hasAttrSomewhere(Kind, &Index))
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
+ if (const Function *F = getCalledFunction())
+ if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
+
+ return nullptr;
+}
+
+/// Determine whether the argument or parameter has the given attribute.
+bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+ assert(ArgNo < arg_size() && "Param index out of bounds!");
+
+ if (Attrs.hasParamAttr(ArgNo, Kind))
+ return true;
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasParamAttr(ArgNo, Kind);
+ return false;
+}
+
+bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
+ Value *V = getCalledOperand();
+ if (auto *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == BitCast)
+ V = CE->getOperand(0);
+
+ if (auto *F = dyn_cast<Function>(V))
+ return F->getAttributes().hasFnAttr(Kind);
+
+ return false;
+}
+
+bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
+ Value *V = getCalledOperand();
+ if (auto *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == BitCast)
+ V = CE->getOperand(0);
+
+ if (auto *F = dyn_cast<Function>(V))
+ return F->getAttributes().hasFnAttr(Kind);
+
+ return false;
+}
+
+template <typename AK>
+Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
+ // Operand bundles override attributes on the called function, but don't
+ // override attributes directly present on the call instruction.
+ if (isFnAttrDisallowedByOpBundle(Kind))
+ return Attribute();
+ Value *V = getCalledOperand();
+ if (auto *CE = dyn_cast<ConstantExpr>(V))
+ if (CE->getOpcode() == BitCast)
+ V = CE->getOperand(0);
+
+ if (auto *F = dyn_cast<Function>(V))
+ return F->getAttributes().getFnAttr(Kind);
+
+ return Attribute();
+}
+
+template Attribute
+CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
+template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
+
+void CallBase::getOperandBundlesAsDefs(
+ SmallVectorImpl<OperandBundleDef> &Defs) const {
+ for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
+ Defs.emplace_back(getOperandBundleAt(i));
+}
+
+CallBase::op_iterator
+CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
+ const unsigned BeginIndex) {
+ auto It = op_begin() + BeginIndex;
+ for (auto &B : Bundles)
+ It = std::copy(B.input_begin(), B.input_end(), It);
+
+ auto *ContextImpl = getContext().pImpl;
+ auto BI = Bundles.begin();
+ unsigned CurrentIndex = BeginIndex;
+
+ for (auto &BOI : bundle_op_infos()) {
+ assert(BI != Bundles.end() && "Incorrect allocation?");
+
+ BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
+ BOI.Begin = CurrentIndex;
+ BOI.End = CurrentIndex + BI->input_size();
+ CurrentIndex = BOI.End;
+ BI++;
+ }
+
+ assert(BI == Bundles.end() && "Incorrect allocation?");
+
+ return It;
+}
+
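+/// Return the BundleOpInfo whose [Begin, End) range covers \p OpIdx. As an
+/// illustrative sketch of the interpolation step used below: with 16 bundles
+/// covering operand indices [100, 420), a probe for OpIdx 300 lands at
+/// bundle (300 - 100) * 16 / 320 = 10, and the search then narrows
+/// [Begin, End) around that probe.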
+CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
+  // When there aren't many bundles, we do a simple linear search.
+  // Otherwise, fall back to a binary search that exploits the fact that
+  // bundles usually have a similar number of arguments, for faster
+  // convergence.
+ if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
+ for (auto &BOI : bundle_op_infos())
+ if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
+ return BOI;
+
+ llvm_unreachable("Did not find operand bundle for operand!");
+ }
+
+  assert(OpIdx >= arg_size() && "OpIdx is not in the operand bundles");
+  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
+         OpIdx < std::prev(bundle_op_info_end())->End &&
+         "OpIdx is not in the operand bundles");
+
+  // We need a fractional value below; to avoid floating point, we use an
+  // integral value scaled by this constant.
+ constexpr unsigned NumberScaling = 1024;
+
+ bundle_op_iterator Begin = bundle_op_info_begin();
+ bundle_op_iterator End = bundle_op_info_end();
+ bundle_op_iterator Current = Begin;
+
+ while (Begin != End) {
+ unsigned ScaledOperandPerBundle =
+ NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
+ Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
+ ScaledOperandPerBundle);
+ if (Current >= End)
+ Current = std::prev(End);
+ assert(Current < End && Current >= Begin &&
+ "the operand bundle doesn't cover every value in the range");
+ if (OpIdx >= Current->Begin && OpIdx < Current->End)
+ break;
+ if (OpIdx >= Current->End)
+ Begin = Current + 1;
+ else
+ End = Current;
+ }
+
+ assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
+ "the operand bundle doesn't cover every value in the range");
+ return *Current;
+}
+
+CallBase *CallBase::addOperandBundle(CallBase *CB, uint32_t ID,
+ OperandBundleDef OB,
+ Instruction *InsertPt) {
+ if (CB->getOperandBundle(ID))
+ return CB;
+
+ SmallVector<OperandBundleDef, 1> Bundles;
+ CB->getOperandBundlesAsDefs(Bundles);
+ Bundles.push_back(OB);
+ return Create(CB, Bundles, InsertPt);
+}
+
+CallBase *CallBase::removeOperandBundle(CallBase *CB, uint32_t ID,
+ Instruction *InsertPt) {
+ SmallVector<OperandBundleDef, 1> Bundles;
+ bool CreateNew = false;
+
+ for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
+ auto Bundle = CB->getOperandBundleAt(I);
+ if (Bundle.getTagID() == ID) {
+ CreateNew = true;
+ continue;
+ }
+ Bundles.emplace_back(Bundle);
+ }
+
+ return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
+}
+
+bool CallBase::hasReadingOperandBundles() const {
+ // Implementation note: this is a conservative implementation of operand
+ // bundle semantics, where *any* non-assume operand bundle (other than
+ // ptrauth) forces a callsite to be at least readonly.
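+  // For example (illustrative), a call carrying a "deopt" bundle is treated
+  // as at least readonly here, while a call whose only bundle is "ptrauth"
+  // is not.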
+ return hasOperandBundlesOtherThan(LLVMContext::OB_ptrauth) &&
+ getIntrinsicID() != Intrinsic::assume;
+}
+
+//===----------------------------------------------------------------------===//
+// CallInst Implementation
+//===----------------------------------------------------------------------===//
+
+void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
+ this->FTy = FTy;
+ assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
+ "NumOperands not set up?");
+
+#ifndef NDEBUG
+ assert((Args.size() == FTy->getNumParams() ||
+ (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
+ "Calling a function with bad signature!");
+
+ for (unsigned i = 0; i != Args.size(); ++i)
+ assert((i >= FTy->getNumParams() ||
+ FTy->getParamType(i) == Args[i]->getType()) &&
+ "Calling a function with a bad signature!");
+#endif
+
+ // Set operands in order of their index to match use-list-order
+ // prediction.
+ llvm::copy(Args, op_begin());
+ setCalledOperand(Func);
+
+ auto It = populateBundleOperandInfos(Bundles, Args.size());
+ (void)It;
+ assert(It + 1 == op_end() && "Should add up!");
+
+ setName(NameStr);
+}
+
+void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
+ this->FTy = FTy;
+ assert(getNumOperands() == 1 && "NumOperands not set up?");
+ setCalledOperand(Func);
+
+ assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
+
+ setName(NameStr);
+}
+
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+ Instruction *InsertBefore)
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
+ init(Ty, Func, Name);
+}
+
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
+ init(Ty, Func, Name);
+}
+
+CallInst::CallInst(const CallInst &CI)
+ : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
+ CI.getNumOperands()) {
+ setTailCallKind(CI.getTailCallKind());
+ setCallingConv(CI.getCallingConv());
+
+ std::copy(CI.op_begin(), CI.op_end(), op_begin());
+ std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
+ bundle_op_info_begin());
+ SubclassOptionalData = CI.SubclassOptionalData;
+}
+
+CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
+ Instruction *InsertPt) {
+ std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
+
+ auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
+ Args, OpB, CI->getName(), InsertPt);
+ NewCI->setTailCallKind(CI->getTailCallKind());
+ NewCI->setCallingConv(CI->getCallingConv());
+ NewCI->SubclassOptionalData = CI->SubclassOptionalData;
+ NewCI->setAttributes(CI->getAttributes());
+ NewCI->setDebugLoc(CI->getDebugLoc());
+ return NewCI;
+}
+
+// Update the profile weight for this call instruction by scaling it by the
+// ratio S/T. The meaning of "branch_weights" metadata for a call instruction
+// is transferred to represent the call count.
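+// For example (illustrative), scaling by S/T = 1/4 rewrites
+// !{!"branch_weights", i32 400} to !{!"branch_weights", i32 100}.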
+void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
+ auto *ProfileData = getMetadata(LLVMContext::MD_prof);
+ if (ProfileData == nullptr)
+ return;
+
+ auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
+ if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
+ !ProfDataName->getString().equals("VP")))
+ return;
+
+ if (T == 0) {
+ LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
+ "div by 0. Ignoring. Likely the function "
+ << getParent()->getParent()->getName()
+ << " has 0 entry count, and contains call instructions "
+ "with non-zero prof info.");
+ return;
+ }
+
+ MDBuilder MDB(getContext());
+ SmallVector<Metadata *, 3> Vals;
+ Vals.push_back(ProfileData->getOperand(0));
+ APInt APS(128, S), APT(128, T);
+  if (ProfDataName->getString().equals("branch_weights") &&
+      ProfileData->getNumOperands() > 1) {
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
+ ->getValue()
+ .getZExtValue());
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt32Ty(getContext()),
+ Val.udiv(APT).getLimitedValue(UINT32_MAX))));
+ } else if (ProfDataName->getString().equals("VP"))
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
+ // The first value is the key of the value profile, which will not change.
+ Vals.push_back(ProfileData->getOperand(i));
+ uint64_t Count =
+ mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
+ ->getValue()
+ .getZExtValue();
+ // Don't scale the magic number.
+ if (Count == NOMORE_ICP_MAGICNUM) {
+ Vals.push_back(ProfileData->getOperand(i + 1));
+ continue;
+ }
+ // Using APInt::div may be expensive, but most cases should fit 64 bits.
+ APInt Val(128, Count);
+ Val *= APS;
+ Vals.push_back(MDB.createConstant(
+ ConstantInt::get(Type::getInt64Ty(getContext()),
+ Val.udiv(APT).getLimitedValue())));
+ }
+ setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
+}
+
+/// IsConstantOne - Return true only if val is constant int 1
+static bool IsConstantOne(Value *val) {
+ assert(val && "IsConstantOne does not work with nullptr val");
+ const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
+ return CVal && CVal->isOne();
+}
+
+static Instruction *createMalloc(Instruction *InsertBefore,
+ BasicBlock *InsertAtEnd, Type *IntPtrTy,
+ Type *AllocTy, Value *AllocSize,
+ Value *ArraySize,
+ ArrayRef<OperandBundleDef> OpB,
+ Function *MallocF, const Twine &Name) {
+ assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
+ "createMalloc needs either InsertBefore or InsertAtEnd");
+
+ // malloc(type) becomes:
+ // bitcast (i8* malloc(typeSize)) to type*
+ // malloc(type, arraySize) becomes:
+ // bitcast (i8* malloc(typeSize*arraySize)) to type*
+ if (!ArraySize)
+ ArraySize = ConstantInt::get(IntPtrTy, 1);
+ else if (ArraySize->getType() != IntPtrTy) {
+ if (InsertBefore)
+ ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
+ "", InsertBefore);
+ else
+ ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
+ "", InsertAtEnd);
+ }
+
+ if (!IsConstantOne(ArraySize)) {
+ if (IsConstantOne(AllocSize)) {
+ AllocSize = ArraySize; // Operand * 1 = Operand
+ } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
+ Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
+ false /*ZExt*/);
+ // Malloc arg is constant product of type size and array size
+ AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
+ } else {
+ // Multiply type size by the array size...
+ if (InsertBefore)
+ AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
+ "mallocsize", InsertBefore);
+ else
+ AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
+ "mallocsize", InsertAtEnd);
+ }
+ }
+
+ assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
+ // Create the call to Malloc.
+ BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
+ Module *M = BB->getParent()->getParent();
+ Type *BPTy = Type::getInt8PtrTy(BB->getContext());
+ FunctionCallee MallocFunc = MallocF;
+ if (!MallocFunc)
+ // prototype malloc as "void *malloc(size_t)"
+ MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
+ PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
+ CallInst *MCall = nullptr;
+ Instruction *Result = nullptr;
+ if (InsertBefore) {
+ MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
+ InsertBefore);
+ Result = MCall;
+ if (Result->getType() != AllocPtrType)
+ // Create a cast instruction to convert to the right type...
+ Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
+ } else {
+ MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
+ Result = MCall;
+ if (Result->getType() != AllocPtrType) {
+ InsertAtEnd->getInstList().push_back(MCall);
+ // Create a cast instruction to convert to the right type...
+ Result = new BitCastInst(MCall, AllocPtrType, Name);
+ }
+ }
+ MCall->setTailCall();
+ if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
+ MCall->setCallingConv(F->getCallingConv());
+ if (!F->returnDoesNotAlias())
+ F->setReturnDoesNotAlias();
+ }
+ assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
+
+ return Result;
+}
+
+/// CreateMalloc - Generate the IR for a call to malloc:
+/// 1. Compute the malloc call's argument as the specified type's size,
+/// possibly multiplied by the array size if the array size is not
+/// constant 1.
+/// 2. Call malloc with that argument.
+/// 3. Bitcast the result of the malloc call to the specified type.
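+/// For example (illustrative), with IntPtrTy = i64 and a non-constant
+/// ArraySize %n, the emitted sequence is roughly:
+///   %mallocsize = mul i64 %n, %typesize
+///   %malloccall = tail call i8* @malloc(i64 %mallocsize)
+///   %name = bitcast i8* %malloccall to %ty*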
+Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize,
+ Function *MallocF,
+ const Twine &Name) {
+ return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
+ ArraySize, None, MallocF, Name);
+}
+Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize,
+ ArrayRef<OperandBundleDef> OpB,
+ Function *MallocF,
+ const Twine &Name) {
+ return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
+ ArraySize, OpB, MallocF, Name);
+}
+
+/// CreateMalloc - Generate the IR for a call to malloc:
+/// 1. Compute the malloc call's argument as the specified type's size,
+/// possibly multiplied by the array size if the array size is not
+/// constant 1.
+/// 2. Call malloc with that argument.
+/// 3. Bitcast the result of the malloc call to the specified type.
+/// Note: This function does not add the bitcast to the basic block; that is
+/// the responsibility of the caller.
+Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize,
+ Function *MallocF, const Twine &Name) {
+ return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
+ ArraySize, None, MallocF, Name);
+}
+Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize,
+ ArrayRef<OperandBundleDef> OpB,
+ Function *MallocF, const Twine &Name) {
+ return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
+ ArraySize, OpB, MallocF, Name);
+}
+
+static Instruction *createFree(Value *Source,
+ ArrayRef<OperandBundleDef> Bundles,
+ Instruction *InsertBefore,
+ BasicBlock *InsertAtEnd) {
+ assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
+ "createFree needs either InsertBefore or InsertAtEnd");
+ assert(Source->getType()->isPointerTy() &&
+         "Cannot free something of non-pointer type!");
+
+ BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
+ Module *M = BB->getParent()->getParent();
+
+ Type *VoidTy = Type::getVoidTy(M->getContext());
+  Type *VoidPtrTy = Type::getInt8PtrTy(M->getContext());
+  // Prototype free as "void free(void*)".
+  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
+  CallInst *Result = nullptr;
+  Value *PtrCast = Source;
+  if (InsertBefore) {
+    if (Source->getType() != VoidPtrTy)
+      PtrCast = new BitCastInst(Source, VoidPtrTy, "", InsertBefore);
+    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
+  } else {
+    if (Source->getType() != VoidPtrTy)
+      PtrCast = new BitCastInst(Source, VoidPtrTy, "", InsertAtEnd);
+ Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
+ }
+ Result->setTailCall();
+ if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
+ Result->setCallingConv(F->getCallingConv());
+
+ return Result;
+}
+
+/// CreateFree - Generate the IR for a call to the builtin free function.
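+/// For example (illustrative), for an i32* Source this emits roughly:
+///   %1 = bitcast i32* %src to i8*
+///   tail call void @free(i8* %1)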
+Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
+ return createFree(Source, None, InsertBefore, nullptr);
+}
+Instruction *CallInst::CreateFree(Value *Source,
+ ArrayRef<OperandBundleDef> Bundles,
+ Instruction *InsertBefore) {
+ return createFree(Source, Bundles, InsertBefore, nullptr);
+}
+
+/// CreateFree - Generate the IR for a call to the builtin free function.
+/// Note: This function does not add the call to the basic block; that is the
+/// responsibility of the caller.
+Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
+ Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
+ assert(FreeCall && "CreateFree did not create a CallInst");
+ return FreeCall;
+}
+Instruction *CallInst::CreateFree(Value *Source,
+ ArrayRef<OperandBundleDef> Bundles,
+ BasicBlock *InsertAtEnd) {
+ Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
+ assert(FreeCall && "CreateFree did not create a CallInst");
+ return FreeCall;
+}
+
+//===----------------------------------------------------------------------===//
+// InvokeInst Implementation
+//===----------------------------------------------------------------------===//
+
+void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr) {
+ this->FTy = FTy;
+
+ assert((int)getNumOperands() ==
+ ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
+ "NumOperands not set up?");
+
+#ifndef NDEBUG
+ assert(((Args.size() == FTy->getNumParams()) ||
+ (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
+ "Invoking a function with bad signature");
+
+ for (unsigned i = 0, e = Args.size(); i != e; i++)
+ assert((i >= FTy->getNumParams() ||
+ FTy->getParamType(i) == Args[i]->getType()) &&
+ "Invoking a function with a bad signature!");
+#endif
+
+ // Set operands in order of their index to match use-list-order
+ // prediction.
+ llvm::copy(Args, op_begin());
+ setNormalDest(IfNormal);
+ setUnwindDest(IfException);
+ setCalledOperand(Fn);
+
+ auto It = populateBundleOperandInfos(Bundles, Args.size());
+ (void)It;
+ assert(It + 3 == op_end() && "Should add up!");
+
+ setName(NameStr);
+}
+
+InvokeInst::InvokeInst(const InvokeInst &II)
+ : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
+ OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
+ II.getNumOperands()) {
+ setCallingConv(II.getCallingConv());
+ std::copy(II.op_begin(), II.op_end(), op_begin());
+ std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
+ bundle_op_info_begin());
+ SubclassOptionalData = II.SubclassOptionalData;
+}
+
+InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
+ Instruction *InsertPt) {
+ std::vector<Value *> Args(II->arg_begin(), II->arg_end());
+
+ auto *NewII = InvokeInst::Create(
+ II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
+ II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
+ NewII->setCallingConv(II->getCallingConv());
+ NewII->SubclassOptionalData = II->SubclassOptionalData;
+ NewII->setAttributes(II->getAttributes());
+ NewII->setDebugLoc(II->getDebugLoc());
+ return NewII;
+}
+
+LandingPadInst *InvokeInst::getLandingPadInst() const {
+ return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
+}
+
+//===----------------------------------------------------------------------===//
+// CallBrInst Implementation
+//===----------------------------------------------------------------------===//
+
+void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
+ ArrayRef<BasicBlock *> IndirectDests,
+ ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr) {
+ this->FTy = FTy;
+
+ assert((int)getNumOperands() ==
+ ComputeNumOperands(Args.size(), IndirectDests.size(),
+ CountBundleInputs(Bundles)) &&
+ "NumOperands not set up?");
+
+#ifndef NDEBUG
+ assert(((Args.size() == FTy->getNumParams()) ||
+ (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
+ "Calling a function with bad signature");
+
+ for (unsigned i = 0, e = Args.size(); i != e; i++)
+ assert((i >= FTy->getNumParams() ||
+ FTy->getParamType(i) == Args[i]->getType()) &&
+ "Calling a function with a bad signature!");
+#endif
+
+ // Set operands in order of their index to match use-list-order
+ // prediction.
+ std::copy(Args.begin(), Args.end(), op_begin());
+ NumIndirectDests = IndirectDests.size();
+ setDefaultDest(Fallthrough);
+ for (unsigned i = 0; i != NumIndirectDests; ++i)
+ setIndirectDest(i, IndirectDests[i]);
+ setCalledOperand(Fn);
+
+ auto It = populateBundleOperandInfos(Bundles, Args.size());
+ (void)It;
+ assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
+
+ setName(NameStr);
+}
+
+void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
+ assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
+ if (BasicBlock *OldBB = getIndirectDest(i)) {
+ BlockAddress *Old = BlockAddress::get(OldBB);
+ BlockAddress *New = BlockAddress::get(B);
+ for (unsigned ArgNo = 0, e = arg_size(); ArgNo != e; ++ArgNo)
+ if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
+ setArgOperand(ArgNo, New);
+ }
+}
+
+CallBrInst::CallBrInst(const CallBrInst &CBI)
+ : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
+ OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
+ CBI.getNumOperands()) {
+ setCallingConv(CBI.getCallingConv());
+ std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
+ std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
+ bundle_op_info_begin());
+ SubclassOptionalData = CBI.SubclassOptionalData;
+ NumIndirectDests = CBI.NumIndirectDests;
+}
+
+CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
+ Instruction *InsertPt) {
+ std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
+
+ auto *NewCBI = CallBrInst::Create(
+ CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
+ CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
+ NewCBI->setCallingConv(CBI->getCallingConv());
+ NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
+ NewCBI->setAttributes(CBI->getAttributes());
+ NewCBI->setDebugLoc(CBI->getDebugLoc());
+ NewCBI->NumIndirectDests = CBI->NumIndirectDests;
+ return NewCBI;
+}
+
+//===----------------------------------------------------------------------===//
+// ReturnInst Implementation
+//===----------------------------------------------------------------------===//
+
+ReturnInst::ReturnInst(const ReturnInst &RI)
+ : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
+ RI.getNumOperands()) {
+ if (RI.getNumOperands())
+ Op<0>() = RI.Op<0>();
+ SubclassOptionalData = RI.SubclassOptionalData;
+}
+
+ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(C), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
+ InsertBefore) {
+ if (retVal)
+ Op<0>() = retVal;
+}
+
+ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(C), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
+ InsertAtEnd) {
+ if (retVal)
+ Op<0>() = retVal;
+}
+
+ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(Context), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
+
+//===----------------------------------------------------------------------===//
+// ResumeInst Implementation
+//===----------------------------------------------------------------------===//
+
+ResumeInst::ResumeInst(const ResumeInst &RI)
+ : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1) {
+ Op<0>() = RI.Op<0>();
+}
+
+ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
+ Op<0>() = Exn;
+}
+
+ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
+ Op<0>() = Exn;
+}
+
+//===----------------------------------------------------------------------===//
+// CleanupReturnInst Implementation
+//===----------------------------------------------------------------------===//
+
+CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
+ : Instruction(CRI.getType(), Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) -
+ CRI.getNumOperands(),
+ CRI.getNumOperands()) {
+ setSubclassData<Instruction::OpaqueField>(
+ CRI.getSubclassData<Instruction::OpaqueField>());
+ Op<0>() = CRI.Op<0>();
+ if (CRI.hasUnwindDest())
+ Op<1>() = CRI.Op<1>();
+}
+
+void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
+ if (UnwindBB)
+ setSubclassData<UnwindDestField>(true);
+
+ Op<0>() = CleanupPad;
+ if (UnwindBB)
+ Op<1>() = UnwindBB;
+}
+
+CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
+ unsigned Values, Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(CleanupPad->getContext()),
+ Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) - Values,
+ Values, InsertBefore) {
+ init(CleanupPad, UnwindBB);
+}
+
+CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
+ unsigned Values, BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(CleanupPad->getContext()),
+ Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) - Values,
+ Values, InsertAtEnd) {
+ init(CleanupPad, UnwindBB);
+}
+
+//===----------------------------------------------------------------------===//
+// CatchReturnInst Implementation
+//===----------------------------------------------------------------------===//
+void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
+ Op<0>() = CatchPad;
+ Op<1>() = BB;
+}
+
+CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
+ : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2) {
+ Op<0>() = CRI.Op<0>();
+ Op<1>() = CRI.Op<1>();
+}
+
+CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2,
+ InsertBefore) {
+ init(CatchPad, BB);
+}
+
+CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2,
+ InsertAtEnd) {
+ init(CatchPad, BB);
+}
+
+//===----------------------------------------------------------------------===//
+// CatchSwitchInst Implementation
+//===----------------------------------------------------------------------===//
+
+CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues,
+ const Twine &NameStr,
+ Instruction *InsertBefore)
+ : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertBefore) {
+ if (UnwindDest)
+ ++NumReservedValues;
+ init(ParentPad, UnwindDest, NumReservedValues + 1);
+ setName(NameStr);
+}
+
+CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertAtEnd) {
+ if (UnwindDest)
+ ++NumReservedValues;
+ init(ParentPad, UnwindDest, NumReservedValues + 1);
+ setName(NameStr);
+}
+
+CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
+ : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
+ CSI.getNumOperands()) {
+ init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
+ setNumHungOffUseOperands(ReservedSpace);
+ Use *OL = getOperandList();
+ const Use *InOL = CSI.getOperandList();
+ for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
+ OL[I] = InOL[I];
+}
+
+void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumReservedValues) {
+ assert(ParentPad && NumReservedValues);
+
+ ReservedSpace = NumReservedValues;
+ setNumHungOffUseOperands(UnwindDest ? 2 : 1);
+ allocHungoffUses(ReservedSpace);
+
+ Op<0>() = ParentPad;
+ if (UnwindDest) {
+ setSubclassData<UnwindDestField>(true);
+ setUnwindDest(UnwindDest);
+ }
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, roughly doubling the reserved space.
+void CatchSwitchInst::growOperands(unsigned Size) {
+ unsigned NumOperands = getNumOperands();
+ assert(NumOperands >= 1);
+ if (ReservedSpace >= NumOperands + Size)
+ return;
+ ReservedSpace = (NumOperands + Size / 2) * 2;
+ growHungoffUses(ReservedSpace);
+}
+
+void CatchSwitchInst::addHandler(BasicBlock *Handler) {
+ unsigned OpNo = getNumOperands();
+ growOperands(1);
+ assert(OpNo < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(getNumOperands() + 1);
+ getOperandList()[OpNo] = Handler;
+}
+
+void CatchSwitchInst::removeHandler(handler_iterator HI) {
+ // Move all subsequent handlers up one.
+ Use *EndDst = op_end() - 1;
+ for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
+ *CurDst = *(CurDst + 1);
+ // Null out the last handler use.
+ *EndDst = nullptr;
+
+ setNumHungOffUseOperands(getNumOperands() - 1);
+}
+
+//===----------------------------------------------------------------------===//
+// FuncletPadInst Implementation
+//===----------------------------------------------------------------------===//
+void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
+ const Twine &NameStr) {
+ assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
+ llvm::copy(Args, op_begin());
+ setParentPad(ParentPad);
+ setName(NameStr);
+}
+
+FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
+ : Instruction(FPI.getType(), FPI.getOpcode(),
+ OperandTraits<FuncletPadInst>::op_end(this) -
+ FPI.getNumOperands(),
+ FPI.getNumOperands()) {
+ std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
+ setParentPad(FPI.getParentPad());
+}
+
+FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, Instruction *InsertBefore)
+ : Instruction(ParentPad->getType(), Op,
+ OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
+ InsertBefore) {
+ init(ParentPad, Args, NameStr);
+}
+
+FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : Instruction(ParentPad->getType(), Op,
+ OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
+ InsertAtEnd) {
+ init(ParentPad, Args, NameStr);
+}
+
+//===----------------------------------------------------------------------===//
+// UnreachableInst Implementation
+//===----------------------------------------------------------------------===//
+
+UnreachableInst::UnreachableInst(LLVMContext &Context,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
+ 0, InsertBefore) {}
+UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
+ 0, InsertAtEnd) {}
+
+//===----------------------------------------------------------------------===//
+// BranchInst Implementation
+//===----------------------------------------------------------------------===//
+
+void BranchInst::AssertOK() {
+ if (isConditional())
+ assert(getCondition()->getType()->isIntegerTy(1) &&
+ "May only branch on boolean predicates!");
+}
+
+BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 1, 1,
+ InsertBefore) {
+ assert(IfTrue && "Branch destination may not be null!");
+ Op<-1>() = IfTrue;
+}
+
+BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 3, 3,
+ InsertBefore) {
+ // Assign in order of operand index to make use-list order predictable.
+ Op<-3>() = Cond;
+ Op<-2>() = IfFalse;
+ Op<-1>() = IfTrue;
+#ifndef NDEBUG
+ AssertOK();
+#endif
+}
+
+BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
+ assert(IfTrue && "Branch destination may not be null!");
+ Op<-1>() = IfTrue;
+}
+
+BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
+ // Assign in order of operand index to make use-list order predictable.
+ Op<-3>() = Cond;
+ Op<-2>() = IfFalse;
+ Op<-1>() = IfTrue;
+#ifndef NDEBUG
+ AssertOK();
+#endif
+}
+
+BranchInst::BranchInst(const BranchInst &BI)
+ : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
+ BI.getNumOperands()) {
+ // Assign in order of operand index to make use-list order predictable.
+ if (BI.getNumOperands() != 1) {
+ assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
+ Op<-3>() = BI.Op<-3>();
+ Op<-2>() = BI.Op<-2>();
+ }
+ Op<-1>() = BI.Op<-1>();
+ SubclassOptionalData = BI.SubclassOptionalData;
+}
+
+void BranchInst::swapSuccessors() {
+ assert(isConditional() &&
+ "Cannot swap successors of an unconditional branch");
+ Op<-1>().swap(Op<-2>());
+
+ // Update profile metadata if present and it matches our structural
+ // expectations.
+ swapProfMetadata();
+}
+
+//===----------------------------------------------------------------------===//
+// AllocaInst Implementation
+//===----------------------------------------------------------------------===//
+
+static Value *getAISize(LLVMContext &Context, Value *Amt) {
+ if (!Amt)
+ Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
+ else {
+ assert(!isa<BasicBlock>(Amt) &&
+ "Passed basic block into allocation size parameter! Use other ctor");
+ assert(Amt->getType()->isIntegerTy() &&
+ "Allocation array size is not an integer!");
+ }
+ return Amt;
+}
+
+static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
+ assert(BB && "Insertion BB cannot be null when alignment not provided!");
+ assert(BB->getParent() &&
+ "BB must be in a Function when alignment not provided!");
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ return DL.getPrefTypeAlign(Ty);
+}
+
+static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
+ assert(I && "Insertion position cannot be null when alignment not provided!");
+ return computeAllocaDefaultAlign(Ty, I->getParent());
+}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
+ Instruction *InsertBefore)
+ : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ const Twine &Name, Instruction *InsertBefore)
+ : AllocaInst(Ty, AddrSpace, ArraySize,
+ computeAllocaDefaultAlign(Ty, InsertBefore), Name,
+ InsertBefore) {}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ const Twine &Name, BasicBlock *InsertAtEnd)
+ : AllocaInst(Ty, AddrSpace, ArraySize,
+ computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
+ InsertAtEnd) {}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ Align Align, const Twine &Name,
+ Instruction *InsertBefore)
+ : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
+ getAISize(Ty->getContext(), ArraySize), InsertBefore),
+ AllocatedType(Ty) {
+ setAlignment(Align);
+ assert(!Ty->isVoidTy() && "Cannot allocate void!");
+ setName(Name);
+}
+
+AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
+ Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
+ : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
+ getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
+ AllocatedType(Ty) {
+ setAlignment(Align);
+ assert(!Ty->isVoidTy() && "Cannot allocate void!");
+ setName(Name);
+}
+
+bool AllocaInst::isArrayAllocation() const {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
+ return !CI->isOne();
+ return true;
+}
+
+/// isStaticAlloca - Return true if this alloca is in the entry block of the
+/// function and is a constant size. If so, the code generator will fold it
+/// into the prolog/epilog code, so it is basically free.
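+/// For example (illustrative), `%buf = alloca [16 x i8]` in the entry block
+/// is static, while `alloca i8, i32 %n` or any alloca outside the entry
+/// block is not.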
+bool AllocaInst::isStaticAlloca() const {
+ // Must be constant size.
+ if (!isa<ConstantInt>(getArraySize())) return false;
+
+ // Must be in the entry block.
+ const BasicBlock *Parent = getParent();
+ return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
+}
+
+//===----------------------------------------------------------------------===//
+// LoadInst Implementation
+//===----------------------------------------------------------------------===//
+
+void LoadInst::AssertOK() {
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type.");
+}
+
+static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
+ assert(BB && "Insertion BB cannot be null when alignment not provided!");
+ assert(BB->getParent() &&
+ "BB must be in a Function when alignment not provided!");
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ return DL.getABITypeAlign(Ty);
+}
+
+static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
+ assert(I && "Insertion position cannot be null when alignment not provided!");
+ return computeLoadStoreDefaultAlign(Ty, I->getParent());
+}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
+ Instruction *InsertBef)
+ : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
+ BasicBlock *InsertAE)
+ : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ Instruction *InsertBef)
+ : LoadInst(Ty, Ptr, Name, isVolatile,
+ computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ BasicBlock *InsertAE)
+ : LoadInst(Ty, Ptr, Name, isVolatile,
+ computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ Align Align, Instruction *InsertBef)
+ : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+ SyncScope::System, InsertBef) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ Align Align, BasicBlock *InsertAE)
+ : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+ SyncScope::System, InsertAE) {}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ Align Align, AtomicOrdering Order, SyncScope::ID SSID,
+ Instruction *InsertBef)
+ : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
+ assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SSID);
+ AssertOK();
+ setName(Name);
+}
+
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ Align Align, AtomicOrdering Order, SyncScope::ID SSID,
+ BasicBlock *InsertAE)
+ : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
+ assert(cast<PointerType>(Ptr->getType())->isOpaqueOrPointeeTypeMatches(Ty));
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SSID);
+ AssertOK();
+ setName(Name);
+}
+
+//===----------------------------------------------------------------------===//
+// StoreInst Implementation
+//===----------------------------------------------------------------------===//
+
+void StoreInst::AssertOK() {
+ assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
+ assert(getOperand(1)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(cast<PointerType>(getOperand(1)->getType())
+ ->isOpaqueOrPointeeTypeMatches(getOperand(0)->getType()) &&
+ "Ptr must be a pointer to Val type!");
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
+ : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
+ : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ Instruction *InsertBefore)
+ : StoreInst(val, addr, isVolatile,
+ computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
+ InsertBefore) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
+ BasicBlock *InsertAtEnd)
+ : StoreInst(val, addr, isVolatile,
+ computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
+ InsertAtEnd) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
+ Instruction *InsertBefore)
+ : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
+ SyncScope::System, InsertBefore) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
+ BasicBlock *InsertAtEnd)
+ : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
+ SyncScope::System, InsertAtEnd) {}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
+ AtomicOrdering Order, SyncScope::ID SSID,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this), InsertBefore) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SSID);
+ AssertOK();
+}
+
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
+ AtomicOrdering Order, SyncScope::ID SSID,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
+ Op<0>() = val;
+ Op<1>() = addr;
+ setVolatile(isVolatile);
+ setAlignment(Align);
+ setAtomic(Order, SSID);
+ AssertOK();
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicCmpXchgInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
+ Align Alignment, AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SyncScope::ID SSID) {
+ Op<0>() = Ptr;
+ Op<1>() = Cmp;
+ Op<2>() = NewVal;
+ setSuccessOrdering(SuccessOrdering);
+ setFailureOrdering(FailureOrdering);
+ setSyncScopeID(SSID);
+ setAlignment(Alignment);
+
+ assert(getOperand(0) && getOperand(1) && getOperand(2) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(cast<PointerType>(getOperand(0)->getType())
+ ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
+ "Ptr must be a pointer to Cmp type!");
+ assert(cast<PointerType>(getOperand(0)->getType())
+ ->isOpaqueOrPointeeTypeMatches(getOperand(2)->getType()) &&
+ "Ptr must be a pointer to NewVal type!");
+ assert(getOperand(1)->getType() == getOperand(2)->getType() &&
+ "Cmp type and NewVal type must be same!");
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ Align Alignment,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SyncScope::ID SSID,
+ Instruction *InsertBefore)
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
+ Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
+}
+
+AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ Align Alignment,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SyncScope::ID SSID,
+ BasicBlock *InsertAtEnd)
+ : Instruction(
+ StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
+ AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
+ OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
+ Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWInst Implementation
+//===----------------------------------------------------------------------===//
+
+void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
+ Align Alignment, AtomicOrdering Ordering,
+ SyncScope::ID SSID) {
+ Op<0>() = Ptr;
+ Op<1>() = Val;
+ setOperation(Operation);
+ setOrdering(Ordering);
+ setSyncScopeID(SSID);
+ setAlignment(Alignment);
+
+ assert(getOperand(0) && getOperand(1) &&
+ "All operands must be non-null!");
+ assert(getOperand(0)->getType()->isPointerTy() &&
+ "Ptr must have pointer type!");
+ assert(cast<PointerType>(getOperand(0)->getType())
+ ->isOpaqueOrPointeeTypeMatches(getOperand(1)->getType()) &&
+ "Ptr must be a pointer to Val type!");
+ assert(Ordering != AtomicOrdering::NotAtomic &&
+ "AtomicRMW instructions must be atomic!");
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ Align Alignment, AtomicOrdering Ordering,
+ SyncScope::ID SSID, Instruction *InsertBefore)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
+ Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
+}
+
+AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ Align Alignment, AtomicOrdering Ordering,
+ SyncScope::ID SSID, BasicBlock *InsertAtEnd)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
+ Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
+}
+
+StringRef AtomicRMWInst::getOperationName(BinOp Op) {
+ switch (Op) {
+ case AtomicRMWInst::Xchg:
+ return "xchg";
+ case AtomicRMWInst::Add:
+ return "add";
+ case AtomicRMWInst::Sub:
+ return "sub";
+ case AtomicRMWInst::And:
+ return "and";
+ case AtomicRMWInst::Nand:
+ return "nand";
+ case AtomicRMWInst::Or:
+ return "or";
+ case AtomicRMWInst::Xor:
+ return "xor";
+ case AtomicRMWInst::Max:
+ return "max";
+ case AtomicRMWInst::Min:
+ return "min";
+ case AtomicRMWInst::UMax:
+ return "umax";
+ case AtomicRMWInst::UMin:
+ return "umin";
+ case AtomicRMWInst::FAdd:
+ return "fadd";
+ case AtomicRMWInst::FSub:
+ return "fsub";
+ case AtomicRMWInst::FMax:
+ return "fmax";
+ case AtomicRMWInst::FMin:
+ return "fmin";
+ case AtomicRMWInst::BAD_BINOP:
+ return "<invalid operation>";
+ }
+
+ llvm_unreachable("invalid atomicrmw operation");
+}
+
+//===----------------------------------------------------------------------===//
+// FenceInst Implementation
+//===----------------------------------------------------------------------===//
+
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SyncScope::ID SSID,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
+ setOrdering(Ordering);
+ setSyncScopeID(SSID);
+}
+
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SyncScope::ID SSID,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
+ setOrdering(Ordering);
+ setSyncScopeID(SSID);
+}
+
+//===----------------------------------------------------------------------===//
+// GetElementPtrInst Implementation
+//===----------------------------------------------------------------------===//
+
+void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &Name) {
+ assert(getNumOperands() == 1 + IdxList.size() &&
+ "NumOperands not initialized?");
+ Op<0>() = Ptr;
+ llvm::copy(IdxList, op_begin() + 1);
+ setName(Name);
+}
+
+GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
+ : Instruction(GEPI.getType(), GetElementPtr,
+ OperandTraits<GetElementPtrInst>::op_end(this) -
+ GEPI.getNumOperands(),
+ GEPI.getNumOperands()),
+ SourceElementType(GEPI.SourceElementType),
+ ResultElementType(GEPI.ResultElementType) {
+ std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
+ SubclassOptionalData = GEPI.SubclassOptionalData;
+}
+
+Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
+ if (auto *Struct = dyn_cast<StructType>(Ty)) {
+ if (!Struct->indexValid(Idx))
+ return nullptr;
+ return Struct->getTypeAtIndex(Idx);
+ }
+ if (!Idx->getType()->isIntOrIntVectorTy())
+ return nullptr;
+ if (auto *Array = dyn_cast<ArrayType>(Ty))
+ return Array->getElementType();
+ if (auto *Vector = dyn_cast<VectorType>(Ty))
+ return Vector->getElementType();
+ return nullptr;
+}
+
+Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
+ if (auto *Struct = dyn_cast<StructType>(Ty)) {
+ if (Idx >= Struct->getNumElements())
+ return nullptr;
+ return Struct->getElementType(Idx);
+ }
+ if (auto *Array = dyn_cast<ArrayType>(Ty))
+ return Array->getElementType();
+ if (auto *Vector = dyn_cast<VectorType>(Ty))
+ return Vector->getElementType();
+ return nullptr;
+}
+
+template <typename IndexTy>
+static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
+ if (IdxList.empty())
+ return Ty;
+ for (IndexTy V : IdxList.slice(1)) {
+ Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
+ if (!Ty)
+ return Ty;
+ }
+ return Ty;
+}
+
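+/// For example (illustrative), with %T = { i32, [4 x float] } and indices
+/// (i64 0, i32 1, i64 2), the indexed type is float; the leading index only
+/// steps over the pointer and never changes the type, so it is skipped.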
+Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
+ return getIndexedTypeInternal(Ty, IdxList);
+}
+
+Type *GetElementPtrInst::getIndexedType(Type *Ty,
+ ArrayRef<Constant *> IdxList) {
+ return getIndexedTypeInternal(Ty, IdxList);
+}
+
+Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
+ return getIndexedTypeInternal(Ty, IdxList);
+}
+
+/// hasAllZeroIndices - Return true if all of the indices of this GEP are
+/// zeros. If so, the result pointer and the first operand have the same
+/// value, just potentially different types.
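+/// For example (illustrative), `getelementptr %ST, %ST* %p, i64 0, i32 0`
+/// has all-zero indices and computes the same address as %p.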
+bool GetElementPtrInst::hasAllZeroIndices() const {
+ for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
+ if (!CI->isZero()) return false;
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+/// hasAllConstantIndices - Return true if all of the indices of this GEP are
+/// constant integers. If so, the result pointer and the first operand have
+/// a constant offset between them.
+bool GetElementPtrInst::hasAllConstantIndices() const {
+ for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+ if (!isa<ConstantInt>(getOperand(i)))
+ return false;
+ }
+ return true;
+}
+
+void GetElementPtrInst::setIsInBounds(bool B) {
+ cast<GEPOperator>(this)->setIsInBounds(B);
+}
+
+bool GetElementPtrInst::isInBounds() const {
+ return cast<GEPOperator>(this)->isInBounds();
+}
+
+bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
+ APInt &Offset) const {
+ // Delegate to the generic GEPOperator implementation.
+ return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
+}
+
+bool GetElementPtrInst::collectOffset(
+ const DataLayout &DL, unsigned BitWidth,
+ MapVector<Value *, APInt> &VariableOffsets,
+ APInt &ConstantOffset) const {
+ // Delegate to the generic GEPOperator implementation.
+ return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
+ ConstantOffset);
+}
+
+//===----------------------------------------------------------------------===//
+// ExtractElementInst Implementation
+//===----------------------------------------------------------------------===//
+
+ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
+ const Twine &Name,
+ Instruction *InsertBef)
+ : Instruction(cast<VectorType>(Val->getType())->getElementType(),
+ ExtractElement,
+ OperandTraits<ExtractElementInst>::op_begin(this),
+ 2, InsertBef) {
+ assert(isValidOperands(Val, Index) &&
+ "Invalid extractelement instruction operands!");
+ Op<0>() = Val;
+ Op<1>() = Index;
+ setName(Name);
+}
+
+ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
+ const Twine &Name,
+ BasicBlock *InsertAE)
+ : Instruction(cast<VectorType>(Val->getType())->getElementType(),
+ ExtractElement,
+ OperandTraits<ExtractElementInst>::op_begin(this),
+ 2, InsertAE) {
+ assert(isValidOperands(Val, Index) &&
+ "Invalid extractelement instruction operands!");
+
+ Op<0>() = Val;
+ Op<1>() = Index;
+ setName(Name);
+}
+
+bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
+ if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
+ return false;
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// InsertElementInst Implementation
+//===----------------------------------------------------------------------===//
+
+InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
+ const Twine &Name,
+ Instruction *InsertBef)
+ : Instruction(Vec->getType(), InsertElement,
+ OperandTraits<InsertElementInst>::op_begin(this),
+ 3, InsertBef) {
+ assert(isValidOperands(Vec, Elt, Index) &&
+ "Invalid insertelement instruction operands!");
+ Op<0>() = Vec;
+ Op<1>() = Elt;
+ Op<2>() = Index;
+ setName(Name);
+}
+
+InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
+ const Twine &Name,
+ BasicBlock *InsertAE)
+ : Instruction(Vec->getType(), InsertElement,
+ OperandTraits<InsertElementInst>::op_begin(this),
+ 3, InsertAE) {
+ assert(isValidOperands(Vec, Elt, Index) &&
+ "Invalid insertelement instruction operands!");
+
+ Op<0>() = Vec;
+ Op<1>() = Elt;
+ Op<2>() = Index;
+ setName(Name);
+}
+
+bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
+ const Value *Index) {
+ if (!Vec->getType()->isVectorTy())
+ return false; // First operand of insertelement must be vector type.
+
+ if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
+ return false;// Second operand of insertelement must be vector element type.
+
+ if (!Index->getType()->isIntegerTy())
+ return false; // Third operand of insertelement must be i32.
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// ShuffleVectorInst Implementation
+//===----------------------------------------------------------------------===//
+
+static Value *createPlaceholderForShuffleVector(Value *V) {
+ assert(V && "Cannot create placeholder of nullptr V");
+ return PoisonValue::get(V->getType());
+}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
+ Instruction *InsertBefore)
+ : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
+ InsertBefore) {}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *Mask, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
+ InsertAtEnd) {}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
+ const Twine &Name,
+ Instruction *InsertBefore)
+ : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
+ InsertBefore) {}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, ArrayRef<int> Mask,
+ const Twine &Name, BasicBlock *InsertAtEnd)
+ : ShuffleVectorInst(V1, createPlaceholderForShuffleVector(V1), Mask, Name,
+ InsertAtEnd) {}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+ const Twine &Name,
+ Instruction *InsertBefore)
+ : Instruction(
+ VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
+ cast<VectorType>(Mask->getType())->getElementCount()),
+ ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
+ OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
+ assert(isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector instruction operands!");
+
+ Op<0>() = V1;
+ Op<1>() = V2;
+ SmallVector<int, 16> MaskArr;
+ getShuffleMask(cast<Constant>(Mask), MaskArr);
+ setShuffleMask(MaskArr);
+ setName(Name);
+}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+ const Twine &Name, BasicBlock *InsertAtEnd)
+ : Instruction(
+ VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
+ cast<VectorType>(Mask->getType())->getElementCount()),
+ ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
+ OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
+ assert(isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector instruction operands!");
+
+ Op<0>() = V1;
+ Op<1>() = V2;
+ SmallVector<int, 16> MaskArr;
+ getShuffleMask(cast<Constant>(Mask), MaskArr);
+ setShuffleMask(MaskArr);
+ setName(Name);
+}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
+ const Twine &Name,
+ Instruction *InsertBefore)
+ : Instruction(
+ VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
+ Mask.size(), isa<ScalableVectorType>(V1->getType())),
+ ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
+ OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
+ assert(isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector instruction operands!");
+ Op<0>() = V1;
+ Op<1>() = V2;
+ setShuffleMask(Mask);
+ setName(Name);
+}
+
+ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
+ const Twine &Name, BasicBlock *InsertAtEnd)
+ : Instruction(
+ VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
+ Mask.size(), isa<ScalableVectorType>(V1->getType())),
+ ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
+ OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
+ assert(isValidOperands(V1, V2, Mask) &&
+ "Invalid shuffle vector instruction operands!");
+
+ Op<0>() = V1;
+ Op<1>() = V2;
+ setShuffleMask(Mask);
+ setName(Name);
+}
+
+void ShuffleVectorInst::commute() {
+ int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+ int NumMaskElts = ShuffleMask.size();
+ SmallVector<int, 16> NewMask(NumMaskElts);
+ for (int i = 0; i != NumMaskElts; ++i) {
+ int MaskElt = getMaskValue(i);
+ if (MaskElt == UndefMaskElem) {
+ NewMask[i] = UndefMaskElem;
+ continue;
+ }
+ assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
+ MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
+ NewMask[i] = MaskElt;
+ }
+ setShuffleMask(NewMask);
+ Op<0>().swap(Op<1>());
+}
+
+bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
+ ArrayRef<int> Mask) {
+ // V1 and V2 must be vectors of the same type.
+ if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
+ return false;
+
+ // Make sure the mask elements make sense.
+ int V1Size =
+ cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
+ for (int Elem : Mask)
+ if (Elem != UndefMaskElem && Elem >= V1Size * 2)
+ return false;
+
+ if (isa<ScalableVectorType>(V1->getType()))
+ if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask))
+ return false;
+
+ return true;
+}
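+
+// Example (illustrative): with two <4 x i32> operands, each mask element must
+// be UndefMaskElem (-1) or lie in [0, 8): {0, 4, 1, 5} is valid, {0, 8, 1, 5}
+// is not. For scalable vectors only an all-zero or all-undef splat mask is
+// accepted.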
+
+bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
+ const Value *Mask) {
+ // V1 and V2 must be vectors of the same type.
+ if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
+ return false;
+
+  // The mask must be a vector of i32, and it must be the same kind of vector
+  // (fixed or scalable) as the input vectors.
+ auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
+ if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
+ isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
+ return false;
+
+ // Check to see if Mask is valid.
+ if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
+ return true;
+
+ if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
+ unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
+ for (Value *Op : MV->operands()) {
+ if (auto *CI = dyn_cast<ConstantInt>(Op)) {
+ if (CI->uge(V1Size*2))
+ return false;
+ } else if (!isa<UndefValue>(Op)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
+ unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
+ for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
+ i != e; ++i)
+ if (CDS->getElementAsInteger(i) >= V1Size*2)
+ return false;
+ return true;
+ }
+
+ return false;
+}
+
+void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
+ SmallVectorImpl<int> &Result) {
+ ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
+
+ if (isa<ConstantAggregateZero>(Mask)) {
+ Result.resize(EC.getKnownMinValue(), 0);
+ return;
+ }
+
+ Result.reserve(EC.getKnownMinValue());
+
+ if (EC.isScalable()) {
+ assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
+ "Scalable vector shuffle mask must be undef or zeroinitializer");
+ int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
+ for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
+ Result.emplace_back(MaskVal);
+ return;
+ }
+
+ unsigned NumElts = EC.getKnownMinValue();
+
+ if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
+ for (unsigned i = 0; i != NumElts; ++i)
+ Result.push_back(CDS->getElementAsInteger(i));
+ return;
+ }
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *C = Mask->getAggregateElement(i);
+ Result.push_back(isa<UndefValue>(C) ? -1 :
+ cast<ConstantInt>(C)->getZExtValue());
+ }
+}
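+
+// Example (illustrative): the constant mask <i32 0, i32 undef, i32 2> decodes
+// to {0, -1, 2}; zeroinitializer decodes to an all-zero mask of the same
+// length.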
+
+void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
+ ShuffleMask.assign(Mask.begin(), Mask.end());
+ ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
+}
+
+Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
+ Type *ResultTy) {
+ Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
+ if (isa<ScalableVectorType>(ResultTy)) {
+ assert(is_splat(Mask) && "Unexpected shuffle");
+ Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
+ if (Mask[0] == 0)
+ return Constant::getNullValue(VecTy);
+ return UndefValue::get(VecTy);
+ }
+ SmallVector<Constant *, 16> MaskConst;
+ for (int Elem : Mask) {
+ if (Elem == UndefMaskElem)
+ MaskConst.push_back(UndefValue::get(Int32Ty));
+ else
+ MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
+ }
+ return ConstantVector::get(MaskConst);
+}
+
+static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
+ assert(!Mask.empty() && "Shuffle mask must contain elements");
+ bool UsesLHS = false;
+ bool UsesRHS = false;
+ for (int I : Mask) {
+ if (I == -1)
+ continue;
+ assert(I >= 0 && I < (NumOpElts * 2) &&
+ "Out-of-bounds shuffle mask element");
+ UsesLHS |= (I < NumOpElts);
+ UsesRHS |= (I >= NumOpElts);
+ if (UsesLHS && UsesRHS)
+ return false;
+ }
+  // Allow for degenerate case: completely undef mask means neither source is
+  // used.
+ return UsesLHS || UsesRHS;
+}
+
+bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
+ // We don't have vector operand size information, so assume operands are the
+ // same size as the mask.
+ return isSingleSourceMaskImpl(Mask, Mask.size());
+}
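+
+// Example (illustrative): for a 4-element mask, {0, 2, 1, 3} (all from the
+// first operand) and {5, 7, 4, 6} (all from the second) are single-source,
+// while {0, 5, 1, 4} draws from both operands and is not.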
+
+static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
+ if (!isSingleSourceMaskImpl(Mask, NumOpElts))
+ return false;
+ for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
+ if (Mask[i] == -1)
+ continue;
+ if (Mask[i] != i && Mask[i] != (NumOpElts + i))
+ return false;
+ }
+ return true;
+}
+
+bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
+ // We don't have vector operand size information, so assume operands are the
+ // same size as the mask.
+ return isIdentityMaskImpl(Mask, Mask.size());
+}
+
+bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
+ if (!isSingleSourceMask(Mask))
+ return false;
+
+ // The number of elements in the mask must be at least 2.
+ int NumElts = Mask.size();
+ if (NumElts < 2)
+ return false;
+
+ for (int i = 0; i < NumElts; ++i) {
+ if (Mask[i] == -1)
+ continue;
+ if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
+ return false;
+ }
+ return true;
+}
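+
+// Example (illustrative): for 4 elements, {3, 2, 1, 0} reverses the first
+// operand and {7, 6, 5, 4} reverses the second; undef elements are ignored,
+// so {3, -1, 1, 0} also qualifies.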
+
+bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
+ if (!isSingleSourceMask(Mask))
+ return false;
+ for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
+ if (Mask[i] == -1)
+ continue;
+ if (Mask[i] != 0 && Mask[i] != NumElts)
+ return false;
+ }
+ return true;
+}
+
+bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
+ // Select is differentiated from identity. It requires using both sources.
+ if (isSingleSourceMask(Mask))
+ return false;
+ for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
+ if (Mask[i] == -1)
+ continue;
+ if (Mask[i] != i && Mask[i] != (NumElts + i))
+ return false;
+ }
+ return true;
+}
+
+bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
+ // Example masks that will return true:
+ // v1 = <a, b, c, d>
+ // v2 = <e, f, g, h>
+ // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
+ // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
+
+ // 1. The number of elements in the mask must be a power-of-2 and at least 2.
+ int NumElts = Mask.size();
+ if (NumElts < 2 || !isPowerOf2_32(NumElts))
+ return false;
+
+ // 2. The first element of the mask must be either a 0 or a 1.
+ if (Mask[0] != 0 && Mask[0] != 1)
+ return false;
+
+ // 3. The difference between the first 2 elements must be equal to the
+ // number of elements in the mask.
+ if ((Mask[1] - Mask[0]) != NumElts)
+ return false;
+
+ // 4. The difference between consecutive even-numbered and odd-numbered
+ // elements must be equal to 2.
+ for (int i = 2; i < NumElts; ++i) {
+ int MaskEltVal = Mask[i];
+ if (MaskEltVal == -1)
+ return false;
+ int MaskEltPrevVal = Mask[i - 2];
+ if (MaskEltVal - MaskEltPrevVal != 2)
+ return false;
+ }
+ return true;
+}
+
+bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
+ int NumSrcElts, int &Index) {
+ // Must extract from a single source.
+ if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
+ return false;
+
+ // Must be smaller (else this is an Identity shuffle).
+ if (NumSrcElts <= (int)Mask.size())
+ return false;
+
+  // Find the start of the extraction, accounting for the fact that we may
+  // start with an UNDEF.
+ int SubIndex = -1;
+ for (int i = 0, e = Mask.size(); i != e; ++i) {
+ int M = Mask[i];
+ if (M < 0)
+ continue;
+ int Offset = (M % NumSrcElts) - i;
+ if (0 <= SubIndex && SubIndex != Offset)
+ return false;
+ SubIndex = Offset;
+ }
+
+ if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
+ Index = SubIndex;
+ return true;
+ }
+ return false;
+}
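+
+// Example (illustrative): extracting the high half of an 8-element source,
+// Mask = {4, 5, 6, 7} with NumSrcElts = 8 succeeds with Index = 4; a leading
+// undef, as in {-1, 5, 6, 7}, still yields Index = 4 from the defined
+// elements.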
+
+bool ShuffleVectorInst::isInsertSubvectorMask(ArrayRef<int> Mask,
+ int NumSrcElts, int &NumSubElts,
+ int &Index) {
+ int NumMaskElts = Mask.size();
+
+ // Don't try to match if we're shuffling to a smaller size.
+ if (NumMaskElts < NumSrcElts)
+ return false;
+
+ // TODO: We don't recognize self-insertion/widening.
+ if (isSingleSourceMaskImpl(Mask, NumSrcElts))
+ return false;
+
+ // Determine which mask elements are attributed to which source.
+ APInt UndefElts = APInt::getZero(NumMaskElts);
+ APInt Src0Elts = APInt::getZero(NumMaskElts);
+ APInt Src1Elts = APInt::getZero(NumMaskElts);
+ bool Src0Identity = true;
+ bool Src1Identity = true;
+
+ for (int i = 0; i != NumMaskElts; ++i) {
+ int M = Mask[i];
+ if (M < 0) {
+ UndefElts.setBit(i);
+ continue;
+ }
+ if (M < NumSrcElts) {
+ Src0Elts.setBit(i);
+ Src0Identity &= (M == i);
+ continue;
+ }
+ Src1Elts.setBit(i);
+ Src1Identity &= (M == (i + NumSrcElts));
+ }
+ assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
+ "unknown shuffle elements");
+ assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
+ "2-source shuffle not found");
+
+ // Determine lo/hi span ranges.
+ // TODO: How should we handle undefs at the start of subvector insertions?
+ int Src0Lo = Src0Elts.countTrailingZeros();
+ int Src1Lo = Src1Elts.countTrailingZeros();
+ int Src0Hi = NumMaskElts - Src0Elts.countLeadingZeros();
+ int Src1Hi = NumMaskElts - Src1Elts.countLeadingZeros();
+
+  // If src0 is in place, see if the src1 elements are in place within their
+  // own span.
+ if (Src0Identity) {
+ int NumSub1Elts = Src1Hi - Src1Lo;
+ ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
+ if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
+ NumSubElts = NumSub1Elts;
+ Index = Src1Lo;
+ return true;
+ }
+ }
+
+  // If src1 is in place, see if the src0 elements are in place within their
+  // own span.
+ if (Src1Identity) {
+ int NumSub0Elts = Src0Hi - Src0Lo;
+ ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
+ if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
+ NumSubElts = NumSub0Elts;
+ Index = Src0Lo;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool ShuffleVectorInst::isIdentityWithPadding() const {
+ if (isa<UndefValue>(Op<2>()))
+ return false;
+
+ // FIXME: Not currently possible to express a shuffle mask for a scalable
+ // vector for this case.
+ if (isa<ScalableVectorType>(getType()))
+ return false;
+
+ int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+ int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
+ if (NumMaskElts <= NumOpElts)
+ return false;
+
+ // The first part of the mask must choose elements from exactly 1 source op.
+ ArrayRef<int> Mask = getShuffleMask();
+ if (!isIdentityMaskImpl(Mask, NumOpElts))
+ return false;
+
+  // All of the extended (padding) elements must be undef.
+ for (int i = NumOpElts; i < NumMaskElts; ++i)
+ if (Mask[i] != -1)
+ return false;
+
+ return true;
+}
+
+bool ShuffleVectorInst::isIdentityWithExtract() const {
+ if (isa<UndefValue>(Op<2>()))
+ return false;
+
+ // FIXME: Not currently possible to express a shuffle mask for a scalable
+ // vector for this case.
+ if (isa<ScalableVectorType>(getType()))
+ return false;
+
+ int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+ int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
+ if (NumMaskElts >= NumOpElts)
+ return false;
+
+ return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
+}
+
+bool ShuffleVectorInst::isConcat() const {
+ // Vector concatenation is differentiated from identity with padding.
+ if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
+ isa<UndefValue>(Op<2>()))
+ return false;
+
+ // FIXME: Not currently possible to express a shuffle mask for a scalable
+ // vector for this case.
+ if (isa<ScalableVectorType>(getType()))
+ return false;
+
+ int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+ int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
+ if (NumMaskElts != NumOpElts * 2)
+ return false;
+
+ // Use the mask length rather than the operands' vector lengths here. We
+ // already know that the shuffle returns a vector twice as long as the inputs,
+  // and neither of the inputs is an undef vector. If the mask picks consecutive
+ // elements from both inputs, then this is a concatenation of the inputs.
+ return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
+}
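+
+// Example (illustrative): concatenating two <2 x i32> values,
+//   shufflevector <2 x i32> %a, <2 x i32> %b,
+//                 <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+// returns true here: the mask is twice as long as each operand and selects
+// all elements of %a followed by all elements of %b.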
+
+static bool isReplicationMaskWithParams(ArrayRef<int> Mask,
+ int ReplicationFactor, int VF) {
+ assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
+ "Unexpected mask size.");
+
+ for (int CurrElt : seq(0, VF)) {
+ ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
+ assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
+ "Run out of mask?");
+ Mask = Mask.drop_front(ReplicationFactor);
+ if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
+ return MaskElt == UndefMaskElem || MaskElt == CurrElt;
+ }))
+ return false;
+ }
+ assert(Mask.empty() && "Did not consume the whole mask?");
+
+ return true;
+}
+
+bool ShuffleVectorInst::isReplicationMask(ArrayRef<int> Mask,
+ int &ReplicationFactor, int &VF) {
+ // undef-less case is trivial.
+ if (none_of(Mask, [](int MaskElt) { return MaskElt == UndefMaskElem; })) {
+ ReplicationFactor =
+ Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
+ if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
+ return false;
+ VF = Mask.size() / ReplicationFactor;
+ return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
+ }
+
+  // However, if the mask contains undefs, we have to enumerate possible tuples
+  // and pick one. There are bounds on the replication factor: [1, mask size]
+  // (where RF=1 is an identity shuffle and RF=mask size is a broadcast
+  // shuffle). Additionally, the mask size is the replication factor multiplied
+  // by the vector size, which further significantly reduces the search space.
+
+ // Before doing that, let's perform basic correctness checking first.
+ int Largest = -1;
+ for (int MaskElt : Mask) {
+ if (MaskElt == UndefMaskElem)
+ continue;
+ // Elements must be in non-decreasing order.
+ if (MaskElt < Largest)
+ return false;
+ Largest = std::max(Largest, MaskElt);
+ }
+
+ // Prefer larger replication factor if all else equal.
+ for (int PossibleReplicationFactor :
+ reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
+ if (Mask.size() % PossibleReplicationFactor != 0)
+ continue;
+ int PossibleVF = Mask.size() / PossibleReplicationFactor;
+ if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
+ PossibleVF))
+ continue;
+ ReplicationFactor = PossibleReplicationFactor;
+ VF = PossibleVF;
+ return true;
+ }
+
+ return false;
+}
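+
+// Example (illustrative): {0, 0, 1, 1, 2, 2} is a replication mask with
+// ReplicationFactor = 2 and VF = 3: each of the three source elements is
+// repeated twice, in order.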
+
+bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
+ int &VF) const {
+ // Not possible to express a shuffle mask for a scalable vector for this
+ // case.
+ if (isa<ScalableVectorType>(getType()))
+ return false;
+
+ VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
+ if (ShuffleMask.size() % VF != 0)
+ return false;
+ ReplicationFactor = ShuffleMask.size() / VF;
+
+ return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
+}
+
+//===----------------------------------------------------------------------===//
+// InsertValueInst Class
+//===----------------------------------------------------------------------===//
+
+void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+ const Twine &Name) {
+ assert(getNumOperands() == 2 && "NumOperands not initialized?");
+
+ // There's no fundamental reason why we require at least one index
+ // (other than weirdness with &*IdxBegin being invalid; see
+ // getelementptr's init routine for example). But there's no
+ // present need to support it.
+ assert(!Idxs.empty() && "InsertValueInst must have at least one index");
+
+ assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
+ Val->getType() && "Inserted value must match indexed type!");
+ Op<0>() = Agg;
+ Op<1>() = Val;
+
+ Indices.append(Idxs.begin(), Idxs.end());
+ setName(Name);
+}
+
+InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
+ : Instruction(IVI.getType(), InsertValue,
+ OperandTraits<InsertValueInst>::op_begin(this), 2),
+ Indices(IVI.Indices) {
+ Op<0>() = IVI.getOperand(0);
+ Op<1>() = IVI.getOperand(1);
+ SubclassOptionalData = IVI.SubclassOptionalData;
+}
+
+//===----------------------------------------------------------------------===//
+// ExtractValueInst Class
+//===----------------------------------------------------------------------===//
+
+void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
+ assert(getNumOperands() == 1 && "NumOperands not initialized?");
+
+ // There's no fundamental reason why we require at least one index.
+ // But there's no present need to support it.
+ assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
+
+ Indices.append(Idxs.begin(), Idxs.end());
+ setName(Name);
+}
+
+ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
+ : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
+ Indices(EVI.Indices) {
+ SubclassOptionalData = EVI.SubclassOptionalData;
+}
+
+// getIndexedType - Returns the type of the element that would be extracted
+// with an extractvalue instruction with the specified parameters.
+//
+// A null type is returned if the indices are invalid for the specified
+// aggregate type.
+//
+Type *ExtractValueInst::getIndexedType(Type *Agg,
+ ArrayRef<unsigned> Idxs) {
+ for (unsigned Index : Idxs) {
+ // We can't use CompositeType::indexValid(Index) here.
+ // indexValid() always returns true for arrays because getelementptr allows
+ // out-of-bounds indices. Since we don't allow those for extractvalue and
+ // insertvalue we need to check array indexing manually.
+ // Since the only other types we can index into are struct types it's just
+ // as easy to check those manually as well.
+ if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
+ if (Index >= AT->getNumElements())
+ return nullptr;
+ Agg = AT->getElementType();
+ } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
+ if (Index >= ST->getNumElements())
+ return nullptr;
+ Agg = ST->getElementType(Index);
+ } else {
+ // Not a valid type to index into.
+ return nullptr;
+ }
+ }
+ return const_cast<Type*>(Agg);
+}
+
+//===----------------------------------------------------------------------===//
+// UnaryOperator Class
+//===----------------------------------------------------------------------===//
+
+UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
+ Type *Ty, const Twine &Name,
+ Instruction *InsertBefore)
+ : UnaryInstruction(Ty, iType, S, InsertBefore) {
+ Op<0>() = S;
+ setName(Name);
+ AssertOK();
+}
+
+UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
+ Type *Ty, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
+ Op<0>() = S;
+ setName(Name);
+ AssertOK();
+}
+
+UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
+}
+
+UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ UnaryOperator *Res = Create(Op, S, Name);
+ InsertAtEnd->getInstList().push_back(Res);
+ return Res;
+}
+
+void UnaryOperator::AssertOK() {
+ Value *LHS = getOperand(0);
+ (void)LHS; // Silence warnings.
+#ifndef NDEBUG
+ switch (getOpcode()) {
+ case FNeg:
+ assert(getType() == LHS->getType() &&
+ "Unary operation should return same type as operand!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ default: llvm_unreachable("Invalid opcode provided");
+ }
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// BinaryOperator Class
+//===----------------------------------------------------------------------===//
+
+BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
+ Type *Ty, const Twine &Name,
+ Instruction *InsertBefore)
+ : Instruction(Ty, iType,
+ OperandTraits<BinaryOperator>::op_begin(this),
+ OperandTraits<BinaryOperator>::operands(this),
+ InsertBefore) {
+ Op<0>() = S1;
+ Op<1>() = S2;
+ setName(Name);
+ AssertOK();
+}
+
+BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
+ Type *Ty, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Ty, iType,
+ OperandTraits<BinaryOperator>::op_begin(this),
+ OperandTraits<BinaryOperator>::operands(this),
+ InsertAtEnd) {
+ Op<0>() = S1;
+ Op<1>() = S2;
+ setName(Name);
+ AssertOK();
+}
+
+void BinaryOperator::AssertOK() {
+ Value *LHS = getOperand(0), *RHS = getOperand(1);
+ (void)LHS; (void)RHS; // Silence warnings.
+ assert(LHS->getType() == RHS->getType() &&
+ "Binary operator operand types must match!");
+#ifndef NDEBUG
+ switch (getOpcode()) {
+ case Add: case Sub:
+ case Mul:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isIntOrIntVectorTy() &&
+ "Tried to create an integer operation on a non-integer type!");
+ break;
+ case FAdd: case FSub:
+ case FMul:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ case UDiv:
+ case SDiv:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isIntOrIntVectorTy() &&
+ "Incorrect operand type (not integer) for S/UDIV");
+ break;
+ case FDiv:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Incorrect operand type (not floating point) for FDIV");
+ break;
+ case URem:
+ case SRem:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isIntOrIntVectorTy() &&
+ "Incorrect operand type (not integer) for S/UREM");
+ break;
+ case FRem:
+ assert(getType() == LHS->getType() &&
+ "Arithmetic operation should return same type as operands!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Incorrect operand type (not floating point) for FREM");
+ break;
+ case Shl:
+ case LShr:
+ case AShr:
+ assert(getType() == LHS->getType() &&
+ "Shift operation should return same type as operands!");
+ assert(getType()->isIntOrIntVectorTy() &&
+ "Tried to create a shift operation on a non-integral type!");
+ break;
+ case And: case Or:
+ case Xor:
+ assert(getType() == LHS->getType() &&
+ "Logical operation should return same type as operands!");
+ assert(getType()->isIntOrIntVectorTy() &&
+ "Tried to create a logical operation on a non-integral type!");
+ break;
+ default: llvm_unreachable("Invalid opcode provided");
+ }
+#endif
+}
+
+BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S1->getType() == S2->getType() &&
+ "Cannot create binary operator with two operands of differing type!");
+ return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ BinaryOperator *Res = Create(Op, S1, S2, Name);
+ InsertAtEnd->getInstList().push_back(Res);
+ return Res;
+}
+
+BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return new BinaryOperator(Instruction::Sub,
+ zero, Op,
+ Op->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return new BinaryOperator(Instruction::Sub,
+ zero, Op,
+ Op->getType(), Name, InsertAtEnd);
+}
+
+BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
+}
+
+BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
+ return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
+}
+
+BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
+ Instruction *InsertBefore) {
+ Constant *C = Constant::getAllOnesValue(Op->getType());
+ return new BinaryOperator(Instruction::Xor, Op, C,
+ Op->getType(), Name, InsertBefore);
+}
+
+BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
+ return new BinaryOperator(Instruction::Xor, Op, AllOnes,
+ Op->getType(), Name, InsertAtEnd);
+}
+
+// Exchange the two operands of this instruction. This is safe to use on any
+// binary instruction and does not modify the semantics of the instruction.
+// Returns true (i.e. failure) if the operator is not commutative, in which
+// case the operands are left unchanged.
+bool BinaryOperator::swapOperands() {
+ if (!isCommutative())
+ return true; // Can't commute operands
+ Op<0>().swap(Op<1>());
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// FPMathOperator Class
+//===----------------------------------------------------------------------===//
+
+float FPMathOperator::getFPAccuracy() const {
+ const MDNode *MD =
+ cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
+ if (!MD)
+ return 0.0;
+ ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
+ return Accuracy->getValueAPF().convertToFloat();
+}
+
+//===----------------------------------------------------------------------===//
+// CastInst Class
+//===----------------------------------------------------------------------===//
+
+// Just determine if this cast only deals with integral->integral conversion.
+bool CastInst::isIntegerCast() const {
+ switch (getOpcode()) {
+ default: return false;
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::Trunc:
+ return true;
+ case Instruction::BitCast:
+ return getOperand(0)->getType()->isIntegerTy() &&
+ getType()->isIntegerTy();
+ }
+}
+
+bool CastInst::isLosslessCast() const {
+ // Only BitCast can be lossless, exit fast if we're not BitCast
+ if (getOpcode() != Instruction::BitCast)
+ return false;
+
+ // Identity cast is always lossless
+ Type *SrcTy = getOperand(0)->getType();
+ Type *DstTy = getType();
+ if (SrcTy == DstTy)
+ return true;
+
+ // Pointer to pointer is always lossless.
+ if (SrcTy->isPointerTy())
+ return DstTy->isPointerTy();
+ return false; // Other types have no identity values
+}
+
+/// This function determines if the CastInst does not require any bits to be
+/// changed in order to effect the cast. Essentially, it identifies cases where
+/// no code gen is necessary for the cast, hence the name no-op cast. For
+/// example, the following are all no-op casts:
+/// # bitcast i32* %x to i8*
+/// # bitcast <2 x i32> %x to <4 x i16>
+/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
+/// Determine if the described cast is a no-op.
+bool CastInst::isNoopCast(Instruction::CastOps Opcode,
+ Type *SrcTy,
+ Type *DestTy,
+ const DataLayout &DL) {
+ assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
+ switch (Opcode) {
+ default: llvm_unreachable("Invalid CastOp");
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::AddrSpaceCast:
+ // TODO: Target informations may give a more accurate answer here.
+ return false;
+ case Instruction::BitCast:
+ return true; // BitCast never modifies bits.
+ case Instruction::PtrToInt:
+ return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
+ DestTy->getScalarSizeInBits();
+ case Instruction::IntToPtr:
+ return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
+ SrcTy->getScalarSizeInBits();
+ }
+}
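+
+// Example (illustrative): a bitcast is always a no-op, and on a target whose
+// pointers are 64 bits wide, "ptrtoint ptr %p to i64" is a no-op while
+// "ptrtoint ptr %p to i32" is not, since the latter truncates the pointer
+// value.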
+
+bool CastInst::isNoopCast(const DataLayout &DL) const {
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
+}
+
+/// This function determines if a pair of casts can be eliminated and what
+/// opcode should be used in the elimination. This assumes that there are two
+/// instructions like this:
+/// * %F = firstOpcode SrcTy %x to MidTy
+/// * %S = secondOpcode MidTy %F to DstTy
+/// The function returns a resultOpcode so these two casts can be replaced with:
+/// * %Replacement = resultOpcode %SrcTy %x to DstTy
+/// If no such cast is permitted, the function returns 0.
+unsigned CastInst::isEliminableCastPair(
+ Instruction::CastOps firstOp, Instruction::CastOps secondOp,
+ Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
+ Type *DstIntPtrTy) {
+  // Define the 169 possibilities for these two cast instructions. The values
+ // in this matrix determine what to do in a given situation and select the
+ // case in the switch below. The rows correspond to firstOp, the columns
+ // correspond to secondOp. In looking at the table below, keep in mind
+ // the following cast properties:
+ //
+ // Size Compare Source Destination
+ // Operator Src ? Size Type Sign Type Sign
+ // -------- ------------ ------------------- ---------------------
+ // TRUNC > Integer Any Integral Any
+ // ZEXT < Integral Unsigned Integer Any
+ // SEXT < Integral Signed Integer Any
+ // FPTOUI n/a FloatPt n/a Integral Unsigned
+ // FPTOSI n/a FloatPt n/a Integral Signed
+ // UITOFP n/a Integral Unsigned FloatPt n/a
+ // SITOFP n/a Integral Signed FloatPt n/a
+ // FPTRUNC > FloatPt n/a FloatPt n/a
+ // FPEXT < FloatPt n/a FloatPt n/a
+ // PTRTOINT n/a Pointer n/a Integral Unsigned
+ // INTTOPTR n/a Integral Unsigned Pointer n/a
+ // BITCAST = FirstClass n/a FirstClass n/a
+ // ADDRSPCST n/a Pointer n/a Pointer n/a
+ //
+ // NOTE: some transforms are safe, but we consider them to be non-profitable.
+ // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
+ // into "fptoui double to i64", but this loses information about the range
+ // of the produced value (we no longer know the top-part is all zeros).
+  // Further, this conversion is often much more expensive for typical
+  // hardware, and causes issues when building libgcc. We disallow fptosi+sext
+ // same reason.
+ const unsigned numCastOps =
+ Instruction::CastOpsEnd - Instruction::CastOpsBegin;
+ static const uint8_t CastResults[numCastOps][numCastOps] = {
+ // T F F U S F F P I B A -+
+ // R Z S P P I I T P 2 N T S |
+ // U E E 2 2 2 2 R E I T C C +- secondOp
+ // N X X U S F F N X N 2 V V |
+ // C T T I I P P C T T P T T -+
+ { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
+ { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
+ { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
+ { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
+ { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
+ { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
+ { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt |
+ { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
+ { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
+ { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
+ };
+
+ // TODO: This logic could be encoded into the table above and handled in the
+ // switch below.
+ // If either of the casts are a bitcast from scalar to vector, disallow the
+ // merging. However, any pair of bitcasts are allowed.
+ bool IsFirstBitcast = (firstOp == Instruction::BitCast);
+ bool IsSecondBitcast = (secondOp == Instruction::BitCast);
+ bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
+
+ // Check if any of the casts convert scalars <-> vectors.
+ if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
+ (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
+ if (!AreBothBitcasts)
+ return 0;
+
+ int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
+ [secondOp-Instruction::CastOpsBegin];
+ switch (ElimCase) {
+ case 0:
+ // Categorically disallowed.
+ return 0;
+ case 1:
+ // Allowed, use first cast's opcode.
+ return firstOp;
+ case 2:
+ // Allowed, use second cast's opcode.
+ return secondOp;
+ case 3:
+ // No-op cast in second op implies firstOp as long as the DestTy
+ // is integer and we are not converting between a vector and a
+ // non-vector type.
+ if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
+ return firstOp;
+ return 0;
+ case 4:
+ // No-op cast in second op implies firstOp as long as the DestTy
+ // is floating point.
+ if (DstTy->isFloatingPointTy())
+ return firstOp;
+ return 0;
+ case 5:
+ // No-op cast in first op implies secondOp as long as the SrcTy
+ // is an integer.
+ if (SrcTy->isIntegerTy())
+ return secondOp;
+ return 0;
+ case 6:
+ // No-op cast in first op implies secondOp as long as the SrcTy
+ // is a floating point.
+ if (SrcTy->isFloatingPointTy())
+ return secondOp;
+ return 0;
+ case 7: {
+    // Bail out if the inttoptr/ptrtoint optimization has been disabled.
+ if (DisableI2pP2iOpt)
+ return 0;
+
+ // Cannot simplify if address spaces are different!
+ if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
+ return 0;
+
+ unsigned MidSize = MidTy->getScalarSizeInBits();
+    // We can still fold this without knowing the actual sizes as long as we
+    // know that the intermediate pointer has the largest possible pointer
+    // size.
+ // FIXME: Is this always true?
+ if (MidSize == 64)
+ return Instruction::BitCast;
+
+ // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
+ if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
+ return 0;
+ unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
+ if (MidSize >= PtrSize)
+ return Instruction::BitCast;
+ return 0;
+ }
+ case 8: {
+ // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
+ // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
+ // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
+ unsigned SrcSize = SrcTy->getScalarSizeInBits();
+ unsigned DstSize = DstTy->getScalarSizeInBits();
+ if (SrcTy == DstTy)
+ return Instruction::BitCast;
+ if (SrcSize < DstSize)
+ return firstOp;
+ if (SrcSize > DstSize)
+ return secondOp;
+ return 0;
+ }
+ case 9:
+ // zext, sext -> zext, because sext can't sign extend after zext
+ return Instruction::ZExt;
+ case 11: {
+ // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
+ if (!MidIntPtrTy)
+ return 0;
+ unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
+ unsigned SrcSize = SrcTy->getScalarSizeInBits();
+ unsigned DstSize = DstTy->getScalarSizeInBits();
+ if (SrcSize <= PtrSize && SrcSize == DstSize)
+ return Instruction::BitCast;
+ return 0;
+ }
+ case 12:
+ // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
+ // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
+ if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
+ return Instruction::AddrSpaceCast;
+ return Instruction::BitCast;
+ case 13:
+ // FIXME: this state can be merged with (1), but the following assert
+    // is useful to check the correctness of the sequence due to the semantic
+    // change of bitcast.
+ assert(
+ SrcTy->isPtrOrPtrVectorTy() &&
+ MidTy->isPtrOrPtrVectorTy() &&
+ DstTy->isPtrOrPtrVectorTy() &&
+ SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
+ MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
+ "Illegal addrspacecast, bitcast sequence!");
+ // Allowed, use first cast's opcode
+ return firstOp;
+ case 14: {
+ // bitcast, addrspacecast -> addrspacecast if the element type of
+ // bitcast's source is the same as that of addrspacecast's destination.
+ PointerType *SrcPtrTy = cast<PointerType>(SrcTy->getScalarType());
+ PointerType *DstPtrTy = cast<PointerType>(DstTy->getScalarType());
+ if (SrcPtrTy->hasSameElementTypeAs(DstPtrTy))
+ return Instruction::AddrSpaceCast;
+ return 0;
+ }
+ case 15:
+ // FIXME: this state can be merged with (1), but the following assert
+    // is useful to check the correctness of the sequence due to the semantic
+    // change of bitcast.
+ assert(
+ SrcTy->isIntOrIntVectorTy() &&
+ MidTy->isPtrOrPtrVectorTy() &&
+ DstTy->isPtrOrPtrVectorTy() &&
+ MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
+ "Illegal inttoptr, bitcast sequence!");
+ // Allowed, use first cast's opcode
+ return firstOp;
+ case 16:
+ // FIXME: this state can be merged with (2), but the following assert
+    // is useful to check the correctness of the sequence due to the semantic
+    // change of bitcast.
+ assert(
+ SrcTy->isPtrOrPtrVectorTy() &&
+ MidTy->isPtrOrPtrVectorTy() &&
+ DstTy->isIntOrIntVectorTy() &&
+ SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
+ "Illegal bitcast, ptrtoint sequence!");
+ // Allowed, use second cast's opcode
+ return secondOp;
+ case 17:
+ // (sitofp (zext x)) -> (uitofp x)
+ return Instruction::UIToFP;
+ case 99:
+ // Cast combination can't happen (error in input). This is for all cases
+ // where the MidTy is not the same for the two cast instructions.
+ llvm_unreachable("Invalid Cast Combination");
+ default:
+ llvm_unreachable("Error in CastResults table!!!");
+ }
+}
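+
+// Example (illustrative): for the pair
+//   %F = zext i16 %x to i64
+//   %S = trunc i64 %F to i16
+// the table selects case 8; because the source and destination types match,
+// the pair folds to a bitcast of %x, which is an identity cast here.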
+
+CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
+ const Twine &Name, Instruction *InsertBefore) {
+ assert(castIsValid(op, S, Ty) && "Invalid cast!");
+ // Construct and return the appropriate CastInst subclass
+ switch (op) {
+ case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
+ case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
+ case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
+ case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
+ case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
+ case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
+ case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
+ case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
+ case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
+ case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
+ case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
+ case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
+ case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
+ default: llvm_unreachable("Invalid opcode provided");
+ }
+}
+
+CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
+ const Twine &Name, BasicBlock *InsertAtEnd) {
+ assert(castIsValid(op, S, Ty) && "Invalid cast!");
+ // Construct and return the appropriate CastInst subclass
+ switch (op) {
+ case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
+ case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
+ case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
+ case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
+ case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
+ case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
+ case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
+ case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
+ case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
+ case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
+ case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
+ case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
+ case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
+ default: llvm_unreachable("Invalid opcode provided");
+ }
+}
+
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+ return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+ return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+ return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+ return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
+ "Invalid cast");
+ assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
+ assert((!Ty->isVectorTy() ||
+ cast<VectorType>(Ty)->getElementCount() ==
+ cast<VectorType>(S->getType())->getElementCount()) &&
+ "Invalid cast");
+
+ if (Ty->isIntOrIntVectorTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
+
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
+}
+
+/// Create a BitCast or a PtrToInt cast instruction
+CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
+ "Invalid cast");
+ assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
+ assert((!Ty->isVectorTy() ||
+ cast<VectorType>(Ty)->getElementCount() ==
+ cast<VectorType>(S->getType())->getElementCount()) &&
+ "Invalid cast");
+
+ if (Ty->isIntOrIntVectorTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
+
+ return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
+}
+
+CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
+ assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
+
+ if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
+ return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ if (S->getType()->isPointerTy() && Ty->isIntegerTy())
+ return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
+ if (S->getType()->isIntegerTy() && Ty->isPointerTy())
+ return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
+
+ return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
+ bool isSigned, const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "Invalid integer cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::Trunc :
+ (isSigned ? Instruction::SExt : Instruction::ZExt)));
+ return Create(opcode, C, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
+ bool isSigned, const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::Trunc :
+ (isSigned ? Instruction::SExt : Instruction::ZExt)));
+ return Create(opcode, C, Ty, Name, InsertAtEnd);
+}
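+
+// Example (illustrative): CreateIntegerCast from i16 to i32 emits sext when
+// isSigned is true and zext otherwise; from i32 to i16 it emits trunc, and
+// for equal widths it emits bitcast, regardless of the sign flag.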
+
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
+ return Create(opcode, C, Ty, Name, InsertBefore);
+}
+
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
+ "Invalid cast");
+ unsigned SrcBits = C->getType()->getScalarSizeInBits();
+ unsigned DstBits = Ty->getScalarSizeInBits();
+ Instruction::CastOps opcode =
+ (SrcBits == DstBits ? Instruction::BitCast :
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
+ return Create(opcode, C, Ty, Name, InsertAtEnd);
+}
+
+bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
+ if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
+ return false;
+
+ if (SrcTy == DestTy)
+ return true;
+
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
+ if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
+ // An element by element cast. Valid if casting the elements is valid.
+ SrcTy = SrcVecTy->getElementType();
+ DestTy = DestVecTy->getElementType();
+ }
+ }
+ }
+
+ if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
+ if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
+ return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
+ }
+ }
+
+ TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
+ TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+
+ // Could still have vectors of pointers if the number of elements doesn't
+ // match
+ if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
+ return false;
+
+ if (SrcBits != DestBits)
+ return false;
+
+ if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
+ return false;
+
+ return true;
+}
+
+bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
+ const DataLayout &DL) {
+ // ptrtoint and inttoptr are not allowed on non-integral pointers
+ if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
+ if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
+ return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
+ !DL.isNonIntegralPointerType(PtrTy));
+ if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
+ if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
+ return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
+ !DL.isNonIntegralPointerType(PtrTy));
+
+ return isBitCastable(SrcTy, DestTy);
+}
+
+// Provide a way to get a "cast" where the cast opcode is inferred from the
+// types and size of the operand. This essentially parallels the logic in the
+// castIsValid function below. The following invariant should hold:
+//   castIsValid(getCastOpcode(Val, Ty), Val, Ty)
+// must not assert. In other words, this produces a "correct"
+// casting opcode for the arguments passed to it.
+Instruction::CastOps
+CastInst::getCastOpcode(
+ const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
+ Type *SrcTy = Src->getType();
+
+ assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
+ "Only first class types are castable!");
+
+ if (SrcTy == DestTy)
+ return BitCast;
+
+ // FIXME: Check address space sizes here
+ if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
+ if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
+ if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
+ // An element by element cast. Find the appropriate opcode based on the
+ // element types.
+ SrcTy = SrcVecTy->getElementType();
+ DestTy = DestVecTy->getElementType();
+ }
+
+  // Get the bit sizes; we'll need these.
+ unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
+ unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+
+ // Run through the possibilities ...
+ if (DestTy->isIntegerTy()) { // Casting to integral
+ if (SrcTy->isIntegerTy()) { // Casting from integral
+ if (DestBits < SrcBits)
+ return Trunc; // int -> smaller int
+ else if (DestBits > SrcBits) { // it's an extension
+ if (SrcIsSigned)
+ return SExt; // signed -> SEXT
+ else
+ return ZExt; // unsigned -> ZEXT
+ } else {
+ return BitCast; // Same size, No-op cast
+ }
+ } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
+ if (DestIsSigned)
+ return FPToSI; // FP -> sint
+ else
+ return FPToUI; // FP -> uint
+ } else if (SrcTy->isVectorTy()) {
+ assert(DestBits == SrcBits &&
+ "Casting vector to integer of different width");
+ return BitCast; // Same size, no-op cast
+ } else {
+ assert(SrcTy->isPointerTy() &&
+ "Casting from a value that is not first-class type");
+ return PtrToInt; // ptr -> int
+ }
+ } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
+ if (SrcTy->isIntegerTy()) { // Casting from integral
+ if (SrcIsSigned)
+ return SIToFP; // sint -> FP
+ else
+ return UIToFP; // uint -> FP
+ } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
+ if (DestBits < SrcBits) {
+ return FPTrunc; // FP -> smaller FP
+ } else if (DestBits > SrcBits) {
+ return FPExt; // FP -> larger FP
+ } else {
+ return BitCast; // same size, no-op cast
+ }
+ } else if (SrcTy->isVectorTy()) {
+ assert(DestBits == SrcBits &&
+ "Casting vector to floating point of different width");
+ return BitCast; // same size, no-op cast
+ }
+ llvm_unreachable("Casting pointer or non-first class to float");
+ } else if (DestTy->isVectorTy()) {
+ assert(DestBits == SrcBits &&
+ "Illegal cast to vector (wrong type or size)");
+ return BitCast;
+ } else if (DestTy->isPointerTy()) {
+ if (SrcTy->isPointerTy()) {
+ if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
+ return AddrSpaceCast;
+ return BitCast; // ptr -> ptr
+ } else if (SrcTy->isIntegerTy()) {
+ return IntToPtr; // int -> ptr
+ }
+ llvm_unreachable("Casting pointer to other than pointer or int");
+ } else if (DestTy->isX86_MMXTy()) {
+ if (SrcTy->isVectorTy()) {
+ assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
+ return BitCast; // 64-bit vector to MMX
+ }
+ llvm_unreachable("Illegal cast to X86_MMX");
+ }
+ llvm_unreachable("Casting to type that is not first-class");
+}
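+
+// A short sketch of getCastOpcode (hypothetical values; assumes an i64
+// Value *V64 and an in-scope LLVMContext Ctx). Narrowing an integer infers
+// Trunc, and the result satisfies castIsValid by construction:
+//
+//   Instruction::CastOps Op = CastInst::getCastOpcode(
+//       V64, /*SrcIsSigned=*/true, Type::getInt32Ty(Ctx),
+//       /*DestIsSigned=*/true);
+//   // Op == Instruction::Trunc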
+
+//===----------------------------------------------------------------------===//
+// CastInst SubClass Constructors
+//===----------------------------------------------------------------------===//
+
+/// Check that the construction parameters for a CastInst are correct. This
+/// could be broken out into the separate constructors but it is useful to have
+/// it in one place and to eliminate the redundant code for getting the sizes
+/// of the types involved.
+bool
+CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
+ if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
+ SrcTy->isAggregateType() || DstTy->isAggregateType())
+ return false;
+
+ // Get the size of the types in bits and note whether we are dealing with
+ // vector types; we'll need this later.
+ bool SrcIsVec = isa<VectorType>(SrcTy);
+ bool DstIsVec = isa<VectorType>(DstTy);
+ unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
+
+ // If these are vector types, get the lengths of the vectors (using zero for
+ // scalar types means that checking that vector lengths match also checks that
+ // scalars are not being converted to vectors or vectors to scalars).
+ ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
+ : ElementCount::getFixed(0);
+ ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
+ : ElementCount::getFixed(0);
+
+ // Switch on the opcode provided
+ switch (op) {
+ default: return false; // This is an input error
+ case Instruction::Trunc:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
+ case Instruction::ZExt:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
+ case Instruction::SExt:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
+ case Instruction::FPTrunc:
+ return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
+ SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
+ case Instruction::FPExt:
+ return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
+ SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
+ SrcEC == DstEC;
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
+ SrcEC == DstEC;
+ case Instruction::PtrToInt:
+ if (SrcEC != DstEC)
+ return false;
+ return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
+ case Instruction::IntToPtr:
+ if (SrcEC != DstEC)
+ return false;
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
+ case Instruction::BitCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+
+ // BitCast implies a no-op cast of type only. No bits change.
+ // However, you can't cast pointers to anything but pointers.
+ if (!SrcPtrTy != !DstPtrTy)
+ return false;
+
+ // For non-pointer cases, the cast is okay if the source and destination bit
+ // widths are identical.
+ if (!SrcPtrTy)
+ return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
+
+ // If both are pointers then the address spaces must match.
+ if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
+ return false;
+
+ // A vector of pointers must have the same number of elements.
+ if (SrcIsVec && DstIsVec)
+ return SrcEC == DstEC;
+ if (SrcIsVec)
+ return SrcEC == ElementCount::getFixed(1);
+ if (DstIsVec)
+ return DstEC == ElementCount::getFixed(1);
+
+ return true;
+ }
+ case Instruction::AddrSpaceCast: {
+ PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
+ if (!SrcPtrTy)
+ return false;
+
+ PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
+ if (!DstPtrTy)
+ return false;
+
+ if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
+ return false;
+
+ return SrcEC == DstEC;
+ }
+ }
+}
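+
+// A small sketch of castIsValid (assumes an in-scope LLVMContext Ctx).
+// Trunc requires a strictly narrower integer, so the widening direction is
+// rejected:
+//
+//   Type *I64 = Type::getInt64Ty(Ctx), *I32 = Type::getInt32Ty(Ctx);
+//   CastInst::castIsValid(Instruction::Trunc, I64, I32); // true
+//   CastInst::castIsValid(Instruction::Trunc, I32, I64); // false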
+
+TruncInst::TruncInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
+}
+
+TruncInst::TruncInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
+}
+
+ZExtInst::ZExtInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
+}
+
+ZExtInst::ZExtInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
+}
+SExtInst::SExtInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, SExt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
+}
+
+SExtInst::SExtInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
+}
+
+FPTruncInst::FPTruncInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
+}
+
+FPTruncInst::FPTruncInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
+}
+
+FPExtInst::FPExtInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
+}
+
+FPExtInst::FPExtInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
+}
+
+UIToFPInst::UIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
+}
+
+UIToFPInst::UIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
+}
+
+SIToFPInst::SIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
+}
+
+SIToFPInst::SIToFPInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
+}
+
+FPToUIInst::FPToUIInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
+}
+
+FPToUIInst::FPToUIInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
+}
+
+FPToSIInst::FPToSIInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
+}
+
+FPToSIInst::FPToSIInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
+}
+
+PtrToIntInst::PtrToIntInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
+}
+
+PtrToIntInst::PtrToIntInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
+}
+
+IntToPtrInst::IntToPtrInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
+}
+
+IntToPtrInst::IntToPtrInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
+}
+
+BitCastInst::BitCastInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
+}
+
+BitCastInst::BitCastInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
+}
+
+AddrSpaceCastInst::AddrSpaceCastInst(
+ Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
+) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
+}
+
+AddrSpaceCastInst::AddrSpaceCastInst(
+ Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
+) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
+ assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
+}
+
+//===----------------------------------------------------------------------===//
+// CmpInst Classes
+//===----------------------------------------------------------------------===//
+
+CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
+ Value *RHS, const Twine &Name, Instruction *InsertBefore,
+ Instruction *FlagsSource)
+ : Instruction(ty, op,
+ OperandTraits<CmpInst>::op_begin(this),
+ OperandTraits<CmpInst>::operands(this),
+ InsertBefore) {
+ Op<0>() = LHS;
+ Op<1>() = RHS;
+ setPredicate((Predicate)predicate);
+ setName(Name);
+ if (FlagsSource)
+ copyIRFlags(FlagsSource);
+}
+
+CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
+ Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
+ : Instruction(ty, op,
+ OperandTraits<CmpInst>::op_begin(this),
+ OperandTraits<CmpInst>::operands(this),
+ InsertAtEnd) {
+ Op<0>() = LHS;
+ Op<1>() = RHS;
+ setPredicate((Predicate)predicate);
+ setName(Name);
+}
+
+CmpInst *
+CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
+ const Twine &Name, Instruction *InsertBefore) {
+ if (Op == Instruction::ICmp) {
+ if (InsertBefore)
+ return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ else
+ return new ICmpInst(CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ }
+
+ if (InsertBefore)
+ return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ else
+ return new FCmpInst(CmpInst::Predicate(predicate),
+ S1, S2, Name);
+}
+
+CmpInst *
+CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
+ const Twine &Name, BasicBlock *InsertAtEnd) {
+ if (Op == Instruction::ICmp) {
+ return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+ }
+ return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
+ S1, S2, Name);
+}
+
+void CmpInst::swapOperands() {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
+ IC->swapOperands();
+ else
+ cast<FCmpInst>(this)->swapOperands();
+}
+
+bool CmpInst::isCommutative() const {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
+ return IC->isCommutative();
+ return cast<FCmpInst>(this)->isCommutative();
+}
+
+bool CmpInst::isEquality(Predicate P) {
+ if (ICmpInst::isIntPredicate(P))
+ return ICmpInst::isEquality(P);
+ if (FCmpInst::isFPPredicate(P))
+ return FCmpInst::isEquality(P);
+ llvm_unreachable("Unsupported predicate kind");
+}
+
+CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown cmp predicate!");
+ case ICMP_EQ: return ICMP_NE;
+ case ICMP_NE: return ICMP_EQ;
+ case ICMP_UGT: return ICMP_ULE;
+ case ICMP_ULT: return ICMP_UGE;
+ case ICMP_UGE: return ICMP_ULT;
+ case ICMP_ULE: return ICMP_UGT;
+ case ICMP_SGT: return ICMP_SLE;
+ case ICMP_SLT: return ICMP_SGE;
+ case ICMP_SGE: return ICMP_SLT;
+ case ICMP_SLE: return ICMP_SGT;
+
+ case FCMP_OEQ: return FCMP_UNE;
+ case FCMP_ONE: return FCMP_UEQ;
+ case FCMP_OGT: return FCMP_ULE;
+ case FCMP_OLT: return FCMP_UGE;
+ case FCMP_OGE: return FCMP_ULT;
+ case FCMP_OLE: return FCMP_UGT;
+ case FCMP_UEQ: return FCMP_ONE;
+ case FCMP_UNE: return FCMP_OEQ;
+ case FCMP_UGT: return FCMP_OLE;
+ case FCMP_ULT: return FCMP_OGE;
+ case FCMP_UGE: return FCMP_OLT;
+ case FCMP_ULE: return FCMP_OGT;
+ case FCMP_ORD: return FCMP_UNO;
+ case FCMP_UNO: return FCMP_ORD;
+ case FCMP_TRUE: return FCMP_FALSE;
+ case FCMP_FALSE: return FCMP_TRUE;
+ }
+}
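+
+// The inverse predicate negates the comparison: for all X and Y,
+// (X pred Y) == !(X inverse(pred) Y). A brief sketch:
+//
+//   CmpInst::getInversePredicate(CmpInst::ICMP_ULT); // ICMP_UGE
+//   CmpInst::getInversePredicate(CmpInst::FCMP_OEQ); // FCMP_UNE; NaN flips
+//                                                    // ordered to unordered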
+
+StringRef CmpInst::getPredicateName(Predicate Pred) {
+ switch (Pred) {
+ default: return "unknown";
+ case FCmpInst::FCMP_FALSE: return "false";
+ case FCmpInst::FCMP_OEQ: return "oeq";
+ case FCmpInst::FCMP_OGT: return "ogt";
+ case FCmpInst::FCMP_OGE: return "oge";
+ case FCmpInst::FCMP_OLT: return "olt";
+ case FCmpInst::FCMP_OLE: return "ole";
+ case FCmpInst::FCMP_ONE: return "one";
+ case FCmpInst::FCMP_ORD: return "ord";
+ case FCmpInst::FCMP_UNO: return "uno";
+ case FCmpInst::FCMP_UEQ: return "ueq";
+ case FCmpInst::FCMP_UGT: return "ugt";
+ case FCmpInst::FCMP_UGE: return "uge";
+ case FCmpInst::FCMP_ULT: return "ult";
+ case FCmpInst::FCMP_ULE: return "ule";
+ case FCmpInst::FCMP_UNE: return "une";
+ case FCmpInst::FCMP_TRUE: return "true";
+ case ICmpInst::ICMP_EQ: return "eq";
+ case ICmpInst::ICMP_NE: return "ne";
+ case ICmpInst::ICMP_SGT: return "sgt";
+ case ICmpInst::ICMP_SGE: return "sge";
+ case ICmpInst::ICMP_SLT: return "slt";
+ case ICmpInst::ICMP_SLE: return "sle";
+ case ICmpInst::ICMP_UGT: return "ugt";
+ case ICmpInst::ICMP_UGE: return "uge";
+ case ICmpInst::ICMP_ULT: return "ult";
+ case ICmpInst::ICMP_ULE: return "ule";
+ }
+}
+
+ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown icmp predicate!");
+ case ICMP_EQ: case ICMP_NE:
+ case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
+ return pred;
+ case ICMP_UGT: return ICMP_SGT;
+ case ICMP_ULT: return ICMP_SLT;
+ case ICMP_UGE: return ICMP_SGE;
+ case ICMP_ULE: return ICMP_SLE;
+ }
+}
+
+ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown icmp predicate!");
+ case ICMP_EQ: case ICMP_NE:
+ case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
+ return pred;
+ case ICMP_SGT: return ICMP_UGT;
+ case ICMP_SLT: return ICMP_ULT;
+ case ICMP_SGE: return ICMP_UGE;
+ case ICMP_SLE: return ICMP_ULE;
+ }
+}
+
+CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
+ switch (pred) {
+ default: llvm_unreachable("Unknown cmp predicate!");
+ case ICMP_EQ: case ICMP_NE:
+ return pred;
+ case ICMP_SGT: return ICMP_SLT;
+ case ICMP_SLT: return ICMP_SGT;
+ case ICMP_SGE: return ICMP_SLE;
+ case ICMP_SLE: return ICMP_SGE;
+ case ICMP_UGT: return ICMP_ULT;
+ case ICMP_ULT: return ICMP_UGT;
+ case ICMP_UGE: return ICMP_ULE;
+ case ICMP_ULE: return ICMP_UGE;
+
+ case FCMP_FALSE: case FCMP_TRUE:
+ case FCMP_OEQ: case FCMP_ONE:
+ case FCMP_UEQ: case FCMP_UNE:
+ case FCMP_ORD: case FCMP_UNO:
+ return pred;
+ case FCMP_OGT: return FCMP_OLT;
+ case FCMP_OLT: return FCMP_OGT;
+ case FCMP_OGE: return FCMP_OLE;
+ case FCMP_OLE: return FCMP_OGE;
+ case FCMP_UGT: return FCMP_ULT;
+ case FCMP_ULT: return FCMP_UGT;
+ case FCMP_UGE: return FCMP_ULE;
+ case FCMP_ULE: return FCMP_UGE;
+ }
+}
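+
+// The swapped predicate preserves truth when the operands are exchanged:
+// (X pred Y) == (Y swapped(pred) X). A brief sketch:
+//
+//   CmpInst::getSwappedPredicate(CmpInst::ICMP_SGT); // ICMP_SLT
+//   // i.e. "X >s Y" holds exactly when "Y <s X" holds.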
+
+bool CmpInst::isNonStrictPredicate(Predicate pred) {
+ switch (pred) {
+ case ICMP_SGE:
+ case ICMP_SLE:
+ case ICMP_UGE:
+ case ICMP_ULE:
+ case FCMP_OGE:
+ case FCMP_OLE:
+ case FCMP_UGE:
+ case FCMP_ULE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool CmpInst::isStrictPredicate(Predicate pred) {
+ switch (pred) {
+ case ICMP_SGT:
+ case ICMP_SLT:
+ case ICMP_UGT:
+ case ICMP_ULT:
+ case FCMP_OGT:
+ case FCMP_OLT:
+ case FCMP_UGT:
+ case FCMP_ULT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+CmpInst::Predicate CmpInst::getStrictPredicate(Predicate pred) {
+ switch (pred) {
+ case ICMP_SGE:
+ return ICMP_SGT;
+ case ICMP_SLE:
+ return ICMP_SLT;
+ case ICMP_UGE:
+ return ICMP_UGT;
+ case ICMP_ULE:
+ return ICMP_ULT;
+ case FCMP_OGE:
+ return FCMP_OGT;
+ case FCMP_OLE:
+ return FCMP_OLT;
+ case FCMP_UGE:
+ return FCMP_UGT;
+ case FCMP_ULE:
+ return FCMP_ULT;
+ default:
+ return pred;
+ }
+}
+
+CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
+ switch (pred) {
+ case ICMP_SGT:
+ return ICMP_SGE;
+ case ICMP_SLT:
+ return ICMP_SLE;
+ case ICMP_UGT:
+ return ICMP_UGE;
+ case ICMP_ULT:
+ return ICMP_ULE;
+ case FCMP_OGT:
+ return FCMP_OGE;
+ case FCMP_OLT:
+ return FCMP_OLE;
+ case FCMP_UGT:
+ return FCMP_UGE;
+ case FCMP_ULT:
+ return FCMP_ULE;
+ default:
+ return pred;
+ }
+}
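+
+// Strict and non-strict forms round-trip through these two helpers, and
+// predicates without a counterpart pass through unchanged. A sketch:
+//
+//   CmpInst::getStrictPredicate(CmpInst::ICMP_SGE);    // ICMP_SGT
+//   CmpInst::getNonStrictPredicate(CmpInst::ICMP_SGT); // ICMP_SGE
+//   CmpInst::getStrictPredicate(CmpInst::ICMP_EQ);     // ICMP_EQ (unchanged)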
+
+CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
+ assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
+
+ if (isStrictPredicate(pred))
+ return getNonStrictPredicate(pred);
+ if (isNonStrictPredicate(pred))
+ return getStrictPredicate(pred);
+
+ llvm_unreachable("Unknown predicate!");
+}
+
+CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
+ assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
+
+ switch (pred) {
+ default:
+ llvm_unreachable("Unknown predicate!");
+ case CmpInst::ICMP_ULT:
+ return CmpInst::ICMP_SLT;
+ case CmpInst::ICMP_ULE:
+ return CmpInst::ICMP_SLE;
+ case CmpInst::ICMP_UGT:
+ return CmpInst::ICMP_SGT;
+ case CmpInst::ICMP_UGE:
+ return CmpInst::ICMP_SGE;
+ }
+}
+
+CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
+ assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
+
+ switch (pred) {
+ default:
+ llvm_unreachable("Unknown predicate!");
+ case CmpInst::ICMP_SLT:
+ return CmpInst::ICMP_ULT;
+ case CmpInst::ICMP_SLE:
+ return CmpInst::ICMP_ULE;
+ case CmpInst::ICMP_SGT:
+ return CmpInst::ICMP_UGT;
+ case CmpInst::ICMP_SGE:
+ return CmpInst::ICMP_UGE;
+ }
+}
+
+bool CmpInst::isUnsigned(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_UGE: return true;
+ }
+}
+
+bool CmpInst::isSigned(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SGE: return true;
+ }
+}
+
+bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
+ ICmpInst::Predicate Pred) {
+ assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
+ switch (Pred) {
+ case ICmpInst::Predicate::ICMP_EQ:
+ return LHS.eq(RHS);
+ case ICmpInst::Predicate::ICMP_NE:
+ return LHS.ne(RHS);
+ case ICmpInst::Predicate::ICMP_UGT:
+ return LHS.ugt(RHS);
+ case ICmpInst::Predicate::ICMP_UGE:
+ return LHS.uge(RHS);
+ case ICmpInst::Predicate::ICMP_ULT:
+ return LHS.ult(RHS);
+ case ICmpInst::Predicate::ICMP_ULE:
+ return LHS.ule(RHS);
+ case ICmpInst::Predicate::ICMP_SGT:
+ return LHS.sgt(RHS);
+ case ICmpInst::Predicate::ICMP_SGE:
+ return LHS.sge(RHS);
+ case ICmpInst::Predicate::ICMP_SLT:
+ return LHS.slt(RHS);
+ case ICmpInst::Predicate::ICMP_SLE:
+ return LHS.sle(RHS);
+ default:
+ llvm_unreachable("Unexpected non-integer predicate.");
+ }
+}
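+
+// A constant-folding sketch: the same bit pattern compares differently
+// under unsigned and signed predicates (assumes APInt is in scope):
+//
+//   APInt A(8, 200), B(8, 100);                  // 200 is -56 as signed i8
+//   ICmpInst::compare(A, B, ICmpInst::ICMP_UGT); // true:  200 >u 100
+//   ICmpInst::compare(A, B, ICmpInst::ICMP_SGT); // false: -56 >s 100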
+
+bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
+ FCmpInst::Predicate Pred) {
+ APFloat::cmpResult R = LHS.compare(RHS);
+ switch (Pred) {
+ default:
+ llvm_unreachable("Invalid FCmp Predicate");
+ case FCmpInst::FCMP_FALSE:
+ return false;
+ case FCmpInst::FCMP_TRUE:
+ return true;
+ case FCmpInst::FCMP_UNO:
+ return R == APFloat::cmpUnordered;
+ case FCmpInst::FCMP_ORD:
+ return R != APFloat::cmpUnordered;
+ case FCmpInst::FCMP_UEQ:
+ return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
+ case FCmpInst::FCMP_OEQ:
+ return R == APFloat::cmpEqual;
+ case FCmpInst::FCMP_UNE:
+ return R != APFloat::cmpEqual;
+ case FCmpInst::FCMP_ONE:
+ return R == APFloat::cmpLessThan || R == APFloat::cmpGreaterThan;
+ case FCmpInst::FCMP_ULT:
+ return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
+ case FCmpInst::FCMP_OLT:
+ return R == APFloat::cmpLessThan;
+ case FCmpInst::FCMP_UGT:
+ return R == APFloat::cmpUnordered || R == APFloat::cmpGreaterThan;
+ case FCmpInst::FCMP_OGT:
+ return R == APFloat::cmpGreaterThan;
+ case FCmpInst::FCMP_ULE:
+ return R != APFloat::cmpGreaterThan;
+ case FCmpInst::FCMP_OLE:
+ return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
+ case FCmpInst::FCMP_UGE:
+ return R != APFloat::cmpLessThan;
+ case FCmpInst::FCMP_OGE:
+ return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
+ }
+}
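+
+// A sketch of how NaN separates ordered from unordered predicates (assumes
+// APFloat is in scope); any comparison against NaN is unordered:
+//
+//   APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
+//   FCmpInst::compare(NaN, NaN, FCmpInst::FCMP_OEQ); // false: unordered
+//   FCmpInst::compare(NaN, NaN, FCmpInst::FCMP_UEQ); // true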
+
+CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
+ assert(CmpInst::isRelational(pred) &&
+ "Call only with non-equality predicates!");
+
+ if (isSigned(pred))
+ return getUnsignedPredicate(pred);
+ if (isUnsigned(pred))
+ return getSignedPredicate(pred);
+
+ llvm_unreachable("Unknown predicate!");
+}
+
+bool CmpInst::isOrdered(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
+ case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
+ case FCmpInst::FCMP_ORD: return true;
+ }
+}
+
+bool CmpInst::isUnordered(Predicate predicate) {
+ switch (predicate) {
+ default: return false;
+ case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
+ case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
+ case FCmpInst::FCMP_UNO: return true;
+ }
+}
+
+bool CmpInst::isTrueWhenEqual(Predicate predicate) {
+ switch(predicate) {
+ default: return false;
+ case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
+ case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
+ }
+}
+
+bool CmpInst::isFalseWhenEqual(Predicate predicate) {
+ switch(predicate) {
+ case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
+ case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
+ default: return false;
+ }
+}
+
+bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
+ // If the predicates match, then we know the first condition implies the
+ // second is true.
+ if (Pred1 == Pred2)
+ return true;
+
+ switch (Pred1) {
+ default:
+ break;
+ case ICMP_EQ:
+ // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
+ return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
+ Pred2 == ICMP_SLE;
+ case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
+ return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
+ case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
+ return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
+ case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
+ return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
+ case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
+ return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
+ }
+ return false;
+}
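+
+// A brief sketch: a strict comparison implies both its non-strict
+// counterpart and inequality on the same operands, but nothing stronger:
+//
+//   CmpInst::isImpliedTrueByMatchingCmp(CmpInst::ICMP_ULT,
+//                                       CmpInst::ICMP_ULE); // true
+//   CmpInst::isImpliedTrueByMatchingCmp(CmpInst::ICMP_ULT,
+//                                       CmpInst::ICMP_UGT); // false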
+
+bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
+ return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
+}
+
+//===----------------------------------------------------------------------===//
+// SwitchInst Implementation
+//===----------------------------------------------------------------------===//
+
+void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
+ assert(Value && Default && NumReserved);
+ ReservedSpace = NumReserved;
+ setNumHungOffUseOperands(2);
+ allocHungoffUses(ReservedSpace);
+
+ Op<0>() = Value;
+ Op<1>() = Default;
+}
+
+/// SwitchInst ctor - Create a new switch instruction, specifying a value to
+/// switch on and a default destination. The number of additional cases can
+/// be specified here to make memory allocation more efficient. This
+/// constructor can also autoinsert before another instruction.
+SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
+ nullptr, 0, InsertBefore) {
+ init(Value, Default, 2+NumCases*2);
+}
+
+/// SwitchInst ctor - Create a new switch instruction, specifying a value to
+/// switch on and a default destination. The number of additional cases can
+/// be specified here to make memory allocation more efficient. This
+/// constructor also autoinserts at the end of the specified BasicBlock.
+SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
+ nullptr, 0, InsertAtEnd) {
+ init(Value, Default, 2+NumCases*2);
+}
+
+SwitchInst::SwitchInst(const SwitchInst &SI)
+ : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
+ init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
+ setNumHungOffUseOperands(SI.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = SI.getOperandList();
+ for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
+ OL[i] = InOL[i];
+ OL[i+1] = InOL[i+1];
+ }
+ SubclassOptionalData = SI.SubclassOptionalData;
+}
+
+/// addCase - Add an entry to the switch instruction...
+///
+void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
+ unsigned NewCaseIdx = getNumCases();
+ unsigned OpNo = getNumOperands();
+ if (OpNo+2 > ReservedSpace)
+ growOperands(); // Get more space!
+ // Initialize some new operands.
+ assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(OpNo+2);
+ CaseHandle Case(this, NewCaseIdx);
+ Case.setValue(OnVal);
+ Case.setSuccessor(Dest);
+}
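+
+// A usage sketch (hypothetical values; assumes an i32 Value *Cond,
+// BasicBlocks Default, BB0 and BB1, and an in-scope LLVMContext Ctx).
+// Reserving the case count up front avoids growOperands() reallocations:
+//
+//   SwitchInst *SI = SwitchInst::Create(Cond, Default, /*NumCases=*/2);
+//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 0), BB0);
+//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 1), BB1);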
+
+/// removeCase - This method removes the specified case and its successor
+/// from the switch instruction.
+SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
+ unsigned idx = I->getCaseIndex();
+
+ assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
+
+ unsigned NumOps = getNumOperands();
+ Use *OL = getOperandList();
+
+ // Overwrite this case with the end of the list.
+ if (2 + (idx + 1) * 2 != NumOps) {
+ OL[2 + idx * 2] = OL[NumOps - 2];
+ OL[2 + idx * 2 + 1] = OL[NumOps - 1];
+ }
+
+ // Nuke the last value.
+ OL[NumOps-2].set(nullptr);
+ OL[NumOps-2+1].set(nullptr);
+ setNumHungOffUseOperands(NumOps-2);
+
+ return CaseIt(this, idx);
+}
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, tripling the number of operands.
+///
+void SwitchInst::growOperands() {
+ unsigned e = getNumOperands();
+ unsigned NumOps = e*3;
+
+ ReservedSpace = NumOps;
+ growHungoffUses(ReservedSpace);
+}
+
+MDNode *
+SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
+ if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
+ if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
+ if (MDName->getString() == "branch_weights")
+ return ProfileData;
+ return nullptr;
+}
+
+MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
+ assert(Changed && "called only if metadata has changed");
+
+ if (!Weights)
+ return nullptr;
+
+ assert(SI.getNumSuccessors() == Weights->size() &&
+ "num of prof branch_weights must accord with num of successors");
+
+ bool AllZeroes = all_of(Weights.value(), [](uint32_t W) { return W == 0; });
+
+ if (AllZeroes || Weights.value().size() < 2)
+ return nullptr;
+
+ return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
+}
+
+void SwitchInstProfUpdateWrapper::init() {
+ MDNode *ProfileData = getProfBranchWeightsMD(SI);
+ if (!ProfileData)
+ return;
+
+ if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
+ llvm_unreachable("number of prof branch_weights metadata operands does "
+ "not correspond to number of succesors");
+ }
+
+ SmallVector<uint32_t, 8> Weights;
+ for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
+ ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
+ uint32_t CW = C->getValue().getZExtValue();
+ Weights.push_back(CW);
+ }
+ this->Weights = std::move(Weights);
+}
+
+SwitchInst::CaseIt
+SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
+ if (Weights) {
+ assert(SI.getNumSuccessors() == Weights->size() &&
+ "num of prof branch_weights must accord with num of successors");
+ Changed = true;
+ // Copy the last case to the place of the removed one and shrink.
+ // This is tightly coupled with the way SwitchInst::removeCase(CaseIt)
+ // removes cases: the last case overwrites the removed one.
+ Weights.value()[I->getCaseIndex() + 1] = Weights.value().back();
+ Weights.value().pop_back();
+ }
+ return SI.removeCase(I);
+}
+
+void SwitchInstProfUpdateWrapper::addCase(
+ ConstantInt *OnVal, BasicBlock *Dest,
+ SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
+ SI.addCase(OnVal, Dest);
+
+ if (!Weights && W && *W) {
+ Changed = true;
+ Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
+ Weights.value()[SI.getNumSuccessors() - 1] = *W;
+ } else if (Weights) {
+ Changed = true;
+ Weights.value().push_back(W.value_or(0));
+ }
+ if (Weights)
+ assert(SI.getNumSuccessors() == Weights->size() &&
+ "num of prof branch_weights must accord with num of successors");
+}
+
+SymbolTableList<Instruction>::iterator
+SwitchInstProfUpdateWrapper::eraseFromParent() {
+ // Instruction is erased. Mark as unchanged to not touch it in the destructor.
+ Changed = false;
+ if (Weights)
+ Weights->resize(0);
+ return SI.eraseFromParent();
+}
+
+SwitchInstProfUpdateWrapper::CaseWeightOpt
+SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
+ if (!Weights)
+ return None;
+ return (*Weights)[idx];
+}
+
+void SwitchInstProfUpdateWrapper::setSuccessorWeight(
+ unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
+ if (!W)
+ return;
+
+ if (!Weights && *W)
+ Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
+
+ if (Weights) {
+ auto &OldW = (*Weights)[idx];
+ if (*W != OldW) {
+ Changed = true;
+ OldW = *W;
+ }
+ }
+}
+
+SwitchInstProfUpdateWrapper::CaseWeightOpt
+SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
+ unsigned idx) {
+ if (MDNode *ProfileData = getProfBranchWeightsMD(SI))
+ if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
+ return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
+ ->getValue()
+ .getZExtValue();
+
+ return None;
+}
+
+//===----------------------------------------------------------------------===//
+// IndirectBrInst Implementation
+//===----------------------------------------------------------------------===//
+
+void IndirectBrInst::init(Value *Address, unsigned NumDests) {
+ assert(Address && Address->getType()->isPointerTy() &&
+ "Address of indirectbr must be a pointer");
+ ReservedSpace = 1+NumDests;
+ setNumHungOffUseOperands(1);
+ allocHungoffUses(ReservedSpace);
+
+ Op<0>() = Address;
+}
+
+
+/// growOperands - This grows the operand list in response to a push_back
+/// style of operation, doubling the number of operands.
+///
+void IndirectBrInst::growOperands() {
+ unsigned e = getNumOperands();
+ unsigned NumOps = e*2;
+
+ ReservedSpace = NumOps;
+ growHungoffUses(ReservedSpace);
+}
+
+IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
+ Instruction *InsertBefore)
+ : Instruction(Type::getVoidTy(Address->getContext()),
+ Instruction::IndirectBr, nullptr, 0, InsertBefore) {
+ init(Address, NumCases);
+}
+
+IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(Address->getContext()),
+ Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
+ init(Address, NumCases);
+}
+
+IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
+ : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
+ nullptr, IBI.getNumOperands()) {
+ allocHungoffUses(IBI.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = IBI.getOperandList();
+ for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
+ OL[i] = InOL[i];
+ SubclassOptionalData = IBI.SubclassOptionalData;
+}
+
+/// addDestination - Add a destination.
+///
+void IndirectBrInst::addDestination(BasicBlock *DestBB) {
+ unsigned OpNo = getNumOperands();
+ if (OpNo+1 > ReservedSpace)
+ growOperands(); // Get more space!
+ // Initialize some new operands.
+ assert(OpNo < ReservedSpace && "Growing didn't work!");
+ setNumHungOffUseOperands(OpNo+1);
+ getOperandList()[OpNo] = DestBB;
+}
+
+/// removeDestination - This method removes the specified successor from the
+/// indirectbr instruction.
+void IndirectBrInst::removeDestination(unsigned idx) {
+ assert(idx < getNumOperands()-1 && "Successor index out of range!");
+
+ unsigned NumOps = getNumOperands();
+ Use *OL = getOperandList();
+
+ // Replace this value with the last one.
+ OL[idx+1] = OL[NumOps-1];
+
+ // Nuke the last value.
+ OL[NumOps-1].set(nullptr);
+ setNumHungOffUseOperands(NumOps-1);
+}
+
+//===----------------------------------------------------------------------===//
+// FreezeInst Implementation
+//===----------------------------------------------------------------------===//
+
+FreezeInst::FreezeInst(Value *S,
+ const Twine &Name, Instruction *InsertBefore)
+ : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
+ setName(Name);
+}
+
+FreezeInst::FreezeInst(Value *S,
+ const Twine &Name, BasicBlock *InsertAtEnd)
+ : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
+ setName(Name);
+}
+
+//===----------------------------------------------------------------------===//
+// cloneImpl() implementations
+//===----------------------------------------------------------------------===//
+
+// Define these methods here so vtables don't get emitted into every translation
+// unit that uses these classes.
+
+GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
+ return new (getNumOperands()) GetElementPtrInst(*this);
+}
+
+UnaryOperator *UnaryOperator::cloneImpl() const {
+ return Create(getOpcode(), Op<0>());
+}
+
+BinaryOperator *BinaryOperator::cloneImpl() const {
+ return Create(getOpcode(), Op<0>(), Op<1>());
+}
+
+FCmpInst *FCmpInst::cloneImpl() const {
+ return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
+}
+
+ICmpInst *ICmpInst::cloneImpl() const {
+ return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
+}
+
+ExtractValueInst *ExtractValueInst::cloneImpl() const {
+ return new ExtractValueInst(*this);
+}
+
+InsertValueInst *InsertValueInst::cloneImpl() const {
+ return new InsertValueInst(*this);
+}
+
+AllocaInst *AllocaInst::cloneImpl() const {
+ AllocaInst *Result =
+ new AllocaInst(getAllocatedType(), getType()->getAddressSpace(),
+ getOperand(0), getAlign());
+ Result->setUsedWithInAlloca(isUsedWithInAlloca());
+ Result->setSwiftError(isSwiftError());
+ return Result;
+}
+
+LoadInst *LoadInst::cloneImpl() const {
+ return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
+ getAlign(), getOrdering(), getSyncScopeID());
+}
+
+StoreInst *StoreInst::cloneImpl() const {
+ return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
+ getOrdering(), getSyncScopeID());
+}
+
+AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
+ AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
+ getOperand(0), getOperand(1), getOperand(2), getAlign(),
+ getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
+ Result->setVolatile(isVolatile());
+ Result->setWeak(isWeak());
+ return Result;
+}
+
+AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
+ AtomicRMWInst *Result =
+ new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+ getAlign(), getOrdering(), getSyncScopeID());
+ Result->setVolatile(isVolatile());
+ return Result;
+}
+
+FenceInst *FenceInst::cloneImpl() const {
+ return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
+}
+
+TruncInst *TruncInst::cloneImpl() const {
+ return new TruncInst(getOperand(0), getType());
+}
+
+ZExtInst *ZExtInst::cloneImpl() const {
+ return new ZExtInst(getOperand(0), getType());
+}
+
+SExtInst *SExtInst::cloneImpl() const {
+ return new SExtInst(getOperand(0), getType());
+}
+
+FPTruncInst *FPTruncInst::cloneImpl() const {
+ return new FPTruncInst(getOperand(0), getType());
+}
+
+FPExtInst *FPExtInst::cloneImpl() const {
+ return new FPExtInst(getOperand(0), getType());
+}
+
+UIToFPInst *UIToFPInst::cloneImpl() const {
+ return new UIToFPInst(getOperand(0), getType());
+}
+
+SIToFPInst *SIToFPInst::cloneImpl() const {
+ return new SIToFPInst(getOperand(0), getType());
+}
+
+FPToUIInst *FPToUIInst::cloneImpl() const {
+ return new FPToUIInst(getOperand(0), getType());
+}
+
+FPToSIInst *FPToSIInst::cloneImpl() const {
+ return new FPToSIInst(getOperand(0), getType());
+}
+
+PtrToIntInst *PtrToIntInst::cloneImpl() const {
+ return new PtrToIntInst(getOperand(0), getType());
+}
+
+IntToPtrInst *IntToPtrInst::cloneImpl() const {
+ return new IntToPtrInst(getOperand(0), getType());
+}
+
+BitCastInst *BitCastInst::cloneImpl() const {
+ return new BitCastInst(getOperand(0), getType());
+}
+
+AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
+ return new AddrSpaceCastInst(getOperand(0), getType());
+}
+
+CallInst *CallInst::cloneImpl() const {
+ if (hasOperandBundles()) {
+ unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
+ return new(getNumOperands(), DescriptorBytes) CallInst(*this);
+ }
+ return new(getNumOperands()) CallInst(*this);
+}
+
+SelectInst *SelectInst::cloneImpl() const {
+ return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
+}
+
+VAArgInst *VAArgInst::cloneImpl() const {
+ return new VAArgInst(getOperand(0), getType());
+}
+
+ExtractElementInst *ExtractElementInst::cloneImpl() const {
+ return ExtractElementInst::Create(getOperand(0), getOperand(1));
+}
+
+InsertElementInst *InsertElementInst::cloneImpl() const {
+ return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
+}
+
+ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
+ return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
+}
+
+PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
+
+LandingPadInst *LandingPadInst::cloneImpl() const {
+ return new LandingPadInst(*this);
+}
+
+ReturnInst *ReturnInst::cloneImpl() const {
+ return new(getNumOperands()) ReturnInst(*this);
+}
+
+BranchInst *BranchInst::cloneImpl() const {
+ return new(getNumOperands()) BranchInst(*this);
+}
+
+SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
+
+IndirectBrInst *IndirectBrInst::cloneImpl() const {
+ return new IndirectBrInst(*this);
+}
+
+InvokeInst *InvokeInst::cloneImpl() const {
+ if (hasOperandBundles()) {
+ unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
+ return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
+ }
+ return new(getNumOperands()) InvokeInst(*this);
+}
+
+CallBrInst *CallBrInst::cloneImpl() const {
+ if (hasOperandBundles()) {
+ unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
+ return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
+ }
+ return new (getNumOperands()) CallBrInst(*this);
+}
+
+ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
+
+CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
+ return new (getNumOperands()) CleanupReturnInst(*this);
+}
+
+CatchReturnInst *CatchReturnInst::cloneImpl() const {
+ return new (getNumOperands()) CatchReturnInst(*this);
+}
+
+CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
+ return new CatchSwitchInst(*this);
+}
+
+FuncletPadInst *FuncletPadInst::cloneImpl() const {
+ return new (getNumOperands()) FuncletPadInst(*this);
+}
+
+UnreachableInst *UnreachableInst::cloneImpl() const {
+ LLVMContext &Context = getContext();
+ return new UnreachableInst(Context);
+}
+
+FreezeInst *FreezeInst::cloneImpl() const {
+ return new FreezeInst(getOperand(0));
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp b/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
new file mode 100644
index 000000000000..65a9a32ad2c5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/IntrinsicInst.cpp
@@ -0,0 +1,726 @@
+//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements methods that make it really easy to deal with intrinsic
+// functions.
+//
+// All intrinsic function calls are instances of the call instruction, so these
+// are all subclasses of the CallInst class. Note that none of these classes
+// has state or virtual methods, which is an important part of this gross/neat
+// hack working.
+//
+// In some cases, arguments to intrinsics need to be generic and are defined
+// as a pointer to an empty struct, { }*. To access the real item of interest,
+// the cast instruction needs to be stripped away.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Statepoint.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+/// DbgVariableIntrinsic - This is the common base class for debug info
+/// intrinsics for variables.
+///
+
+iterator_range<DbgVariableIntrinsic::location_op_iterator>
+DbgVariableIntrinsic::location_ops() const {
+ auto *MD = getRawLocation();
+ assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
+
+ // If operand is ValueAsMetadata, return a range over just that operand.
+ if (auto *VAM = dyn_cast<ValueAsMetadata>(MD)) {
+ return {location_op_iterator(VAM), location_op_iterator(VAM + 1)};
+ }
+ // If operand is DIArgList, return a range over its args.
+ if (auto *AL = dyn_cast<DIArgList>(MD))
+ return {location_op_iterator(AL->args_begin()),
+ location_op_iterator(AL->args_end())};
+ // Operand must be an empty metadata tuple, so return an empty range.
+ return {location_op_iterator(static_cast<ValueAsMetadata *>(nullptr)),
+ location_op_iterator(static_cast<ValueAsMetadata *>(nullptr))};
+}
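+
+// A sketch of walking the location operands (assumes a
+// DbgVariableIntrinsic *DVI); the range is uniform whether the intrinsic
+// wraps a single value, a DIArgList, or nothing at all:
+//
+//   SmallVector<Value *, 4> Locs;
+//   for (Value *V : DVI->location_ops())
+//     Locs.push_back(V); // each SSA value the intrinsic refers to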
+
+Value *DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) const {
+ auto *MD = getRawLocation();
+ assert(MD && "First operand of DbgVariableIntrinsic should be non-null.");
+ if (auto *AL = dyn_cast<DIArgList>(MD))
+ return AL->getArgs()[OpIdx]->getValue();
+ if (isa<MDNode>(MD))
+ return nullptr;
+ assert(
+ isa<ValueAsMetadata>(MD) &&
+ "Attempted to get location operand from DbgVariableIntrinsic with none.");
+ auto *V = cast<ValueAsMetadata>(MD);
+ assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
+ "single location operand.");
+ return V->getValue();
+}
+
+static ValueAsMetadata *getAsMetadata(Value *V) {
+ return isa<MetadataAsValue>(V) ? dyn_cast<ValueAsMetadata>(
+ cast<MetadataAsValue>(V)->getMetadata())
+ : ValueAsMetadata::get(V);
+}
+
+void DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
+ Value *NewValue) {
+ assert(NewValue && "Values must be non-null");
+ auto Locations = location_ops();
+ auto OldIt = find(Locations, OldValue);
+ assert(OldIt != Locations.end() && "OldValue must be a current location");
+ if (!hasArgList()) {
+ Value *NewOperand = isa<MetadataAsValue>(NewValue)
+ ? NewValue
+ : MetadataAsValue::get(
+ getContext(), ValueAsMetadata::get(NewValue));
+ return setArgOperand(0, NewOperand);
+ }
+ SmallVector<ValueAsMetadata *, 4> MDs;
+ ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
+ for (auto *VMD : Locations)
+ MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));
+ setArgOperand(
+ 0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
+}
+void DbgVariableIntrinsic::replaceVariableLocationOp(unsigned OpIdx,
+ Value *NewValue) {
+ assert(OpIdx < getNumVariableLocationOps() && "Invalid Operand Index");
+ if (!hasArgList()) {
+ Value *NewOperand = isa<MetadataAsValue>(NewValue)
+ ? NewValue
+ : MetadataAsValue::get(
+ getContext(), ValueAsMetadata::get(NewValue));
+ return setArgOperand(0, NewOperand);
+ }
+ SmallVector<ValueAsMetadata *, 4> MDs;
+ ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
+ for (unsigned Idx = 0; Idx < getNumVariableLocationOps(); ++Idx)
+ MDs.push_back(Idx == OpIdx ? NewOperand
+ : getAsMetadata(getVariableLocationOp(Idx)));
+ setArgOperand(
+ 0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
+}
+
+void DbgVariableIntrinsic::addVariableLocationOps(ArrayRef<Value *> NewValues,
+ DIExpression *NewExpr) {
+ assert(NewExpr->hasAllLocationOps(getNumVariableLocationOps() +
+ NewValues.size()) &&
+ "NewExpr for debug variable intrinsic does not reference every "
+ "location operand.");
+ assert(!is_contained(NewValues, nullptr) && "New values must be non-null");
+ setArgOperand(2, MetadataAsValue::get(getContext(), NewExpr));
+ SmallVector<ValueAsMetadata *, 4> MDs;
+ for (auto *VMD : location_ops())
+ MDs.push_back(getAsMetadata(VMD));
+ for (auto *VMD : NewValues)
+ MDs.push_back(getAsMetadata(VMD));
+ setArgOperand(
+ 0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
+}
+
+Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
+ if (auto Fragment = getExpression()->getFragmentInfo())
+ return Fragment->SizeInBits;
+ return getVariable()->getSizeInBits();
+}
+
+int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
+ StringRef Name) {
+ assert(Name.startswith("llvm."));
+
+ // Do successive binary searches of the dotted name components. For
+ // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
+ // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
+ // "llvm.gc.experimental.statepoint", and then we will stop as the range is
+ // size 1. During the search, we can skip the prefix that we already know is
+ // identical. By using strncmp we consider names with differing suffixes to
+ // be part of the equal range.
+ size_t CmpEnd = 4; // Skip the "llvm" component.
+ const char *const *Low = NameTable.begin();
+ const char *const *High = NameTable.end();
+ const char *const *LastLow = Low;
+ while (CmpEnd < Name.size() && High - Low > 0) {
+ size_t CmpStart = CmpEnd;
+ CmpEnd = Name.find('.', CmpStart + 1);
+ CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
+ auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
+ return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
+ };
+ LastLow = Low;
+ std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
+ }
+ if (High - Low > 0)
+ LastLow = Low;
+
+ if (LastLow == NameTable.end())
+ return -1;
+ StringRef NameFound = *LastLow;
+ if (Name == NameFound ||
+ (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
+ return LastLow - NameTable.begin();
+ return -1;
+}
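+
+// A sketch of the lookup contract (hypothetical, sorted name table). A hit
+// must match an entry exactly or extend it at a '.' boundary, so overloaded
+// suffixes resolve to their base entry:
+//
+//   static const char *const Table[] = {"llvm.foo", "llvm.foo.bar"};
+//   Intrinsic::lookupLLVMIntrinsicByName(Table, "llvm.foo.bar"); // 1
+//   Intrinsic::lookupLLVMIntrinsicByName(Table, "llvm.foo.f32"); // 0
+//   Intrinsic::lookupLLVMIntrinsicByName(Table, "llvm.foobar");  // -1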
+
+ConstantInt *InstrProfInstBase::getNumCounters() const {
+ if (InstrProfValueProfileInst::classof(this))
+ llvm_unreachable("InstrProfValueProfileInst does not have counters!");
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
+}
+
+ConstantInt *InstrProfInstBase::getIndex() const {
+ if (InstrProfValueProfileInst::classof(this))
+ llvm_unreachable("Please use InstrProfValueProfileInst::getIndex()");
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+}
+
+Value *InstrProfIncrementInst::getStep() const {
+ if (InstrProfIncrementInstStep::classof(this)) {
+ return const_cast<Value *>(getArgOperand(4));
+ }
+ const Module *M = getModule();
+ LLVMContext &Context = M->getContext();
+ return ConstantInt::get(Type::getInt64Ty(Context), 1);
+}
+
+Optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
+ unsigned NumOperands = arg_size();
+ Metadata *MD = nullptr;
+ auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
+ if (MAV)
+ MD = MAV->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return None;
+ return convertStrToRoundingMode(cast<MDString>(MD)->getString());
+}
+
+Optional<fp::ExceptionBehavior>
+ConstrainedFPIntrinsic::getExceptionBehavior() const {
+ unsigned NumOperands = arg_size();
+ Metadata *MD = nullptr;
+ auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
+ if (MAV)
+ MD = MAV->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return None;
+ return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
+}
+
+bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
+ Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
+ if (Except) {
+ if (Except.value() != fp::ebIgnore)
+ return false;
+ }
+
+ Optional<RoundingMode> Rounding = getRoundingMode();
+ if (Rounding) {
+ if (Rounding.value() != RoundingMode::NearestTiesToEven)
+ return false;
+ }
+
+ return true;
+}
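+
+// A brief sketch (assumes a ConstrainedFPIntrinsic *CFPI whose metadata
+// operands are !"round.tonearest" and !"fpexcept.ignore"): those strings
+// decode to NearestTiesToEven and fp::ebIgnore, so the environment is the
+// default one:
+//
+//   bool IsDefault = CFPI->isDefaultFPEnvironment(); // true here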
+
+static FCmpInst::Predicate getFPPredicateFromMD(const Value *Op) {
+ Metadata *MD = cast<MetadataAsValue>(Op)->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return FCmpInst::BAD_FCMP_PREDICATE;
+ return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
+ .Case("oeq", FCmpInst::FCMP_OEQ)
+ .Case("ogt", FCmpInst::FCMP_OGT)
+ .Case("oge", FCmpInst::FCMP_OGE)
+ .Case("olt", FCmpInst::FCMP_OLT)
+ .Case("ole", FCmpInst::FCMP_OLE)
+ .Case("one", FCmpInst::FCMP_ONE)
+ .Case("ord", FCmpInst::FCMP_ORD)
+ .Case("uno", FCmpInst::FCMP_UNO)
+ .Case("ueq", FCmpInst::FCMP_UEQ)
+ .Case("ugt", FCmpInst::FCMP_UGT)
+ .Case("uge", FCmpInst::FCMP_UGE)
+ .Case("ult", FCmpInst::FCMP_ULT)
+ .Case("ule", FCmpInst::FCMP_ULE)
+ .Case("une", FCmpInst::FCMP_UNE)
+ .Default(FCmpInst::BAD_FCMP_PREDICATE);
+}
+
+FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
+ return getFPPredicateFromMD(getArgOperand(2));
+}
+
+bool ConstrainedFPIntrinsic::isUnaryOp() const {
+ switch (getIntrinsicID()) {
+ default:
+ return false;
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC: \
+ return NARG == 1;
+#include "llvm/IR/ConstrainedOps.def"
+ }
+}
+
+bool ConstrainedFPIntrinsic::isTernaryOp() const {
+ switch (getIntrinsicID()) {
+ default:
+ return false;
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC: \
+ return NARG == 3;
+#include "llvm/IR/ConstrainedOps.def"
+ }
+}
+
+bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC:
+#include "llvm/IR/ConstrainedOps.def"
+ return true;
+ default:
+ return false;
+ }
+}
+
+ElementCount VPIntrinsic::getStaticVectorLength() const {
+ auto GetVectorLengthOfType = [](const Type *T) -> ElementCount {
+ const auto *VT = cast<VectorType>(T);
+ auto ElemCount = VT->getElementCount();
+ return ElemCount;
+ };
+
+ Value *VPMask = getMaskParam();
+ if (!VPMask) {
+ assert((getIntrinsicID() == Intrinsic::vp_merge ||
+ getIntrinsicID() == Intrinsic::vp_select) &&
+ "Unexpected VP intrinsic without mask operand");
+ return GetVectorLengthOfType(getType());
+ }
+ return GetVectorLengthOfType(VPMask->getType());
+}
+
+Value *VPIntrinsic::getMaskParam() const {
+ if (auto MaskPos = getMaskParamPos(getIntrinsicID()))
+ return getArgOperand(*MaskPos);
+ return nullptr;
+}
+
+void VPIntrinsic::setMaskParam(Value *NewMask) {
+ auto MaskPos = getMaskParamPos(getIntrinsicID());
+ setArgOperand(*MaskPos, NewMask);
+}
+
+Value *VPIntrinsic::getVectorLengthParam() const {
+ if (auto EVLPos = getVectorLengthParamPos(getIntrinsicID()))
+ return getArgOperand(*EVLPos);
+ return nullptr;
+}
+
+void VPIntrinsic::setVectorLengthParam(Value *NewEVL) {
+ auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
+ setArgOperand(*EVLPos, NewEVL);
+}
+
+Optional<unsigned> VPIntrinsic::getMaskParamPos(Intrinsic::ID IntrinsicID) {
+ switch (IntrinsicID) {
+ default:
+ return None;
+
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS) \
+ case Intrinsic::VPID: \
+ return MASKPOS;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+}
+
+Optional<unsigned>
+VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
+ switch (IntrinsicID) {
+ default:
+ return None;
+
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS) \
+ case Intrinsic::VPID: \
+ return VLENPOS;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+}
+
+/// \return the alignment of the pointer used by this load/store/gather or
+/// scatter.
+MaybeAlign VPIntrinsic::getPointerAlignment() const {
+ Optional<unsigned> PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID());
+ assert(PtrParamOpt && "no pointer argument!");
+ return getParamAlign(PtrParamOpt.value());
+}
+
+/// \return The pointer operand of this load, store, gather, or scatter.
+Value *VPIntrinsic::getMemoryPointerParam() const {
+ if (auto PtrParamOpt = getMemoryPointerParamPos(getIntrinsicID()))
+ return getArgOperand(PtrParamOpt.value());
+ return nullptr;
+}
+
+Optional<unsigned> VPIntrinsic::getMemoryPointerParamPos(Intrinsic::ID VPID) {
+ switch (VPID) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_MEMOP(POINTERPOS, ...) return POINTERPOS;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return None;
+}
+
+/// \return The data (payload) operand of this store or scatter.
+Value *VPIntrinsic::getMemoryDataParam() const {
+ auto DataParamOpt = getMemoryDataParamPos(getIntrinsicID());
+ if (!DataParamOpt)
+ return nullptr;
+ return getArgOperand(DataParamOpt.value());
+}
+
+Optional<unsigned> VPIntrinsic::getMemoryDataParamPos(Intrinsic::ID VPID) {
+ switch (VPID) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS) return DATAPOS;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return None;
+}
+
+bool VPIntrinsic::isVPIntrinsic(Intrinsic::ID ID) {
+ switch (ID) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS) \
+ case Intrinsic::VPID: \
+ return true;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return false;
+}
+
+// Equivalent non-predicated opcode, if one exists.
+Optional<unsigned> VPIntrinsic::getFunctionalOpcodeForVP(Intrinsic::ID ID) {
+ switch (ID) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return None;
+}
+
+Intrinsic::ID VPIntrinsic::getForOpcode(unsigned IROPC) {
+ switch (IROPC) {
+ default:
+ break;
+
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) break;
+#define VP_PROPERTY_FUNCTIONAL_OPC(OPC) case Instruction::OPC:
+#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return Intrinsic::not_intrinsic;
+}
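+
+// Sketch of how the macro trick above expands for one record (assuming
+// vp_add carries VP_PROPERTY_FUNCTIONAL_OPC(Add)):
+//   break;                      // BEGIN_REGISTER_VP_INTRINSIC(vp_add, ...)
+//   case Instruction::Add:      // VP_PROPERTY_FUNCTIONAL_OPC(Add)
+//     return Intrinsic::vp_add; // END_REGISTER_VP_INTRINSIC(vp_add)
+// A record without a functional opcode contributes only the leading break,
+// so its VPID can never be returned here.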
+
+bool VPIntrinsic::canIgnoreVectorLengthParam() const {
+ using namespace PatternMatch;
+
+ ElementCount EC = getStaticVectorLength();
+
+ // No vlen param - no lanes masked-off by it.
+ auto *VLParam = getVectorLengthParam();
+ if (!VLParam)
+ return true;
+
+  // Note that the VP intrinsic causes undefined behavior if the Explicit
+  // Vector Length parameter is strictly greater than the number of vector
+  // elements of the operation. This function returns true when this is
+  // detected statically in the IR.
+
+ // Check whether "W == vscale * EC.getKnownMinValue()"
+ if (EC.isScalable()) {
+    // Fetch the data layout from the enclosing module.
+ const auto *ParMod = this->getModule();
+ if (!ParMod)
+ return false;
+ const auto &DL = ParMod->getDataLayout();
+
+ // Compare vscale patterns
+ uint64_t VScaleFactor;
+ if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
+ return VScaleFactor >= EC.getKnownMinValue();
+ return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
+ }
+
+  // Standard SIMD operation: compare a constant EVL against the fixed
+  // element count.
+ const auto *VLConst = dyn_cast<ConstantInt>(VLParam);
+ if (!VLConst)
+ return false;
+
+ uint64_t VLNum = VLConst->getZExtValue();
+ if (VLNum >= EC.getKnownMinValue())
+ return true;
+
+ return false;
+}
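+
+// Example of the fixed-width case (illustrative IR, not part of this file):
+//   %r = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b,
+//                                          <4 x i1> %m, i32 4)
+// Here EVL == 4 covers every lane of the <4 x i32> operation, so this
+// function returns true; with `i32 2` some lanes would be masked off by the
+// EVL and it returns false.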
+
+Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
+ Type *ReturnType,
+ ArrayRef<Value *> Params) {
+ assert(isVPIntrinsic(VPID) && "not a VP intrinsic");
+ Function *VPFunc;
+ switch (VPID) {
+ default: {
+ Type *OverloadTy = Params[0]->getType();
+ if (VPReductionIntrinsic::isVPReduction(VPID))
+ OverloadTy =
+ Params[*VPReductionIntrinsic::getVectorParamPos(VPID)]->getType();
+
+ VPFunc = Intrinsic::getDeclaration(M, VPID, OverloadTy);
+ break;
+ }
+ case Intrinsic::vp_trunc:
+ case Intrinsic::vp_sext:
+ case Intrinsic::vp_zext:
+ case Intrinsic::vp_fptoui:
+ case Intrinsic::vp_fptosi:
+ case Intrinsic::vp_uitofp:
+ case Intrinsic::vp_sitofp:
+ case Intrinsic::vp_fptrunc:
+ case Intrinsic::vp_fpext:
+ case Intrinsic::vp_ptrtoint:
+ case Intrinsic::vp_inttoptr:
+ VPFunc =
+ Intrinsic::getDeclaration(M, VPID, {ReturnType, Params[0]->getType()});
+ break;
+ case Intrinsic::vp_merge:
+ case Intrinsic::vp_select:
+ VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
+ break;
+ case Intrinsic::vp_load:
+ VPFunc = Intrinsic::getDeclaration(
+ M, VPID, {ReturnType, Params[0]->getType()});
+ break;
+ case Intrinsic::experimental_vp_strided_load:
+ VPFunc = Intrinsic::getDeclaration(
+ M, VPID, {ReturnType, Params[0]->getType(), Params[1]->getType()});
+ break;
+ case Intrinsic::vp_gather:
+ VPFunc = Intrinsic::getDeclaration(
+ M, VPID, {ReturnType, Params[0]->getType()});
+ break;
+ case Intrinsic::vp_store:
+ VPFunc = Intrinsic::getDeclaration(
+ M, VPID, {Params[0]->getType(), Params[1]->getType()});
+ break;
+ case Intrinsic::experimental_vp_strided_store:
+ VPFunc = Intrinsic::getDeclaration(
+ M, VPID,
+ {Params[0]->getType(), Params[1]->getType(), Params[2]->getType()});
+ break;
+ case Intrinsic::vp_scatter:
+ VPFunc = Intrinsic::getDeclaration(
+ M, VPID, {Params[0]->getType(), Params[1]->getType()});
+ break;
+ }
+ assert(VPFunc && "Could not declare VP intrinsic");
+ return VPFunc;
+}
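+
+// Minimal usage sketch (hypothetical caller; A, B, Mask and EVL are assumed
+// to be suitably typed Values):
+//   SmallVector<Value *, 4> Args = {A, B, Mask, EVL};
+//   Function *F = VPIntrinsic::getDeclarationForParams(
+//       M, Intrinsic::vp_add, /*ReturnType=*/A->getType(), Args);
+// In the default case the overload type is derived from Params[0], so for
+// vp.add the ReturnType argument does not affect the declaration.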
+
+bool VPReductionIntrinsic::isVPReduction(Intrinsic::ID ID) {
+ switch (ID) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return false;
+}
+
+bool VPCastIntrinsic::isVPCast(Intrinsic::ID ID) {
+ switch (ID) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_CASTOP return true;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return false;
+}
+
+bool VPCmpIntrinsic::isVPCmp(Intrinsic::ID ID) {
+ switch (ID) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_CMP(CCPOS, ...) return true;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ return false;
+}
+
+static ICmpInst::Predicate getIntPredicateFromMD(const Value *Op) {
+ Metadata *MD = cast<MetadataAsValue>(Op)->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return ICmpInst::BAD_ICMP_PREDICATE;
+ return StringSwitch<ICmpInst::Predicate>(cast<MDString>(MD)->getString())
+ .Case("eq", ICmpInst::ICMP_EQ)
+ .Case("ne", ICmpInst::ICMP_NE)
+ .Case("ugt", ICmpInst::ICMP_UGT)
+ .Case("uge", ICmpInst::ICMP_UGE)
+ .Case("ult", ICmpInst::ICMP_ULT)
+ .Case("ule", ICmpInst::ICMP_ULE)
+ .Case("sgt", ICmpInst::ICMP_SGT)
+ .Case("sge", ICmpInst::ICMP_SGE)
+ .Case("slt", ICmpInst::ICMP_SLT)
+ .Case("sle", ICmpInst::ICMP_SLE)
+ .Default(ICmpInst::BAD_ICMP_PREDICATE);
+}
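+
+// The predicate of a VP comparison travels as a metadata string operand,
+// e.g. (illustrative IR):
+//   %c = call <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32> %a, <4 x i32> %b,
+//                          metadata !"slt", <4 x i1> %m, i32 %evl)
+// where !"slt" is mapped to ICmpInst::ICMP_SLT by the table above.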
+
+CmpInst::Predicate VPCmpIntrinsic::getPredicate() const {
+ bool IsFP = true;
+ Optional<unsigned> CCArgIdx;
+ switch (getIntrinsicID()) {
+ default:
+ break;
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_CMP(CCPOS, ISFP) \
+ CCArgIdx = CCPOS; \
+ IsFP = ISFP; \
+ break;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ }
+ assert(CCArgIdx && "Unexpected vector-predicated comparison");
+ return IsFP ? getFPPredicateFromMD(getArgOperand(*CCArgIdx))
+ : getIntPredicateFromMD(getArgOperand(*CCArgIdx));
+}
+
+unsigned VPReductionIntrinsic::getVectorParamPos() const {
+ return *VPReductionIntrinsic::getVectorParamPos(getIntrinsicID());
+}
+
+unsigned VPReductionIntrinsic::getStartParamPos() const {
+ return *VPReductionIntrinsic::getStartParamPos(getIntrinsicID());
+}
+
+Optional<unsigned> VPReductionIntrinsic::getVectorParamPos(Intrinsic::ID ID) {
+ switch (ID) {
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return VECTORPOS;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ default:
+ break;
+ }
+ return None;
+}
+
+Optional<unsigned> VPReductionIntrinsic::getStartParamPos(Intrinsic::ID ID) {
+ switch (ID) {
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS) return STARTPOS;
+#define END_REGISTER_VP_INTRINSIC(VPID) break;
+#include "llvm/IR/VPIntrinsics.def"
+ default:
+ break;
+ }
+ return None;
+}
+
+Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
+ switch (getIntrinsicID()) {
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_sat:
+ case Intrinsic::sadd_sat:
+ return Instruction::Add;
+ case Intrinsic::usub_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::usub_sat:
+ case Intrinsic::ssub_sat:
+ return Instruction::Sub;
+ case Intrinsic::umul_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ return Instruction::Mul;
+ default:
+ llvm_unreachable("Invalid intrinsic");
+ }
+}
+
+bool BinaryOpIntrinsic::isSigned() const {
+ switch (getIntrinsicID()) {
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::ssub_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::sadd_sat:
+ case Intrinsic::ssub_sat:
+ return true;
+ default:
+ return false;
+ }
+}
+
+unsigned BinaryOpIntrinsic::getNoWrapKind() const {
+  if (isSigned())
+    return OverflowingBinaryOperator::NoSignedWrap;
+  return OverflowingBinaryOperator::NoUnsignedWrap;
+}
+
+const GCStatepointInst *GCProjectionInst::getStatepoint() const {
+ const Value *Token = getArgOperand(0);
+
+ // This takes care both of relocates for call statepoints and relocates
+ // on normal path of invoke statepoint.
+ if (!isa<LandingPadInst>(Token))
+ return cast<GCStatepointInst>(Token);
+
+ // This relocate is on exceptional path of an invoke statepoint
+ const BasicBlock *InvokeBB =
+ cast<Instruction>(Token)->getParent()->getUniquePredecessor();
+
+ assert(InvokeBB && "safepoints should have unique landingpads");
+ assert(InvokeBB->getTerminator() &&
+ "safepoint block should be well formed");
+
+ return cast<GCStatepointInst>(InvokeBB->getTerminator());
+}
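+
+// Rough shape of the exceptional-path case handled above (illustrative IR):
+//   %tok = invoke token ... @llvm.experimental.gc.statepoint(...)
+//             to label %normal unwind label %lpad
+// lpad:
+//   %lp = landingpad token cleanup
+//   %rel = call ... @llvm.experimental.gc.relocate(token %lp, ...)
+// The relocate's token is the landingpad, so the statepoint is recovered via
+// the landingpad block's unique predecessor.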
+
+Value *GCRelocateInst::getBasePtr() const {
+ if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
+ return *(Opt->Inputs.begin() + getBasePtrIndex());
+ return *(getStatepoint()->arg_begin() + getBasePtrIndex());
+}
+
+Value *GCRelocateInst::getDerivedPtr() const {
+ if (auto Opt = getStatepoint()->getOperandBundle(LLVMContext::OB_gc_live))
+ return *(Opt->Inputs.begin() + getDerivedPtrIndex());
+ return *(getStatepoint()->arg_begin() + getDerivedPtrIndex());
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/LLVMContext.cpp b/contrib/llvm-project/llvm/lib/IR/LLVMContext.cpp
new file mode 100644
index 000000000000..4a1d5d3dcdf6
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/LLVMContext.cpp
@@ -0,0 +1,380 @@
+//===-- LLVMContext.cpp - Implement LLVMContext ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements LLVMContext, as a wrapper around the opaque
+// class LLVMContextImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/LLVMContext.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
+#include "llvm/Remarks/RemarkStreamer.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdlib>
+#include <string>
+#include <utility>
+
+using namespace llvm;
+
+LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
+ // Create the fixed metadata kinds. This is done in the same order as the
+ // MD_* enum values so that they correspond.
+ std::pair<unsigned, StringRef> MDKinds[] = {
+#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) {EnumID, Name},
+#include "llvm/IR/FixedMetadataKinds.def"
+#undef LLVM_FIXED_MD_KIND
+ };
+
+ for (auto &MDKind : MDKinds) {
+ unsigned ID = getMDKindID(MDKind.second);
+ assert(ID == MDKind.first && "metadata kind id drifted");
+ (void)ID;
+ }
+
+ auto *DeoptEntry = pImpl->getOrInsertBundleTag("deopt");
+ assert(DeoptEntry->second == LLVMContext::OB_deopt &&
+ "deopt operand bundle id drifted!");
+ (void)DeoptEntry;
+
+ auto *FuncletEntry = pImpl->getOrInsertBundleTag("funclet");
+ assert(FuncletEntry->second == LLVMContext::OB_funclet &&
+ "funclet operand bundle id drifted!");
+ (void)FuncletEntry;
+
+ auto *GCTransitionEntry = pImpl->getOrInsertBundleTag("gc-transition");
+ assert(GCTransitionEntry->second == LLVMContext::OB_gc_transition &&
+ "gc-transition operand bundle id drifted!");
+ (void)GCTransitionEntry;
+
+ auto *CFGuardTargetEntry = pImpl->getOrInsertBundleTag("cfguardtarget");
+ assert(CFGuardTargetEntry->second == LLVMContext::OB_cfguardtarget &&
+ "cfguardtarget operand bundle id drifted!");
+ (void)CFGuardTargetEntry;
+
+ auto *PreallocatedEntry = pImpl->getOrInsertBundleTag("preallocated");
+ assert(PreallocatedEntry->second == LLVMContext::OB_preallocated &&
+ "preallocated operand bundle id drifted!");
+ (void)PreallocatedEntry;
+
+ auto *GCLiveEntry = pImpl->getOrInsertBundleTag("gc-live");
+ assert(GCLiveEntry->second == LLVMContext::OB_gc_live &&
+ "gc-transition operand bundle id drifted!");
+ (void)GCLiveEntry;
+
+ auto *ClangAttachedCall =
+ pImpl->getOrInsertBundleTag("clang.arc.attachedcall");
+ assert(ClangAttachedCall->second == LLVMContext::OB_clang_arc_attachedcall &&
+ "clang.arc.attachedcall operand bundle id drifted!");
+ (void)ClangAttachedCall;
+
+ auto *PtrauthEntry = pImpl->getOrInsertBundleTag("ptrauth");
+ assert(PtrauthEntry->second == LLVMContext::OB_ptrauth &&
+ "ptrauth operand bundle id drifted!");
+ (void)PtrauthEntry;
+
+ SyncScope::ID SingleThreadSSID =
+ pImpl->getOrInsertSyncScopeID("singlethread");
+ assert(SingleThreadSSID == SyncScope::SingleThread &&
+ "singlethread synchronization scope ID drifted!");
+ (void)SingleThreadSSID;
+
+ SyncScope::ID SystemSSID =
+ pImpl->getOrInsertSyncScopeID("");
+ assert(SystemSSID == SyncScope::System &&
+ "system synchronization scope ID drifted!");
+ (void)SystemSSID;
+}
+
+LLVMContext::~LLVMContext() { delete pImpl; }
+
+void LLVMContext::addModule(Module *M) {
+ pImpl->OwnedModules.insert(M);
+}
+
+void LLVMContext::removeModule(Module *M) {
+ pImpl->OwnedModules.erase(M);
+}
+
+//===----------------------------------------------------------------------===//
+// Recoverable Backend Errors
+//===----------------------------------------------------------------------===//
+
+void LLVMContext::setDiagnosticHandlerCallBack(
+ DiagnosticHandler::DiagnosticHandlerTy DiagnosticHandler,
+ void *DiagnosticContext, bool RespectFilters) {
+ pImpl->DiagHandler->DiagHandlerCallback = DiagnosticHandler;
+ pImpl->DiagHandler->DiagnosticContext = DiagnosticContext;
+ pImpl->RespectDiagnosticFilters = RespectFilters;
+}
+
+void LLVMContext::setDiagnosticHandler(std::unique_ptr<DiagnosticHandler> &&DH,
+ bool RespectFilters) {
+ pImpl->DiagHandler = std::move(DH);
+ pImpl->RespectDiagnosticFilters = RespectFilters;
+}
+
+void LLVMContext::setDiagnosticsHotnessRequested(bool Requested) {
+ pImpl->DiagnosticsHotnessRequested = Requested;
+}
+bool LLVMContext::getDiagnosticsHotnessRequested() const {
+ return pImpl->DiagnosticsHotnessRequested;
+}
+
+void LLVMContext::setDiagnosticsHotnessThreshold(Optional<uint64_t> Threshold) {
+ pImpl->DiagnosticsHotnessThreshold = Threshold;
+}
+void LLVMContext::setMisExpectWarningRequested(bool Requested) {
+ pImpl->MisExpectWarningRequested = Requested;
+}
+bool LLVMContext::getMisExpectWarningRequested() const {
+ return pImpl->MisExpectWarningRequested;
+}
+uint64_t LLVMContext::getDiagnosticsHotnessThreshold() const {
+ return pImpl->DiagnosticsHotnessThreshold.value_or(UINT64_MAX);
+}
+void LLVMContext::setDiagnosticsMisExpectTolerance(
+ Optional<uint64_t> Tolerance) {
+ pImpl->DiagnosticsMisExpectTolerance = Tolerance;
+}
+uint64_t LLVMContext::getDiagnosticsMisExpectTolerance() const {
+ return pImpl->DiagnosticsMisExpectTolerance.value_or(0);
+}
+
+bool LLVMContext::isDiagnosticsHotnessThresholdSetFromPSI() const {
+ return !pImpl->DiagnosticsHotnessThreshold.has_value();
+}
+
+remarks::RemarkStreamer *LLVMContext::getMainRemarkStreamer() {
+ return pImpl->MainRemarkStreamer.get();
+}
+const remarks::RemarkStreamer *LLVMContext::getMainRemarkStreamer() const {
+ return const_cast<LLVMContext *>(this)->getMainRemarkStreamer();
+}
+void LLVMContext::setMainRemarkStreamer(
+ std::unique_ptr<remarks::RemarkStreamer> RemarkStreamer) {
+ pImpl->MainRemarkStreamer = std::move(RemarkStreamer);
+}
+
+LLVMRemarkStreamer *LLVMContext::getLLVMRemarkStreamer() {
+ return pImpl->LLVMRS.get();
+}
+const LLVMRemarkStreamer *LLVMContext::getLLVMRemarkStreamer() const {
+ return const_cast<LLVMContext *>(this)->getLLVMRemarkStreamer();
+}
+void LLVMContext::setLLVMRemarkStreamer(
+ std::unique_ptr<LLVMRemarkStreamer> RemarkStreamer) {
+ pImpl->LLVMRS = std::move(RemarkStreamer);
+}
+
+DiagnosticHandler::DiagnosticHandlerTy
+LLVMContext::getDiagnosticHandlerCallBack() const {
+ return pImpl->DiagHandler->DiagHandlerCallback;
+}
+
+void *LLVMContext::getDiagnosticContext() const {
+ return pImpl->DiagHandler->DiagnosticContext;
+}
+
+void LLVMContext::setYieldCallback(YieldCallbackTy Callback,
+                                   void *OpaqueHandle) {
+ pImpl->YieldCallback = Callback;
+ pImpl->YieldOpaqueHandle = OpaqueHandle;
+}
+
+void LLVMContext::yield() {
+ if (pImpl->YieldCallback)
+ pImpl->YieldCallback(this, pImpl->YieldOpaqueHandle);
+}
+
+void LLVMContext::emitError(const Twine &ErrorStr) {
+ diagnose(DiagnosticInfoInlineAsm(ErrorStr));
+}
+
+void LLVMContext::emitError(const Instruction *I, const Twine &ErrorStr) {
+  assert(I && "Invalid instruction");
+ diagnose(DiagnosticInfoInlineAsm(*I, ErrorStr));
+}
+
+static bool isDiagnosticEnabled(const DiagnosticInfo &DI) {
+ // Optimization remarks are selective. They need to check whether the regexp
+ // pattern, passed via one of the -pass-remarks* flags, matches the name of
+ // the pass that is emitting the diagnostic. If there is no match, ignore the
+ // diagnostic and return.
+ //
+  // Also, noisy remarks are only enabled if we have hotness information to
+  // sort them.
+ if (auto *Remark = dyn_cast<DiagnosticInfoOptimizationBase>(&DI))
+ return Remark->isEnabled() &&
+ (!Remark->isVerbose() || Remark->getHotness());
+
+ return true;
+}
+
+const char *
+LLVMContext::getDiagnosticMessagePrefix(DiagnosticSeverity Severity) {
+ switch (Severity) {
+ case DS_Error:
+ return "error";
+ case DS_Warning:
+ return "warning";
+ case DS_Remark:
+ return "remark";
+ case DS_Note:
+ return "note";
+ }
+ llvm_unreachable("Unknown DiagnosticSeverity");
+}
+
+void LLVMContext::diagnose(const DiagnosticInfo &DI) {
+ if (auto *OptDiagBase = dyn_cast<DiagnosticInfoOptimizationBase>(&DI))
+ if (LLVMRemarkStreamer *RS = getLLVMRemarkStreamer())
+ RS->emit(*OptDiagBase);
+
+ // If there is a report handler, use it.
+ if (pImpl->DiagHandler &&
+ (!pImpl->RespectDiagnosticFilters || isDiagnosticEnabled(DI)) &&
+ pImpl->DiagHandler->handleDiagnostics(DI))
+ return;
+
+ if (!isDiagnosticEnabled(DI))
+ return;
+
+ // Otherwise, print the message with a prefix based on the severity.
+ DiagnosticPrinterRawOStream DP(errs());
+ errs() << getDiagnosticMessagePrefix(DI.getSeverity()) << ": ";
+ DI.print(DP);
+ errs() << "\n";
+ if (DI.getSeverity() == DS_Error)
+ exit(1);
+}
+
+void LLVMContext::emitError(uint64_t LocCookie, const Twine &ErrorStr) {
+ diagnose(DiagnosticInfoInlineAsm(LocCookie, ErrorStr));
+}
+
+//===----------------------------------------------------------------------===//
+// Metadata Kind Uniquing
+//===----------------------------------------------------------------------===//
+
+/// Return a unique ID for the specified metadata kind.
+unsigned LLVMContext::getMDKindID(StringRef Name) const {
+  // If this is new, assign it its ID.
+  return pImpl->CustomMDKindNames
+      .insert(std::make_pair(Name, pImpl->CustomMDKindNames.size()))
+      .first->second;
+}
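+
+// Usage note (illustrative): fixed kinds resolve to their MD_* enum value
+// because the constructor registers them first, e.g.
+//   unsigned DbgID = Ctx.getMDKindID("dbg"); // == LLVMContext::MD_dbg
+// while unknown names are assigned the next free ID on first use.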
+
+/// getMDKindNames - Populate the client-supplied SmallVector with the names
+/// of all custom metadata kinds, indexed by kind ID.
+void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const {
+  Names.resize(pImpl->CustomMDKindNames.size());
+  for (const auto &Entry : pImpl->CustomMDKindNames)
+    Names[Entry.second] = Entry.first();
+}
+
+void LLVMContext::getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const {
+ pImpl->getOperandBundleTags(Tags);
+}
+
+StringMapEntry<uint32_t> *
+LLVMContext::getOrInsertBundleTag(StringRef TagName) const {
+ return pImpl->getOrInsertBundleTag(TagName);
+}
+
+uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
+ return pImpl->getOperandBundleTagID(Tag);
+}
+
+SyncScope::ID LLVMContext::getOrInsertSyncScopeID(StringRef SSN) {
+ return pImpl->getOrInsertSyncScopeID(SSN);
+}
+
+void LLVMContext::getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const {
+ pImpl->getSyncScopeNames(SSNs);
+}
+
+void LLVMContext::setGC(const Function &Fn, std::string GCName) {
+ auto It = pImpl->GCNames.find(&Fn);
+
+ if (It == pImpl->GCNames.end()) {
+ pImpl->GCNames.insert(std::make_pair(&Fn, std::move(GCName)));
+ return;
+ }
+ It->second = std::move(GCName);
+}
+
+const std::string &LLVMContext::getGC(const Function &Fn) {
+ return pImpl->GCNames[&Fn];
+}
+
+void LLVMContext::deleteGC(const Function &Fn) {
+ pImpl->GCNames.erase(&Fn);
+}
+
+bool LLVMContext::shouldDiscardValueNames() const {
+ return pImpl->DiscardValueNames;
+}
+
+bool LLVMContext::isODRUniquingDebugTypes() const { return !!pImpl->DITypeMap; }
+
+void LLVMContext::enableDebugTypeODRUniquing() {
+ if (pImpl->DITypeMap)
+ return;
+
+ pImpl->DITypeMap.emplace();
+}
+
+void LLVMContext::disableDebugTypeODRUniquing() { pImpl->DITypeMap.reset(); }
+
+void LLVMContext::setDiscardValueNames(bool Discard) {
+ pImpl->DiscardValueNames = Discard;
+}
+
+OptPassGate &LLVMContext::getOptPassGate() const {
+ return pImpl->getOptPassGate();
+}
+
+void LLVMContext::setOptPassGate(OptPassGate& OPG) {
+ pImpl->setOptPassGate(OPG);
+}
+
+const DiagnosticHandler *LLVMContext::getDiagHandlerPtr() const {
+ return pImpl->DiagHandler.get();
+}
+
+std::unique_ptr<DiagnosticHandler> LLVMContext::getDiagnosticHandler() {
+ return std::move(pImpl->DiagHandler);
+}
+
+bool LLVMContext::hasSetOpaquePointersValue() const {
+ return pImpl->hasOpaquePointersValue();
+}
+
+void LLVMContext::setOpaquePointers(bool Enable) const {
+ pImpl->setOpaquePointers(Enable);
+}
+
+bool LLVMContext::supportsTypedPointers() const {
+ return !pImpl->getOpaquePointers();
+}
+
+Any &LLVMContext::getTargetData() const {
+ return pImpl->TargetDataStorage;
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.cpp b/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.cpp
new file mode 100644
index 000000000000..d7aaf0008564
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.cpp
@@ -0,0 +1,265 @@
+//===- LLVMContextImpl.cpp - Implement LLVMContextImpl --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the opaque LLVMContextImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LLVMContextImpl.h"
+#include "AttributeImpl.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringMapEntry.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/DiagnosticHandler.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/OptBisect.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/Remarks/RemarkStreamer.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TypeSize.h"
+#include <cassert>
+#include <utility>
+
+using namespace llvm;
+
+static cl::opt<bool>
+ OpaquePointersCL("opaque-pointers", cl::desc("Use opaque pointers"),
+ cl::init(true));
+
+LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
+ : DiagHandler(std::make_unique<DiagnosticHandler>()),
+ VoidTy(C, Type::VoidTyID), LabelTy(C, Type::LabelTyID),
+ HalfTy(C, Type::HalfTyID), BFloatTy(C, Type::BFloatTyID),
+ FloatTy(C, Type::FloatTyID), DoubleTy(C, Type::DoubleTyID),
+ MetadataTy(C, Type::MetadataTyID), TokenTy(C, Type::TokenTyID),
+ X86_FP80Ty(C, Type::X86_FP80TyID), FP128Ty(C, Type::FP128TyID),
+ PPC_FP128Ty(C, Type::PPC_FP128TyID), X86_MMXTy(C, Type::X86_MMXTyID),
+ X86_AMXTy(C, Type::X86_AMXTyID), Int1Ty(C, 1), Int8Ty(C, 8),
+ Int16Ty(C, 16), Int32Ty(C, 32), Int64Ty(C, 64), Int128Ty(C, 128) {
+ if (OpaquePointersCL.getNumOccurrences()) {
+ OpaquePointers = OpaquePointersCL;
+ }
+}
+
+LLVMContextImpl::~LLVMContextImpl() {
+ // NOTE: We need to delete the contents of OwnedModules, but Module's dtor
+ // will call LLVMContextImpl::removeModule, thus invalidating iterators into
+ // the container. Avoid iterators during this operation:
+ while (!OwnedModules.empty())
+ delete *OwnedModules.begin();
+
+#ifndef NDEBUG
+ // Check for metadata references from leaked Values.
+ for (auto &Pair : ValueMetadata)
+ Pair.first->dump();
+ assert(ValueMetadata.empty() && "Values with metadata have been leaked");
+#endif
+
+ // Drop references for MDNodes. Do this before Values get deleted to avoid
+ // unnecessary RAUW when nodes are still unresolved.
+ for (auto *I : DistinctMDNodes) {
+    // We may have DIArgLists that were uniqued, and since DIArgList has a
+    // custom implementation of dropAllReferences, it must be invoked
+    // explicitly.
+ if (auto *AL = dyn_cast<DIArgList>(I)) {
+ AL->dropAllReferences();
+ continue;
+ }
+ I->dropAllReferences();
+ }
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ for (auto *I : CLASS##s) \
+ I->dropAllReferences();
+#include "llvm/IR/Metadata.def"
+
+ // Also drop references that come from the Value bridges.
+ for (auto &Pair : ValuesAsMetadata)
+ Pair.second->dropUsers();
+ for (auto &Pair : MetadataAsValues)
+ Pair.second->dropUse();
+
+ // Destroy MDNodes.
+ for (MDNode *I : DistinctMDNodes)
+ I->deleteAsSubclass();
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ for (CLASS * I : CLASS##s) \
+ delete I;
+#include "llvm/IR/Metadata.def"
+
+ // Free the constants.
+ for (auto *I : ExprConstants)
+ I->dropAllReferences();
+ for (auto *I : ArrayConstants)
+ I->dropAllReferences();
+ for (auto *I : StructConstants)
+ I->dropAllReferences();
+ for (auto *I : VectorConstants)
+ I->dropAllReferences();
+ ExprConstants.freeConstants();
+ ArrayConstants.freeConstants();
+ StructConstants.freeConstants();
+ VectorConstants.freeConstants();
+ InlineAsms.freeConstants();
+
+ CAZConstants.clear();
+ CPNConstants.clear();
+ UVConstants.clear();
+ PVConstants.clear();
+ IntConstants.clear();
+ FPConstants.clear();
+ CDSConstants.clear();
+
+ // Destroy attribute node lists.
+ for (FoldingSetIterator<AttributeSetNode> I = AttrsSetNodes.begin(),
+ E = AttrsSetNodes.end(); I != E; ) {
+ FoldingSetIterator<AttributeSetNode> Elem = I++;
+ delete &*Elem;
+ }
+
+ // Destroy MetadataAsValues.
+ {
+ SmallVector<MetadataAsValue *, 8> MDVs;
+ MDVs.reserve(MetadataAsValues.size());
+ for (auto &Pair : MetadataAsValues)
+ MDVs.push_back(Pair.second);
+ MetadataAsValues.clear();
+ for (auto *V : MDVs)
+ delete V;
+ }
+
+ // Destroy ValuesAsMetadata.
+ for (auto &Pair : ValuesAsMetadata)
+ delete Pair.second;
+}
+
+void LLVMContextImpl::dropTriviallyDeadConstantArrays() {
+ SmallSetVector<ConstantArray *, 4> WorkList;
+
+  // When ArrayConstants is large and only a few of its elements are dead,
+  // seeding WorkList with all of ArrayConstants would be wasteful. Instead,
+  // start WorkList with only the elements that have no uses.
+ for (ConstantArray *C : ArrayConstants)
+ if (C->use_empty())
+ WorkList.insert(C);
+
+ while (!WorkList.empty()) {
+ ConstantArray *C = WorkList.pop_back_val();
+ if (C->use_empty()) {
+ for (const Use &Op : C->operands()) {
+ if (auto *COp = dyn_cast<ConstantArray>(Op))
+ WorkList.insert(COp);
+ }
+ C->destroyConstant();
+ }
+ }
+}
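+
+// Illustrative scenario: dropping the last use of a [1 x [1 x i32]] constant
+// destroys the outer ConstantArray; its inner [1 x i32] operand then becomes
+// use-empty, is pushed onto WorkList, and is destroyed on a later iteration,
+// so nests of dead arrays are freed transitively.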
+
+void Module::dropTriviallyDeadConstantArrays() {
+ Context.pImpl->dropTriviallyDeadConstantArrays();
+}
+
+namespace llvm {
+
+/// Make MDOperand transparent for hashing.
+///
+/// This overload of an implementation detail of the hashing library makes
+/// MDOperand hash to the same value as a \a Metadata pointer.
+///
+/// Note that overloading \a hash_value() as follows:
+///
+/// \code
+/// size_t hash_value(const MDOperand &X) { return hash_value(X.get()); }
+/// \endcode
+///
+/// does not cause MDOperand to be transparent. In particular, a bare pointer
+/// doesn't get hashed before it's combined, whereas \a MDOperand would.
+static const Metadata *get_hashable_data(const MDOperand &X) { return X.get(); }
+
+} // end namespace llvm
+
+unsigned MDNodeOpsKey::calculateHash(MDNode *N, unsigned Offset) {
+ unsigned Hash = hash_combine_range(N->op_begin() + Offset, N->op_end());
+#ifndef NDEBUG
+ {
+ SmallVector<Metadata *, 8> MDs(drop_begin(N->operands(), Offset));
+ unsigned RawHash = calculateHash(MDs);
+ assert(Hash == RawHash &&
+ "Expected hash of MDOperand to equal hash of Metadata*");
+ }
+#endif
+ return Hash;
+}
+
+unsigned MDNodeOpsKey::calculateHash(ArrayRef<Metadata *> Ops) {
+ return hash_combine_range(Ops.begin(), Ops.end());
+}
+
+StringMapEntry<uint32_t> *LLVMContextImpl::getOrInsertBundleTag(StringRef Tag) {
+ uint32_t NewIdx = BundleTagCache.size();
+ return &*(BundleTagCache.insert(std::make_pair(Tag, NewIdx)).first);
+}
+
+void LLVMContextImpl::getOperandBundleTags(
+    SmallVectorImpl<StringRef> &Tags) const {
+ Tags.resize(BundleTagCache.size());
+ for (const auto &T : BundleTagCache)
+ Tags[T.second] = T.first();
+}
+
+uint32_t LLVMContextImpl::getOperandBundleTagID(StringRef Tag) const {
+ auto I = BundleTagCache.find(Tag);
+ assert(I != BundleTagCache.end() && "Unknown tag!");
+ return I->second;
+}
+
+SyncScope::ID LLVMContextImpl::getOrInsertSyncScopeID(StringRef SSN) {
+ auto NewSSID = SSC.size();
+ assert(NewSSID < std::numeric_limits<SyncScope::ID>::max() &&
+ "Hit the maximum number of synchronization scopes allowed!");
+ return SSC.insert(std::make_pair(SSN, SyncScope::ID(NewSSID))).first->second;
+}
+
+void LLVMContextImpl::getSyncScopeNames(
+ SmallVectorImpl<StringRef> &SSNs) const {
+ SSNs.resize(SSC.size());
+ for (const auto &SSE : SSC)
+ SSNs[SSE.second] = SSE.first();
+}
+
+/// Gets the OptPassGate for this LLVMContextImpl, which defaults to the
+/// singleton OptBisect if not explicitly set.
+OptPassGate &LLVMContextImpl::getOptPassGate() const {
+ if (!OPG)
+ OPG = &getOptBisector();
+ return *OPG;
+}
+
+void LLVMContextImpl::setOptPassGate(OptPassGate& OPG) {
+ this->OPG = &OPG;
+}
+
+bool LLVMContextImpl::hasOpaquePointersValue() {
+ return OpaquePointers.has_value();
+}
+
+bool LLVMContextImpl::getOpaquePointers() {
+ if (LLVM_UNLIKELY(!OpaquePointers))
+ OpaquePointers = OpaquePointersCL;
+ return *OpaquePointers;
+}
+
+void LLVMContextImpl::setOpaquePointers(bool OP) {
+ assert((!OpaquePointers || OpaquePointers.value() == OP) &&
+ "Cannot change opaque pointers mode once set");
+ OpaquePointers = OP;
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h b/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h
new file mode 100644
index 000000000000..47add940f603
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/LLVMContextImpl.h
@@ -0,0 +1,1582 @@
+//===- LLVMContextImpl.h - The LLVMContextImpl opaque class -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares LLVMContextImpl, the opaque implementation
+// of LLVMContext.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_LLVMCONTEXTIMPL_H
+#define LLVM_LIB_IR_LLVMCONTEXTIMPL_H
+
+#include "ConstantsContext.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Any.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/StringSaver.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class AttributeImpl;
+class AttributeListImpl;
+class AttributeSetNode;
+class BasicBlock;
+struct DiagnosticHandler;
+class ElementCount;
+class Function;
+class GlobalObject;
+class GlobalValue;
+class InlineAsm;
+class LLVMRemarkStreamer;
+class OptPassGate;
+namespace remarks {
+class RemarkStreamer;
+}
+template <typename T> class StringMapEntry;
+class StringRef;
+class ValueHandleBase;
+
+using DenseMapAPIntKeyInfo = DenseMapInfo<APInt>;
+
+struct DenseMapAPFloatKeyInfo {
+ static inline APFloat getEmptyKey() { return APFloat(APFloat::Bogus(), 1); }
+ static inline APFloat getTombstoneKey() {
+ return APFloat(APFloat::Bogus(), 2);
+ }
+
+ static unsigned getHashValue(const APFloat &Key) {
+ return static_cast<unsigned>(hash_value(Key));
+ }
+
+ static bool isEqual(const APFloat &LHS, const APFloat &RHS) {
+ return LHS.bitwiseIsEqual(RHS);
+ }
+};
+
+struct AnonStructTypeKeyInfo {
+ struct KeyTy {
+ ArrayRef<Type *> ETypes;
+ bool isPacked;
+
+ KeyTy(const ArrayRef<Type *> &E, bool P) : ETypes(E), isPacked(P) {}
+
+ KeyTy(const StructType *ST)
+ : ETypes(ST->elements()), isPacked(ST->isPacked()) {}
+
+ bool operator==(const KeyTy &that) const {
+ if (isPacked != that.isPacked)
+ return false;
+ if (ETypes != that.ETypes)
+ return false;
+ return true;
+ }
+ bool operator!=(const KeyTy &that) const { return !this->operator==(that); }
+ };
+
+ static inline StructType *getEmptyKey() {
+ return DenseMapInfo<StructType *>::getEmptyKey();
+ }
+
+ static inline StructType *getTombstoneKey() {
+ return DenseMapInfo<StructType *>::getTombstoneKey();
+ }
+
+ static unsigned getHashValue(const KeyTy &Key) {
+ return hash_combine(
+ hash_combine_range(Key.ETypes.begin(), Key.ETypes.end()), Key.isPacked);
+ }
+
+ static unsigned getHashValue(const StructType *ST) {
+ return getHashValue(KeyTy(ST));
+ }
+
+ static bool isEqual(const KeyTy &LHS, const StructType *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return LHS == KeyTy(RHS);
+ }
+
+ static bool isEqual(const StructType *LHS, const StructType *RHS) {
+ return LHS == RHS;
+ }
+};
+
+struct FunctionTypeKeyInfo {
+ struct KeyTy {
+ const Type *ReturnType;
+ ArrayRef<Type *> Params;
+ bool isVarArg;
+
+ KeyTy(const Type *R, const ArrayRef<Type *> &P, bool V)
+ : ReturnType(R), Params(P), isVarArg(V) {}
+ KeyTy(const FunctionType *FT)
+ : ReturnType(FT->getReturnType()), Params(FT->params()),
+ isVarArg(FT->isVarArg()) {}
+
+ bool operator==(const KeyTy &that) const {
+ if (ReturnType != that.ReturnType)
+ return false;
+ if (isVarArg != that.isVarArg)
+ return false;
+ if (Params != that.Params)
+ return false;
+ return true;
+ }
+ bool operator!=(const KeyTy &that) const { return !this->operator==(that); }
+ };
+
+ static inline FunctionType *getEmptyKey() {
+ return DenseMapInfo<FunctionType *>::getEmptyKey();
+ }
+
+ static inline FunctionType *getTombstoneKey() {
+ return DenseMapInfo<FunctionType *>::getTombstoneKey();
+ }
+
+ static unsigned getHashValue(const KeyTy &Key) {
+ return hash_combine(
+ Key.ReturnType,
+ hash_combine_range(Key.Params.begin(), Key.Params.end()), Key.isVarArg);
+ }
+
+ static unsigned getHashValue(const FunctionType *FT) {
+ return getHashValue(KeyTy(FT));
+ }
+
+ static bool isEqual(const KeyTy &LHS, const FunctionType *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return LHS == KeyTy(RHS);
+ }
+
+ static bool isEqual(const FunctionType *LHS, const FunctionType *RHS) {
+ return LHS == RHS;
+ }
+};
+
+/// Structure for hashing arbitrary MDNode operands.
+class MDNodeOpsKey {
+ ArrayRef<Metadata *> RawOps;
+ ArrayRef<MDOperand> Ops;
+ unsigned Hash;
+
+protected:
+ MDNodeOpsKey(ArrayRef<Metadata *> Ops)
+ : RawOps(Ops), Hash(calculateHash(Ops)) {}
+
+ template <class NodeTy>
+ MDNodeOpsKey(const NodeTy *N, unsigned Offset = 0)
+ : Ops(N->op_begin() + Offset, N->op_end()), Hash(N->getHash()) {}
+
+ template <class NodeTy>
+ bool compareOps(const NodeTy *RHS, unsigned Offset = 0) const {
+ if (getHash() != RHS->getHash())
+ return false;
+
+ assert((RawOps.empty() || Ops.empty()) && "Two sets of operands?");
+ return RawOps.empty() ? compareOps(Ops, RHS, Offset)
+ : compareOps(RawOps, RHS, Offset);
+ }
+
+ static unsigned calculateHash(MDNode *N, unsigned Offset = 0);
+
+private:
+ template <class T>
+ static bool compareOps(ArrayRef<T> Ops, const MDNode *RHS, unsigned Offset) {
+ if (Ops.size() != RHS->getNumOperands() - Offset)
+ return false;
+ return std::equal(Ops.begin(), Ops.end(), RHS->op_begin() + Offset);
+ }
+
+ static unsigned calculateHash(ArrayRef<Metadata *> Ops);
+
+public:
+ unsigned getHash() const { return Hash; }
+};
+
+template <class NodeTy> struct MDNodeKeyImpl;
+
+/// Configuration point for MDNodeInfo::isEqual().
+template <class NodeTy> struct MDNodeSubsetEqualImpl {
+ using KeyTy = MDNodeKeyImpl<NodeTy>;
+
+ static bool isSubsetEqual(const KeyTy &LHS, const NodeTy *RHS) {
+ return false;
+ }
+
+ static bool isSubsetEqual(const NodeTy *LHS, const NodeTy *RHS) {
+ return false;
+ }
+};
+
+/// DenseMapInfo for MDTuple.
+///
+/// Note that we don't need the is-function-local bit, since that's implicit in
+/// the operands.
+template <> struct MDNodeKeyImpl<MDTuple> : MDNodeOpsKey {
+ MDNodeKeyImpl(ArrayRef<Metadata *> Ops) : MDNodeOpsKey(Ops) {}
+ MDNodeKeyImpl(const MDTuple *N) : MDNodeOpsKey(N) {}
+
+ bool isKeyOf(const MDTuple *RHS) const { return compareOps(RHS); }
+
+ unsigned getHashValue() const { return getHash(); }
+
+ static unsigned calculateHash(MDTuple *N) {
+ return MDNodeOpsKey::calculateHash(N);
+ }
+};
+
+/// DenseMapInfo for DILocation.
+template <> struct MDNodeKeyImpl<DILocation> {
+ unsigned Line;
+ unsigned Column;
+ Metadata *Scope;
+ Metadata *InlinedAt;
+ bool ImplicitCode;
+
+ MDNodeKeyImpl(unsigned Line, unsigned Column, Metadata *Scope,
+ Metadata *InlinedAt, bool ImplicitCode)
+ : Line(Line), Column(Column), Scope(Scope), InlinedAt(InlinedAt),
+ ImplicitCode(ImplicitCode) {}
+ MDNodeKeyImpl(const DILocation *L)
+ : Line(L->getLine()), Column(L->getColumn()), Scope(L->getRawScope()),
+ InlinedAt(L->getRawInlinedAt()), ImplicitCode(L->isImplicitCode()) {}
+
+ bool isKeyOf(const DILocation *RHS) const {
+ return Line == RHS->getLine() && Column == RHS->getColumn() &&
+ Scope == RHS->getRawScope() && InlinedAt == RHS->getRawInlinedAt() &&
+ ImplicitCode == RHS->isImplicitCode();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Line, Column, Scope, InlinedAt, ImplicitCode);
+ }
+};
+
+/// DenseMapInfo for GenericDINode.
+template <> struct MDNodeKeyImpl<GenericDINode> : MDNodeOpsKey {
+ unsigned Tag;
+ MDString *Header;
+
+ MDNodeKeyImpl(unsigned Tag, MDString *Header, ArrayRef<Metadata *> DwarfOps)
+ : MDNodeOpsKey(DwarfOps), Tag(Tag), Header(Header) {}
+ MDNodeKeyImpl(const GenericDINode *N)
+ : MDNodeOpsKey(N, 1), Tag(N->getTag()), Header(N->getRawHeader()) {}
+
+ bool isKeyOf(const GenericDINode *RHS) const {
+ return Tag == RHS->getTag() && Header == RHS->getRawHeader() &&
+ compareOps(RHS, 1);
+ }
+
+ unsigned getHashValue() const { return hash_combine(getHash(), Tag, Header); }
+
+ static unsigned calculateHash(GenericDINode *N) {
+ return MDNodeOpsKey::calculateHash(N, 1);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DISubrange> {
+ Metadata *CountNode;
+ Metadata *LowerBound;
+ Metadata *UpperBound;
+ Metadata *Stride;
+
+ MDNodeKeyImpl(Metadata *CountNode, Metadata *LowerBound, Metadata *UpperBound,
+ Metadata *Stride)
+ : CountNode(CountNode), LowerBound(LowerBound), UpperBound(UpperBound),
+ Stride(Stride) {}
+ MDNodeKeyImpl(const DISubrange *N)
+ : CountNode(N->getRawCountNode()), LowerBound(N->getRawLowerBound()),
+ UpperBound(N->getRawUpperBound()), Stride(N->getRawStride()) {}
+
+ bool isKeyOf(const DISubrange *RHS) const {
+ auto BoundsEqual = [=](Metadata *Node1, Metadata *Node2) -> bool {
+ if (Node1 == Node2)
+ return true;
+
+ ConstantAsMetadata *MD1 = dyn_cast_or_null<ConstantAsMetadata>(Node1);
+ ConstantAsMetadata *MD2 = dyn_cast_or_null<ConstantAsMetadata>(Node2);
+ if (MD1 && MD2) {
+ ConstantInt *CV1 = cast<ConstantInt>(MD1->getValue());
+ ConstantInt *CV2 = cast<ConstantInt>(MD2->getValue());
+ if (CV1->getSExtValue() == CV2->getSExtValue())
+ return true;
+ }
+ return false;
+ };
+
+ return BoundsEqual(CountNode, RHS->getRawCountNode()) &&
+ BoundsEqual(LowerBound, RHS->getRawLowerBound()) &&
+ BoundsEqual(UpperBound, RHS->getRawUpperBound()) &&
+ BoundsEqual(Stride, RHS->getRawStride());
+ }
+
+ unsigned getHashValue() const {
+ if (CountNode)
+ if (auto *MD = dyn_cast<ConstantAsMetadata>(CountNode))
+ return hash_combine(cast<ConstantInt>(MD->getValue())->getSExtValue(),
+ LowerBound, UpperBound, Stride);
+ return hash_combine(CountNode, LowerBound, UpperBound, Stride);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIGenericSubrange> {
+ Metadata *CountNode;
+ Metadata *LowerBound;
+ Metadata *UpperBound;
+ Metadata *Stride;
+
+ MDNodeKeyImpl(Metadata *CountNode, Metadata *LowerBound, Metadata *UpperBound,
+ Metadata *Stride)
+ : CountNode(CountNode), LowerBound(LowerBound), UpperBound(UpperBound),
+ Stride(Stride) {}
+ MDNodeKeyImpl(const DIGenericSubrange *N)
+ : CountNode(N->getRawCountNode()), LowerBound(N->getRawLowerBound()),
+ UpperBound(N->getRawUpperBound()), Stride(N->getRawStride()) {}
+
+ bool isKeyOf(const DIGenericSubrange *RHS) const {
+ return (CountNode == RHS->getRawCountNode()) &&
+ (LowerBound == RHS->getRawLowerBound()) &&
+ (UpperBound == RHS->getRawUpperBound()) &&
+ (Stride == RHS->getRawStride());
+ }
+
+ unsigned getHashValue() const {
+ auto *MD = dyn_cast_or_null<ConstantAsMetadata>(CountNode);
+ if (CountNode && MD)
+ return hash_combine(cast<ConstantInt>(MD->getValue())->getSExtValue(),
+ LowerBound, UpperBound, Stride);
+ return hash_combine(CountNode, LowerBound, UpperBound, Stride);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIEnumerator> {
+ APInt Value;
+ MDString *Name;
+ bool IsUnsigned;
+
+ MDNodeKeyImpl(APInt Value, bool IsUnsigned, MDString *Name)
+ : Value(Value), Name(Name), IsUnsigned(IsUnsigned) {}
+ MDNodeKeyImpl(int64_t Value, bool IsUnsigned, MDString *Name)
+ : Value(APInt(64, Value, !IsUnsigned)), Name(Name),
+ IsUnsigned(IsUnsigned) {}
+ MDNodeKeyImpl(const DIEnumerator *N)
+ : Value(N->getValue()), Name(N->getRawName()),
+ IsUnsigned(N->isUnsigned()) {}
+
+ bool isKeyOf(const DIEnumerator *RHS) const {
+ return Value.getBitWidth() == RHS->getValue().getBitWidth() &&
+ Value == RHS->getValue() && IsUnsigned == RHS->isUnsigned() &&
+ Name == RHS->getRawName();
+ }
+
+ unsigned getHashValue() const { return hash_combine(Value, Name); }
+};
+
+template <> struct MDNodeKeyImpl<DIBasicType> {
+ unsigned Tag;
+ MDString *Name;
+ uint64_t SizeInBits;
+ uint32_t AlignInBits;
+ unsigned Encoding;
+ unsigned Flags;
+
+ MDNodeKeyImpl(unsigned Tag, MDString *Name, uint64_t SizeInBits,
+ uint32_t AlignInBits, unsigned Encoding, unsigned Flags)
+ : Tag(Tag), Name(Name), SizeInBits(SizeInBits), AlignInBits(AlignInBits),
+ Encoding(Encoding), Flags(Flags) {}
+ MDNodeKeyImpl(const DIBasicType *N)
+ : Tag(N->getTag()), Name(N->getRawName()), SizeInBits(N->getSizeInBits()),
+ AlignInBits(N->getAlignInBits()), Encoding(N->getEncoding()),
+ Flags(N->getFlags()) {}
+
+ bool isKeyOf(const DIBasicType *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getRawName() &&
+ SizeInBits == RHS->getSizeInBits() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ Encoding == RHS->getEncoding() && Flags == RHS->getFlags();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Tag, Name, SizeInBits, AlignInBits, Encoding);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIStringType> {
+ unsigned Tag;
+ MDString *Name;
+ Metadata *StringLength;
+ Metadata *StringLengthExp;
+ Metadata *StringLocationExp;
+ uint64_t SizeInBits;
+ uint32_t AlignInBits;
+ unsigned Encoding;
+
+ MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *StringLength,
+ Metadata *StringLengthExp, Metadata *StringLocationExp,
+ uint64_t SizeInBits, uint32_t AlignInBits, unsigned Encoding)
+ : Tag(Tag), Name(Name), StringLength(StringLength),
+ StringLengthExp(StringLengthExp), StringLocationExp(StringLocationExp),
+ SizeInBits(SizeInBits), AlignInBits(AlignInBits), Encoding(Encoding) {}
+ MDNodeKeyImpl(const DIStringType *N)
+ : Tag(N->getTag()), Name(N->getRawName()),
+ StringLength(N->getRawStringLength()),
+ StringLengthExp(N->getRawStringLengthExp()),
+ StringLocationExp(N->getRawStringLocationExp()),
+ SizeInBits(N->getSizeInBits()), AlignInBits(N->getAlignInBits()),
+ Encoding(N->getEncoding()) {}
+
+ bool isKeyOf(const DIStringType *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getRawName() &&
+ SizeInBits == RHS->getSizeInBits() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ Encoding == RHS->getEncoding();
+ }
+ unsigned getHashValue() const { return hash_combine(Tag, Name, Encoding); }
+};
+
+template <> struct MDNodeKeyImpl<DIDerivedType> {
+ unsigned Tag;
+ MDString *Name;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Scope;
+ Metadata *BaseType;
+ uint64_t SizeInBits;
+ uint64_t OffsetInBits;
+ uint32_t AlignInBits;
+ Optional<unsigned> DWARFAddressSpace;
+ unsigned Flags;
+ Metadata *ExtraData;
+ Metadata *Annotations;
+
+ MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
+ Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits,
+ Optional<unsigned> DWARFAddressSpace, unsigned Flags,
+ Metadata *ExtraData, Metadata *Annotations)
+ : Tag(Tag), Name(Name), File(File), Line(Line), Scope(Scope),
+ BaseType(BaseType), SizeInBits(SizeInBits), OffsetInBits(OffsetInBits),
+ AlignInBits(AlignInBits), DWARFAddressSpace(DWARFAddressSpace),
+ Flags(Flags), ExtraData(ExtraData), Annotations(Annotations) {}
+ MDNodeKeyImpl(const DIDerivedType *N)
+ : Tag(N->getTag()), Name(N->getRawName()), File(N->getRawFile()),
+ Line(N->getLine()), Scope(N->getRawScope()),
+ BaseType(N->getRawBaseType()), SizeInBits(N->getSizeInBits()),
+ OffsetInBits(N->getOffsetInBits()), AlignInBits(N->getAlignInBits()),
+ DWARFAddressSpace(N->getDWARFAddressSpace()), Flags(N->getFlags()),
+ ExtraData(N->getRawExtraData()), Annotations(N->getRawAnnotations()) {}
+
+ bool isKeyOf(const DIDerivedType *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getRawName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Scope == RHS->getRawScope() && BaseType == RHS->getRawBaseType() &&
+ SizeInBits == RHS->getSizeInBits() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ OffsetInBits == RHS->getOffsetInBits() &&
+ DWARFAddressSpace == RHS->getDWARFAddressSpace() &&
+ Flags == RHS->getFlags() && ExtraData == RHS->getRawExtraData() &&
+ Annotations == RHS->getRawAnnotations();
+ }
+
+ unsigned getHashValue() const {
+ // If this is a member inside an ODR type, only hash the type and the name.
+ // Otherwise the hash will be stronger than
+ // MDNodeSubsetEqualImpl::isODRMember().
+ if (Tag == dwarf::DW_TAG_member && Name)
+ if (auto *CT = dyn_cast_or_null<DICompositeType>(Scope))
+ if (CT->getRawIdentifier())
+ return hash_combine(Name, Scope);
+
+ // Intentionally computes the hash on a subset of the operands for
+ // performance reason. The subset has to be significant enough to avoid
+ // collision "most of the time". There is no correctness issue in case of
+ // collision because of the full check above.
+ return hash_combine(Tag, Name, File, Line, Scope, BaseType, Flags);
+ }
+};
+
+template <> struct MDNodeSubsetEqualImpl<DIDerivedType> {
+ using KeyTy = MDNodeKeyImpl<DIDerivedType>;
+
+ static bool isSubsetEqual(const KeyTy &LHS, const DIDerivedType *RHS) {
+ return isODRMember(LHS.Tag, LHS.Scope, LHS.Name, RHS);
+ }
+
+ static bool isSubsetEqual(const DIDerivedType *LHS,
+ const DIDerivedType *RHS) {
+ return isODRMember(LHS->getTag(), LHS->getRawScope(), LHS->getRawName(),
+ RHS);
+ }
+
+  /// Members compare equal if they are members of the same ODR type with the
+  /// same tag and name.
+ static bool isODRMember(unsigned Tag, const Metadata *Scope,
+ const MDString *Name, const DIDerivedType *RHS) {
+ // Check whether the LHS is eligible.
+ if (Tag != dwarf::DW_TAG_member || !Name)
+ return false;
+
+ auto *CT = dyn_cast_or_null<DICompositeType>(Scope);
+ if (!CT || !CT->getRawIdentifier())
+ return false;
+
+ // Compare to the RHS.
+ return Tag == RHS->getTag() && Name == RHS->getRawName() &&
+ Scope == RHS->getRawScope();
+ }
+};
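+
+// Practical effect (a sketch): two DW_TAG_member nodes with the same name
+// whose scope is the same identified (ODR) DICompositeType unique to a
+// single node even if fields such as the file or line differ, because
+// isODRMember() compares only the tag, scope and name.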
+
+template <> struct MDNodeKeyImpl<DICompositeType> {
+ unsigned Tag;
+ MDString *Name;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Scope;
+ Metadata *BaseType;
+ uint64_t SizeInBits;
+ uint64_t OffsetInBits;
+ uint32_t AlignInBits;
+ unsigned Flags;
+ Metadata *Elements;
+ unsigned RuntimeLang;
+ Metadata *VTableHolder;
+ Metadata *TemplateParams;
+ MDString *Identifier;
+ Metadata *Discriminator;
+ Metadata *DataLocation;
+ Metadata *Associated;
+ Metadata *Allocated;
+ Metadata *Rank;
+ Metadata *Annotations;
+
+ MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
+ Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ Metadata *Elements, unsigned RuntimeLang,
+ Metadata *VTableHolder, Metadata *TemplateParams,
+ MDString *Identifier, Metadata *Discriminator,
+ Metadata *DataLocation, Metadata *Associated,
+ Metadata *Allocated, Metadata *Rank, Metadata *Annotations)
+ : Tag(Tag), Name(Name), File(File), Line(Line), Scope(Scope),
+ BaseType(BaseType), SizeInBits(SizeInBits), OffsetInBits(OffsetInBits),
+ AlignInBits(AlignInBits), Flags(Flags), Elements(Elements),
+ RuntimeLang(RuntimeLang), VTableHolder(VTableHolder),
+ TemplateParams(TemplateParams), Identifier(Identifier),
+ Discriminator(Discriminator), DataLocation(DataLocation),
+ Associated(Associated), Allocated(Allocated), Rank(Rank),
+ Annotations(Annotations) {}
+ MDNodeKeyImpl(const DICompositeType *N)
+ : Tag(N->getTag()), Name(N->getRawName()), File(N->getRawFile()),
+ Line(N->getLine()), Scope(N->getRawScope()),
+ BaseType(N->getRawBaseType()), SizeInBits(N->getSizeInBits()),
+ OffsetInBits(N->getOffsetInBits()), AlignInBits(N->getAlignInBits()),
+ Flags(N->getFlags()), Elements(N->getRawElements()),
+ RuntimeLang(N->getRuntimeLang()), VTableHolder(N->getRawVTableHolder()),
+ TemplateParams(N->getRawTemplateParams()),
+ Identifier(N->getRawIdentifier()),
+ Discriminator(N->getRawDiscriminator()),
+ DataLocation(N->getRawDataLocation()),
+ Associated(N->getRawAssociated()), Allocated(N->getRawAllocated()),
+ Rank(N->getRawRank()), Annotations(N->getRawAnnotations()) {}
+
+ bool isKeyOf(const DICompositeType *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getRawName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Scope == RHS->getRawScope() && BaseType == RHS->getRawBaseType() &&
+ SizeInBits == RHS->getSizeInBits() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ OffsetInBits == RHS->getOffsetInBits() && Flags == RHS->getFlags() &&
+ Elements == RHS->getRawElements() &&
+ RuntimeLang == RHS->getRuntimeLang() &&
+ VTableHolder == RHS->getRawVTableHolder() &&
+ TemplateParams == RHS->getRawTemplateParams() &&
+ Identifier == RHS->getRawIdentifier() &&
+ Discriminator == RHS->getRawDiscriminator() &&
+ DataLocation == RHS->getRawDataLocation() &&
+ Associated == RHS->getRawAssociated() &&
+ Allocated == RHS->getRawAllocated() && Rank == RHS->getRawRank() &&
+ Annotations == RHS->getRawAnnotations();
+ }
+
+ unsigned getHashValue() const {
+ // Intentionally computes the hash on a subset of the operands for
+ // performance reason. The subset has to be significant enough to avoid
+ // collision "most of the time". There is no correctness issue in case of
+ // collision because of the full check above.
+ return hash_combine(Name, File, Line, BaseType, Scope, Elements,
+ TemplateParams, Annotations);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DISubroutineType> {
+ unsigned Flags;
+ uint8_t CC;
+ Metadata *TypeArray;
+
+ MDNodeKeyImpl(unsigned Flags, uint8_t CC, Metadata *TypeArray)
+ : Flags(Flags), CC(CC), TypeArray(TypeArray) {}
+ MDNodeKeyImpl(const DISubroutineType *N)
+ : Flags(N->getFlags()), CC(N->getCC()), TypeArray(N->getRawTypeArray()) {}
+
+ bool isKeyOf(const DISubroutineType *RHS) const {
+ return Flags == RHS->getFlags() && CC == RHS->getCC() &&
+ TypeArray == RHS->getRawTypeArray();
+ }
+
+ unsigned getHashValue() const { return hash_combine(Flags, CC, TypeArray); }
+};
+
+template <> struct MDNodeKeyImpl<DIFile> {
+ MDString *Filename;
+ MDString *Directory;
+ Optional<DIFile::ChecksumInfo<MDString *>> Checksum;
+ Optional<MDString *> Source;
+
+ MDNodeKeyImpl(MDString *Filename, MDString *Directory,
+ Optional<DIFile::ChecksumInfo<MDString *>> Checksum,
+ Optional<MDString *> Source)
+ : Filename(Filename), Directory(Directory), Checksum(Checksum),
+ Source(Source) {}
+ MDNodeKeyImpl(const DIFile *N)
+ : Filename(N->getRawFilename()), Directory(N->getRawDirectory()),
+ Checksum(N->getRawChecksum()), Source(N->getRawSource()) {}
+
+ bool isKeyOf(const DIFile *RHS) const {
+ return Filename == RHS->getRawFilename() &&
+ Directory == RHS->getRawDirectory() &&
+ Checksum == RHS->getRawChecksum() && Source == RHS->getRawSource();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Filename, Directory, Checksum ? Checksum->Kind : 0,
+ Checksum ? Checksum->Value : nullptr,
+ Source.value_or(nullptr));
+ }
+};
+
+template <> struct MDNodeKeyImpl<DISubprogram> {
+ Metadata *Scope;
+ MDString *Name;
+ MDString *LinkageName;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Type;
+ unsigned ScopeLine;
+ Metadata *ContainingType;
+ unsigned VirtualIndex;
+ int ThisAdjustment;
+ unsigned Flags;
+ unsigned SPFlags;
+ Metadata *Unit;
+ Metadata *TemplateParams;
+ Metadata *Declaration;
+ Metadata *RetainedNodes;
+ Metadata *ThrownTypes;
+ Metadata *Annotations;
+ MDString *TargetFuncName;
+
+ MDNodeKeyImpl(Metadata *Scope, MDString *Name, MDString *LinkageName,
+ Metadata *File, unsigned Line, Metadata *Type,
+ unsigned ScopeLine, Metadata *ContainingType,
+ unsigned VirtualIndex, int ThisAdjustment, unsigned Flags,
+ unsigned SPFlags, Metadata *Unit, Metadata *TemplateParams,
+ Metadata *Declaration, Metadata *RetainedNodes,
+ Metadata *ThrownTypes, Metadata *Annotations,
+ MDString *TargetFuncName)
+ : Scope(Scope), Name(Name), LinkageName(LinkageName), File(File),
+ Line(Line), Type(Type), ScopeLine(ScopeLine),
+ ContainingType(ContainingType), VirtualIndex(VirtualIndex),
+ ThisAdjustment(ThisAdjustment), Flags(Flags), SPFlags(SPFlags),
+ Unit(Unit), TemplateParams(TemplateParams), Declaration(Declaration),
+ RetainedNodes(RetainedNodes), ThrownTypes(ThrownTypes),
+ Annotations(Annotations), TargetFuncName(TargetFuncName) {}
+ MDNodeKeyImpl(const DISubprogram *N)
+ : Scope(N->getRawScope()), Name(N->getRawName()),
+ LinkageName(N->getRawLinkageName()), File(N->getRawFile()),
+ Line(N->getLine()), Type(N->getRawType()), ScopeLine(N->getScopeLine()),
+ ContainingType(N->getRawContainingType()),
+ VirtualIndex(N->getVirtualIndex()),
+ ThisAdjustment(N->getThisAdjustment()), Flags(N->getFlags()),
+ SPFlags(N->getSPFlags()), Unit(N->getRawUnit()),
+ TemplateParams(N->getRawTemplateParams()),
+ Declaration(N->getRawDeclaration()),
+ RetainedNodes(N->getRawRetainedNodes()),
+ ThrownTypes(N->getRawThrownTypes()),
+ Annotations(N->getRawAnnotations()),
+ TargetFuncName(N->getRawTargetFuncName()) {}
+
+ bool isKeyOf(const DISubprogram *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
+ LinkageName == RHS->getRawLinkageName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Type == RHS->getRawType() && ScopeLine == RHS->getScopeLine() &&
+ ContainingType == RHS->getRawContainingType() &&
+ VirtualIndex == RHS->getVirtualIndex() &&
+ ThisAdjustment == RHS->getThisAdjustment() &&
+ Flags == RHS->getFlags() && SPFlags == RHS->getSPFlags() &&
+ Unit == RHS->getRawUnit() &&
+ TemplateParams == RHS->getRawTemplateParams() &&
+ Declaration == RHS->getRawDeclaration() &&
+ RetainedNodes == RHS->getRawRetainedNodes() &&
+ ThrownTypes == RHS->getRawThrownTypes() &&
+ Annotations == RHS->getRawAnnotations() &&
+ TargetFuncName == RHS->getRawTargetFuncName();
+ }
+
+ bool isDefinition() const { return SPFlags & DISubprogram::SPFlagDefinition; }
+
+ unsigned getHashValue() const {
+ // If this is a declaration inside an ODR type, only hash the ODR type and
+ // the linkage name. Otherwise the hash would be stronger than
+ // MDNodeSubsetEqualImpl::isDeclarationOfODRMember(), which must treat all
+ // such declarations as equal.
+ if (!isDefinition() && LinkageName)
+ if (auto *CT = dyn_cast_or_null<DICompositeType>(Scope))
+ if (CT->getRawIdentifier())
+ return hash_combine(LinkageName, Scope);
+
+ // Intentionally computes the hash on a subset of the operands for
+ // performance reasons. The subset has to be significant enough to avoid
+ // collisions "most of the time". There is no correctness issue in case of a
+ // collision because of the full equality check above.
+ return hash_combine(Name, Scope, File, Type, Line);
+ }
+};
+
+template <> struct MDNodeSubsetEqualImpl<DISubprogram> {
+ using KeyTy = MDNodeKeyImpl<DISubprogram>;
+
+ static bool isSubsetEqual(const KeyTy &LHS, const DISubprogram *RHS) {
+ return isDeclarationOfODRMember(LHS.isDefinition(), LHS.Scope,
+ LHS.LinkageName, LHS.TemplateParams, RHS);
+ }
+
+ static bool isSubsetEqual(const DISubprogram *LHS, const DISubprogram *RHS) {
+ return isDeclarationOfODRMember(LHS->isDefinition(), LHS->getRawScope(),
+ LHS->getRawLinkageName(),
+ LHS->getRawTemplateParams(), RHS);
+ }
+
+ /// Subprograms compare equal if they declare the same function in an ODR
+ /// type.
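+ /// For example, two declarations with the same linkage name whose scope is
+ /// a DICompositeType carrying an `identifier:` string compare equal, even
+ /// when other operands (such as file or line) differ.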
+ static bool isDeclarationOfODRMember(bool IsDefinition, const Metadata *Scope,
+ const MDString *LinkageName,
+ const Metadata *TemplateParams,
+ const DISubprogram *RHS) {
+ // Check whether the LHS is eligible.
+ if (IsDefinition || !Scope || !LinkageName)
+ return false;
+
+ auto *CT = dyn_cast_or_null<DICompositeType>(Scope);
+ if (!CT || !CT->getRawIdentifier())
+ return false;
+
+ // Compare to the RHS.
+ // FIXME: We need to compare template parameters here to avoid incorrect
+ // collisions in mapMetadata when RF_ReuseAndMutateDistinctMDs and a
+ // ODR-DISubprogram has a non-ODR template parameter (i.e., a
+ // DICompositeType that does not have an identifier). Eventually we should
+ // decouple ODR logic from uniquing logic.
+ return IsDefinition == RHS->isDefinition() && Scope == RHS->getRawScope() &&
+ LinkageName == RHS->getRawLinkageName() &&
+ TemplateParams == RHS->getRawTemplateParams();
+ }
+};
+
+template <> struct MDNodeKeyImpl<DILexicalBlock> {
+ Metadata *Scope;
+ Metadata *File;
+ unsigned Line;
+ unsigned Column;
+
+ MDNodeKeyImpl(Metadata *Scope, Metadata *File, unsigned Line, unsigned Column)
+ : Scope(Scope), File(File), Line(Line), Column(Column) {}
+ MDNodeKeyImpl(const DILexicalBlock *N)
+ : Scope(N->getRawScope()), File(N->getRawFile()), Line(N->getLine()),
+ Column(N->getColumn()) {}
+
+ bool isKeyOf(const DILexicalBlock *RHS) const {
+ return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
+ Line == RHS->getLine() && Column == RHS->getColumn();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Scope, File, Line, Column);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DILexicalBlockFile> {
+ Metadata *Scope;
+ Metadata *File;
+ unsigned Discriminator;
+
+ MDNodeKeyImpl(Metadata *Scope, Metadata *File, unsigned Discriminator)
+ : Scope(Scope), File(File), Discriminator(Discriminator) {}
+ MDNodeKeyImpl(const DILexicalBlockFile *N)
+ : Scope(N->getRawScope()), File(N->getRawFile()),
+ Discriminator(N->getDiscriminator()) {}
+
+ bool isKeyOf(const DILexicalBlockFile *RHS) const {
+ return Scope == RHS->getRawScope() && File == RHS->getRawFile() &&
+ Discriminator == RHS->getDiscriminator();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Scope, File, Discriminator);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DINamespace> {
+ Metadata *Scope;
+ MDString *Name;
+ bool ExportSymbols;
+
+ MDNodeKeyImpl(Metadata *Scope, MDString *Name, bool ExportSymbols)
+ : Scope(Scope), Name(Name), ExportSymbols(ExportSymbols) {}
+ MDNodeKeyImpl(const DINamespace *N)
+ : Scope(N->getRawScope()), Name(N->getRawName()),
+ ExportSymbols(N->getExportSymbols()) {}
+
+ bool isKeyOf(const DINamespace *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
+ ExportSymbols == RHS->getExportSymbols();
+ }
+
+ unsigned getHashValue() const { return hash_combine(Scope, Name); }
+};
+
+template <> struct MDNodeKeyImpl<DICommonBlock> {
+ Metadata *Scope;
+ Metadata *Decl;
+ MDString *Name;
+ Metadata *File;
+ unsigned LineNo;
+
+ MDNodeKeyImpl(Metadata *Scope, Metadata *Decl, MDString *Name, Metadata *File,
+ unsigned LineNo)
+ : Scope(Scope), Decl(Decl), Name(Name), File(File), LineNo(LineNo) {}
+ MDNodeKeyImpl(const DICommonBlock *N)
+ : Scope(N->getRawScope()), Decl(N->getRawDecl()), Name(N->getRawName()),
+ File(N->getRawFile()), LineNo(N->getLineNo()) {}
+
+ bool isKeyOf(const DICommonBlock *RHS) const {
+ return Scope == RHS->getRawScope() && Decl == RHS->getRawDecl() &&
+ Name == RHS->getRawName() && File == RHS->getRawFile() &&
+ LineNo == RHS->getLineNo();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Scope, Decl, Name, File, LineNo);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIModule> {
+ Metadata *File;
+ Metadata *Scope;
+ MDString *Name;
+ MDString *ConfigurationMacros;
+ MDString *IncludePath;
+ MDString *APINotesFile;
+ unsigned LineNo;
+ bool IsDecl;
+
+ MDNodeKeyImpl(Metadata *File, Metadata *Scope, MDString *Name,
+ MDString *ConfigurationMacros, MDString *IncludePath,
+ MDString *APINotesFile, unsigned LineNo, bool IsDecl)
+ : File(File), Scope(Scope), Name(Name),
+ ConfigurationMacros(ConfigurationMacros), IncludePath(IncludePath),
+ APINotesFile(APINotesFile), LineNo(LineNo), IsDecl(IsDecl) {}
+ MDNodeKeyImpl(const DIModule *N)
+ : File(N->getRawFile()), Scope(N->getRawScope()), Name(N->getRawName()),
+ ConfigurationMacros(N->getRawConfigurationMacros()),
+ IncludePath(N->getRawIncludePath()),
+ APINotesFile(N->getRawAPINotesFile()), LineNo(N->getLineNo()),
+ IsDecl(N->getIsDecl()) {}
+
+ bool isKeyOf(const DIModule *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
+ ConfigurationMacros == RHS->getRawConfigurationMacros() &&
+ IncludePath == RHS->getRawIncludePath() &&
+ APINotesFile == RHS->getRawAPINotesFile() &&
+ File == RHS->getRawFile() && LineNo == RHS->getLineNo() &&
+ IsDecl == RHS->getIsDecl();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Scope, Name, ConfigurationMacros, IncludePath);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DITemplateTypeParameter> {
+ MDString *Name;
+ Metadata *Type;
+ bool IsDefault;
+
+ MDNodeKeyImpl(MDString *Name, Metadata *Type, bool IsDefault)
+ : Name(Name), Type(Type), IsDefault(IsDefault) {}
+ MDNodeKeyImpl(const DITemplateTypeParameter *N)
+ : Name(N->getRawName()), Type(N->getRawType()),
+ IsDefault(N->isDefault()) {}
+
+ bool isKeyOf(const DITemplateTypeParameter *RHS) const {
+ return Name == RHS->getRawName() && Type == RHS->getRawType() &&
+ IsDefault == RHS->isDefault();
+ }
+
+ unsigned getHashValue() const { return hash_combine(Name, Type, IsDefault); }
+};
+
+template <> struct MDNodeKeyImpl<DITemplateValueParameter> {
+ unsigned Tag;
+ MDString *Name;
+ Metadata *Type;
+ bool IsDefault;
+ Metadata *Value;
+
+ MDNodeKeyImpl(unsigned Tag, MDString *Name, Metadata *Type, bool IsDefault,
+ Metadata *Value)
+ : Tag(Tag), Name(Name), Type(Type), IsDefault(IsDefault), Value(Value) {}
+ MDNodeKeyImpl(const DITemplateValueParameter *N)
+ : Tag(N->getTag()), Name(N->getRawName()), Type(N->getRawType()),
+ IsDefault(N->isDefault()), Value(N->getValue()) {}
+
+ bool isKeyOf(const DITemplateValueParameter *RHS) const {
+ return Tag == RHS->getTag() && Name == RHS->getRawName() &&
+ Type == RHS->getRawType() && IsDefault == RHS->isDefault() &&
+ Value == RHS->getValue();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Tag, Name, Type, IsDefault, Value);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIGlobalVariable> {
+ Metadata *Scope;
+ MDString *Name;
+ MDString *LinkageName;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Type;
+ bool IsLocalToUnit;
+ bool IsDefinition;
+ Metadata *StaticDataMemberDeclaration;
+ Metadata *TemplateParams;
+ uint32_t AlignInBits;
+ Metadata *Annotations;
+
+ MDNodeKeyImpl(Metadata *Scope, MDString *Name, MDString *LinkageName,
+ Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition,
+ Metadata *StaticDataMemberDeclaration, Metadata *TemplateParams,
+ uint32_t AlignInBits, Metadata *Annotations)
+ : Scope(Scope), Name(Name), LinkageName(LinkageName), File(File),
+ Line(Line), Type(Type), IsLocalToUnit(IsLocalToUnit),
+ IsDefinition(IsDefinition),
+ StaticDataMemberDeclaration(StaticDataMemberDeclaration),
+ TemplateParams(TemplateParams), AlignInBits(AlignInBits),
+ Annotations(Annotations) {}
+ MDNodeKeyImpl(const DIGlobalVariable *N)
+ : Scope(N->getRawScope()), Name(N->getRawName()),
+ LinkageName(N->getRawLinkageName()), File(N->getRawFile()),
+ Line(N->getLine()), Type(N->getRawType()),
+ IsLocalToUnit(N->isLocalToUnit()), IsDefinition(N->isDefinition()),
+ StaticDataMemberDeclaration(N->getRawStaticDataMemberDeclaration()),
+ TemplateParams(N->getRawTemplateParams()),
+ AlignInBits(N->getAlignInBits()), Annotations(N->getRawAnnotations()) {}
+
+ bool isKeyOf(const DIGlobalVariable *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
+ LinkageName == RHS->getRawLinkageName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Type == RHS->getRawType() && IsLocalToUnit == RHS->isLocalToUnit() &&
+ IsDefinition == RHS->isDefinition() &&
+ StaticDataMemberDeclaration ==
+ RHS->getRawStaticDataMemberDeclaration() &&
+ TemplateParams == RHS->getRawTemplateParams() &&
+ AlignInBits == RHS->getAlignInBits() &&
+ Annotations == RHS->getRawAnnotations();
+ }
+
+ unsigned getHashValue() const {
+ // We do not use AlignInBits in the hash function here on purpose: in most
+ // cases this parameter is zero for a local variable (for a function
+ // parameter it is always zero), which leads to lots of hash collisions and
+ // errors in cases with many similar variables.
+ // clang/test/CodeGen/debug-info-257-args.c is an example of this problem:
+ // the generated IR is random for each run, and the test fails with Align
+ // included.
+ // TODO: make hashing work well in such situations.
+ return hash_combine(Scope, Name, LinkageName, File, Line, Type,
+ IsLocalToUnit, IsDefinition, /* AlignInBits, */
+ StaticDataMemberDeclaration, Annotations);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DILocalVariable> {
+ Metadata *Scope;
+ MDString *Name;
+ Metadata *File;
+ unsigned Line;
+ Metadata *Type;
+ unsigned Arg;
+ unsigned Flags;
+ uint32_t AlignInBits;
+ Metadata *Annotations;
+
+ MDNodeKeyImpl(Metadata *Scope, MDString *Name, Metadata *File, unsigned Line,
+ Metadata *Type, unsigned Arg, unsigned Flags,
+ uint32_t AlignInBits, Metadata *Annotations)
+ : Scope(Scope), Name(Name), File(File), Line(Line), Type(Type), Arg(Arg),
+ Flags(Flags), AlignInBits(AlignInBits), Annotations(Annotations) {}
+ MDNodeKeyImpl(const DILocalVariable *N)
+ : Scope(N->getRawScope()), Name(N->getRawName()), File(N->getRawFile()),
+ Line(N->getLine()), Type(N->getRawType()), Arg(N->getArg()),
+ Flags(N->getFlags()), AlignInBits(N->getAlignInBits()),
+ Annotations(N->getRawAnnotations()) {}
+
+ bool isKeyOf(const DILocalVariable *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine() &&
+ Type == RHS->getRawType() && Arg == RHS->getArg() &&
+ Flags == RHS->getFlags() && AlignInBits == RHS->getAlignInBits() &&
+ Annotations == RHS->getRawAnnotations();
+ }
+
+ unsigned getHashValue() const {
+ // We do not use AlignInBits in the hash function here on purpose: in most
+ // cases this parameter is zero for a local variable (for a function
+ // parameter it is always zero), which leads to lots of hash collisions and
+ // errors in cases with many similar variables.
+ // clang/test/CodeGen/debug-info-257-args.c is an example of this problem:
+ // the generated IR is random for each run, and the test fails with Align
+ // included.
+ // TODO: make hashing work well in such situations.
+ return hash_combine(Scope, Name, File, Line, Type, Arg, Flags, Annotations);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DILabel> {
+ Metadata *Scope;
+ MDString *Name;
+ Metadata *File;
+ unsigned Line;
+
+ MDNodeKeyImpl(Metadata *Scope, MDString *Name, Metadata *File, unsigned Line)
+ : Scope(Scope), Name(Name), File(File), Line(Line) {}
+ MDNodeKeyImpl(const DILabel *N)
+ : Scope(N->getRawScope()), Name(N->getRawName()), File(N->getRawFile()),
+ Line(N->getLine()) {}
+
+ bool isKeyOf(const DILabel *RHS) const {
+ return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
+ File == RHS->getRawFile() && Line == RHS->getLine();
+ }
+
+ /// Hash on the scope, name, and line; together these should already be
+ /// mostly unique.
+ unsigned getHashValue() const { return hash_combine(Scope, Name, Line); }
+};
+
+template <> struct MDNodeKeyImpl<DIExpression> {
+ ArrayRef<uint64_t> Elements;
+
+ MDNodeKeyImpl(ArrayRef<uint64_t> Elements) : Elements(Elements) {}
+ MDNodeKeyImpl(const DIExpression *N) : Elements(N->getElements()) {}
+
+ bool isKeyOf(const DIExpression *RHS) const {
+ return Elements == RHS->getElements();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine_range(Elements.begin(), Elements.end());
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIGlobalVariableExpression> {
+ Metadata *Variable;
+ Metadata *Expression;
+
+ MDNodeKeyImpl(Metadata *Variable, Metadata *Expression)
+ : Variable(Variable), Expression(Expression) {}
+ MDNodeKeyImpl(const DIGlobalVariableExpression *N)
+ : Variable(N->getRawVariable()), Expression(N->getRawExpression()) {}
+
+ bool isKeyOf(const DIGlobalVariableExpression *RHS) const {
+ return Variable == RHS->getRawVariable() &&
+ Expression == RHS->getRawExpression();
+ }
+
+ unsigned getHashValue() const { return hash_combine(Variable, Expression); }
+};
+
+template <> struct MDNodeKeyImpl<DIObjCProperty> {
+ MDString *Name;
+ Metadata *File;
+ unsigned Line;
+ MDString *GetterName;
+ MDString *SetterName;
+ unsigned Attributes;
+ Metadata *Type;
+
+ MDNodeKeyImpl(MDString *Name, Metadata *File, unsigned Line,
+ MDString *GetterName, MDString *SetterName, unsigned Attributes,
+ Metadata *Type)
+ : Name(Name), File(File), Line(Line), GetterName(GetterName),
+ SetterName(SetterName), Attributes(Attributes), Type(Type) {}
+ MDNodeKeyImpl(const DIObjCProperty *N)
+ : Name(N->getRawName()), File(N->getRawFile()), Line(N->getLine()),
+ GetterName(N->getRawGetterName()), SetterName(N->getRawSetterName()),
+ Attributes(N->getAttributes()), Type(N->getRawType()) {}
+
+ bool isKeyOf(const DIObjCProperty *RHS) const {
+ return Name == RHS->getRawName() && File == RHS->getRawFile() &&
+ Line == RHS->getLine() && GetterName == RHS->getRawGetterName() &&
+ SetterName == RHS->getRawSetterName() &&
+ Attributes == RHS->getAttributes() && Type == RHS->getRawType();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Name, File, Line, GetterName, SetterName, Attributes,
+ Type);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIImportedEntity> {
+ unsigned Tag;
+ Metadata *Scope;
+ Metadata *Entity;
+ Metadata *File;
+ unsigned Line;
+ MDString *Name;
+ Metadata *Elements;
+
+ MDNodeKeyImpl(unsigned Tag, Metadata *Scope, Metadata *Entity, Metadata *File,
+ unsigned Line, MDString *Name, Metadata *Elements)
+ : Tag(Tag), Scope(Scope), Entity(Entity), File(File), Line(Line),
+ Name(Name), Elements(Elements) {}
+ MDNodeKeyImpl(const DIImportedEntity *N)
+ : Tag(N->getTag()), Scope(N->getRawScope()), Entity(N->getRawEntity()),
+ File(N->getRawFile()), Line(N->getLine()), Name(N->getRawName()),
+ Elements(N->getRawElements()) {}
+
+ bool isKeyOf(const DIImportedEntity *RHS) const {
+ return Tag == RHS->getTag() && Scope == RHS->getRawScope() &&
+ Entity == RHS->getRawEntity() && File == RHS->getFile() &&
+ Line == RHS->getLine() && Name == RHS->getRawName() &&
+ Elements == RHS->getRawElements();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(Tag, Scope, Entity, File, Line, Name, Elements);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIMacro> {
+ unsigned MIType;
+ unsigned Line;
+ MDString *Name;
+ MDString *Value;
+
+ MDNodeKeyImpl(unsigned MIType, unsigned Line, MDString *Name, MDString *Value)
+ : MIType(MIType), Line(Line), Name(Name), Value(Value) {}
+ MDNodeKeyImpl(const DIMacro *N)
+ : MIType(N->getMacinfoType()), Line(N->getLine()), Name(N->getRawName()),
+ Value(N->getRawValue()) {}
+
+ bool isKeyOf(const DIMacro *RHS) const {
+ return MIType == RHS->getMacinfoType() && Line == RHS->getLine() &&
+ Name == RHS->getRawName() && Value == RHS->getRawValue();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(MIType, Line, Name, Value);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIMacroFile> {
+ unsigned MIType;
+ unsigned Line;
+ Metadata *File;
+ Metadata *Elements;
+
+ MDNodeKeyImpl(unsigned MIType, unsigned Line, Metadata *File,
+ Metadata *Elements)
+ : MIType(MIType), Line(Line), File(File), Elements(Elements) {}
+ MDNodeKeyImpl(const DIMacroFile *N)
+ : MIType(N->getMacinfoType()), Line(N->getLine()), File(N->getRawFile()),
+ Elements(N->getRawElements()) {}
+
+ bool isKeyOf(const DIMacroFile *RHS) const {
+ return MIType == RHS->getMacinfoType() && Line == RHS->getLine() &&
+ File == RHS->getRawFile() && Elements == RHS->getRawElements();
+ }
+
+ unsigned getHashValue() const {
+ return hash_combine(MIType, Line, File, Elements);
+ }
+};
+
+template <> struct MDNodeKeyImpl<DIArgList> {
+ ArrayRef<ValueAsMetadata *> Args;
+
+ MDNodeKeyImpl(ArrayRef<ValueAsMetadata *> Args) : Args(Args) {}
+ MDNodeKeyImpl(const DIArgList *N) : Args(N->getArgs()) {}
+
+ bool isKeyOf(const DIArgList *RHS) const { return Args == RHS->getArgs(); }
+
+ unsigned getHashValue() const {
+ return hash_combine_range(Args.begin(), Args.end());
+ }
+};
+
+/// DenseMapInfo for MDNode subclasses.
+template <class NodeTy> struct MDNodeInfo {
+ using KeyTy = MDNodeKeyImpl<NodeTy>;
+ using SubsetEqualTy = MDNodeSubsetEqualImpl<NodeTy>;
+
+ static inline NodeTy *getEmptyKey() {
+ return DenseMapInfo<NodeTy *>::getEmptyKey();
+ }
+
+ static inline NodeTy *getTombstoneKey() {
+ return DenseMapInfo<NodeTy *>::getTombstoneKey();
+ }
+
+ static unsigned getHashValue(const KeyTy &Key) { return Key.getHashValue(); }
+
+ static unsigned getHashValue(const NodeTy *N) {
+ return KeyTy(N).getHashValue();
+ }
+
+ static bool isEqual(const KeyTy &LHS, const NodeTy *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return SubsetEqualTy::isSubsetEqual(LHS, RHS) || LHS.isKeyOf(RHS);
+ }
+
+ static bool isEqual(const NodeTy *LHS, const NodeTy *RHS) {
+ if (LHS == RHS)
+ return true;
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ return SubsetEqualTy::isSubsetEqual(LHS, RHS);
+ }
+};
+
+#define HANDLE_MDNODE_LEAF(CLASS) using CLASS##Info = MDNodeInfo<CLASS>;
+#include "llvm/IR/Metadata.def"
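+
+// For example, for DISubprogram the macro above expands to:
+//
+//   using DISubprogramInfo = MDNodeInfo<DISubprogram>;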
+
+/// Multimap-like storage for metadata attachments.
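+///
+/// A sketch of the intended behavior (N1, N2, N3 are hypothetical nodes):
+/// \code
+///   MDAttachments MDA;
+///   MDA.insert(LLVMContext::MD_dbg, *N1);
+///   MDA.insert(LLVMContext::MD_dbg, *N2); // Same ID; both attachments kept.
+///   MDA.lookup(LLVMContext::MD_dbg);      // Returns N1 (first inserted).
+///   MDA.set(LLVMContext::MD_dbg, N3);     // Replaces both with N3.
+/// \endcode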
+class MDAttachments {
+public:
+ struct Attachment {
+ unsigned MDKind;
+ TrackingMDNodeRef Node;
+ };
+
+private:
+ SmallVector<Attachment, 1> Attachments;
+
+public:
+ bool empty() const { return Attachments.empty(); }
+ size_t size() const { return Attachments.size(); }
+
+ /// Returns the first attachment with the given ID or nullptr if no such
+ /// attachment exists.
+ MDNode *lookup(unsigned ID) const;
+
+ /// Appends all attachments with the given ID to \c Result in insertion
+ /// order. If the value has no attachments with the given ID, or if ID is
+ /// invalid, leaves Result unchanged.
+ void get(unsigned ID, SmallVectorImpl<MDNode *> &Result) const;
+
+ /// Appends all attachments for the value to \c Result, sorted by attachment
+ /// ID. Attachments with the same ID appear in insertion order. This function
+ /// does \em not clear \c Result.
+ void getAll(SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const;
+
+ /// Set an attachment to a particular node.
+ ///
+ /// Set the \c ID attachment to \c MD, replacing the current attachments at
+ /// \c ID (if any).
+ void set(unsigned ID, MDNode *MD);
+
+ /// Adds an attachment to a particular node.
+ void insert(unsigned ID, MDNode &MD);
+
+ /// Remove attachments with the given ID.
+ ///
+ /// Remove the attachments at \c ID, if any.
+ bool erase(unsigned ID);
+
+ /// Erase matching attachments.
+ ///
+ /// Erases all attachments matching the \c shouldRemove predicate.
+ template <class PredTy> void remove_if(PredTy shouldRemove) {
+ llvm::erase_if(Attachments, shouldRemove);
+ }
+};
+
+class LLVMContextImpl {
+public:
+ /// OwnedModules - The set of modules instantiated in this context, and which
+ /// will be automatically deleted if this context is deleted.
+ SmallPtrSet<Module *, 4> OwnedModules;
+
+ /// The main remark streamer used by all the other streamers (e.g. IR, MIR,
+ /// frontends, etc.). This should only be used by the specific streamers, and
+ /// never directly.
+ std::unique_ptr<remarks::RemarkStreamer> MainRemarkStreamer;
+
+ std::unique_ptr<DiagnosticHandler> DiagHandler;
+ bool RespectDiagnosticFilters = false;
+ bool DiagnosticsHotnessRequested = false;
+ /// The minimum hotness value a diagnostic needs in order to be included in
+ /// optimization diagnostics.
+ ///
+ /// The threshold is an Optional value, which maps to one of the 3 states:
+ /// 1). 0 => threshold disabled. All remarks will be printed.
+ /// 2). positive int => manual threshold set by the user. Remarks with
+ ///     hotness exceeding the threshold will be printed.
+ /// 3). None => 'auto' threshold requested by the user. The actual value is
+ ///             not available on the command line, but will be synced with
+ ///             the hotness threshold from the profile summary during
+ ///             compilation.
+ ///
+ /// States 1 and 2 are terminal; the only allowed transition is from 3 to 2,
+ /// when the threshold is first synced with the profile summary. This ensures
+ /// that the threshold is set only once and stays constant.
+ ///
+ /// If the threshold option is not specified, it is disabled (0) by default.
+ Optional<uint64_t> DiagnosticsHotnessThreshold = 0;
+
+ /// The percentage of difference between profiling branch weights and
+ /// llvm.expect branch weights to tolerate when emitting MisExpect
+ /// diagnostics.
+ Optional<uint64_t> DiagnosticsMisExpectTolerance = 0;
+ bool MisExpectWarningRequested = false;
+
+ /// The specialized remark streamer used by LLVM's OptimizationRemarkEmitter.
+ std::unique_ptr<LLVMRemarkStreamer> LLVMRS;
+
+ LLVMContext::YieldCallbackTy YieldCallback = nullptr;
+ void *YieldOpaqueHandle = nullptr;
+
+ DenseMap<const Value *, ValueName *> ValueNames;
+
+ using IntMapTy =
+ DenseMap<APInt, std::unique_ptr<ConstantInt>, DenseMapAPIntKeyInfo>;
+ IntMapTy IntConstants;
+
+ using FPMapTy =
+ DenseMap<APFloat, std::unique_ptr<ConstantFP>, DenseMapAPFloatKeyInfo>;
+ FPMapTy FPConstants;
+
+ FoldingSet<AttributeImpl> AttrsSet;
+ FoldingSet<AttributeListImpl> AttrsLists;
+ FoldingSet<AttributeSetNode> AttrsSetNodes;
+
+ StringMap<MDString, BumpPtrAllocator> MDStringCache;
+ DenseMap<Value *, ValueAsMetadata *> ValuesAsMetadata;
+ DenseMap<Metadata *, MetadataAsValue *> MetadataAsValues;
+
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ DenseSet<CLASS *, CLASS##Info> CLASS##s;
+#include "llvm/IR/Metadata.def"
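+
+// For example, for DISubprogram the macro above expands to:
+//
+//   DenseSet<DISubprogram *, DISubprogramInfo> DISubprograms;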
+
+ // Optional map for looking up composite types by identifier.
+ Optional<DenseMap<const MDString *, DICompositeType *>> DITypeMap;
+
+ // MDNodes may be uniqued or not uniqued. When they're not uniqued, they
+ // aren't in the MDNodeSet, but they're still shared between objects, so no
+ // one object can destroy them. Keep track of them here so we can delete
+ // them on context teardown.
+ std::vector<MDNode *> DistinctMDNodes;
+
+ DenseMap<Type *, std::unique_ptr<ConstantAggregateZero>> CAZConstants;
+
+ using ArrayConstantsTy = ConstantUniqueMap<ConstantArray>;
+ ArrayConstantsTy ArrayConstants;
+
+ using StructConstantsTy = ConstantUniqueMap<ConstantStruct>;
+ StructConstantsTy StructConstants;
+
+ using VectorConstantsTy = ConstantUniqueMap<ConstantVector>;
+ VectorConstantsTy VectorConstants;
+
+ DenseMap<PointerType *, std::unique_ptr<ConstantPointerNull>> CPNConstants;
+
+ DenseMap<Type *, std::unique_ptr<UndefValue>> UVConstants;
+
+ DenseMap<Type *, std::unique_ptr<PoisonValue>> PVConstants;
+
+ StringMap<std::unique_ptr<ConstantDataSequential>> CDSConstants;
+
+ DenseMap<std::pair<const Function *, const BasicBlock *>, BlockAddress *>
+ BlockAddresses;
+
+ DenseMap<const GlobalValue *, DSOLocalEquivalent *> DSOLocalEquivalents;
+
+ DenseMap<const GlobalValue *, NoCFIValue *> NoCFIValues;
+
+ ConstantUniqueMap<ConstantExpr> ExprConstants;
+
+ ConstantUniqueMap<InlineAsm> InlineAsms;
+
+ ConstantInt *TheTrueVal = nullptr;
+ ConstantInt *TheFalseVal = nullptr;
+
+ // Basic type instances.
+ Type VoidTy, LabelTy, HalfTy, BFloatTy, FloatTy, DoubleTy, MetadataTy,
+ TokenTy;
+ Type X86_FP80Ty, FP128Ty, PPC_FP128Ty, X86_MMXTy, X86_AMXTy;
+ IntegerType Int1Ty, Int8Ty, Int16Ty, Int32Ty, Int64Ty, Int128Ty;
+
+ std::unique_ptr<ConstantTokenNone> TheNoneToken;
+
+ BumpPtrAllocator Alloc;
+ UniqueStringSaver Saver{Alloc};
+
+ DenseMap<unsigned, IntegerType *> IntegerTypes;
+
+ using FunctionTypeSet = DenseSet<FunctionType *, FunctionTypeKeyInfo>;
+ FunctionTypeSet FunctionTypes;
+ using StructTypeSet = DenseSet<StructType *, AnonStructTypeKeyInfo>;
+ StructTypeSet AnonStructTypes;
+ StringMap<StructType *> NamedStructTypes;
+ unsigned NamedStructTypesUniqueID = 0;
+
+ DenseMap<std::pair<Type *, uint64_t>, ArrayType *> ArrayTypes;
+ DenseMap<std::pair<Type *, ElementCount>, VectorType *> VectorTypes;
+ DenseMap<Type *, PointerType *> PointerTypes; // Pointers in AddrSpace = 0
+ DenseMap<std::pair<Type *, unsigned>, PointerType *> ASPointerTypes;
+
+ /// ValueHandles - This map keeps track of all of the value handles that are
+ /// watching a Value*. The Value::HasValueHandle bit is used to know
+ /// whether or not a value has an entry in this map.
+ using ValueHandlesTy = DenseMap<Value *, ValueHandleBase *>;
+ ValueHandlesTy ValueHandles;
+
+ /// CustomMDKindNames - Map to hold the metadata string to ID mapping.
+ StringMap<unsigned> CustomMDKindNames;
+
+ /// Collection of metadata used in this context.
+ DenseMap<const Value *, MDAttachments> ValueMetadata;
+
+ /// Collection of per-GlobalObject sections used in this context.
+ DenseMap<const GlobalObject *, StringRef> GlobalObjectSections;
+
+ /// Collection of per-GlobalValue partitions used in this context.
+ DenseMap<const GlobalValue *, StringRef> GlobalValuePartitions;
+
+ DenseMap<const GlobalValue *, GlobalValue::SanitizerMetadata>
+ GlobalValueSanitizerMetadata;
+
+ /// DiscriminatorTable - This table maps file:line locations to an
+ /// integer representing the next DWARF path discriminator to assign to
+ /// instructions in different blocks at the same location.
+ DenseMap<std::pair<const char *, unsigned>, unsigned> DiscriminatorTable;
+
+ /// A set of interned tags for operand bundles. The StringMap maps
+ /// bundle tags to their IDs.
+ ///
+ /// \see LLVMContext::getOperandBundleTagID
+ StringMap<uint32_t> BundleTagCache;
+
+ StringMapEntry<uint32_t> *getOrInsertBundleTag(StringRef Tag);
+ void getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const;
+ uint32_t getOperandBundleTagID(StringRef Tag) const;
+
+ /// A set of interned synchronization scopes. The StringMap maps
+ /// synchronization scope names to their respective synchronization scope IDs.
+ StringMap<SyncScope::ID> SSC;
+
+ /// getOrInsertSyncScopeID - Maps synchronization scope name to
+ /// synchronization scope ID. Every synchronization scope registered with
+ /// LLVMContext has unique ID except pre-defined ones.
+ SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
+
+ /// getSyncScopeNames - Populates client supplied SmallVector with
+ /// synchronization scope names registered with LLVMContext. Synchronization
+ /// scope names are ordered by increasing synchronization scope IDs.
+ void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
+ /// Maintain the GC name for each function.
+ ///
+ /// This saves allocating an additional word in Function for programs which
+ /// do not use GC (i.e., most programs) at the cost of increased overhead for
+ /// clients which do use GC.
+ DenseMap<const Function *, std::string> GCNames;
+
+ /// Flag to indicate whether Values (other than GlobalValues) retain their
+ /// names.
+ bool DiscardValueNames = false;
+
+ LLVMContextImpl(LLVMContext &C);
+ ~LLVMContextImpl();
+
+ /// Destroy the ConstantArrays if they are not used.
+ void dropTriviallyDeadConstantArrays();
+
+ mutable OptPassGate *OPG = nullptr;
+
+ /// Access the object which can disable optional passes and individual
+ /// optimizations at compile time.
+ OptPassGate &getOptPassGate() const;
+
+ /// Set the object which can disable optional passes and individual
+ /// optimizations at compile time.
+ ///
+ /// The lifetime of the object must be guaranteed to extend as long as the
+ /// LLVMContext is used by compilation.
+ void setOptPassGate(OptPassGate &);
+
+ // TODO: clean up the following after we no longer support non-opaque pointer
+ // types.
+ bool getOpaquePointers();
+ bool hasOpaquePointersValue();
+ void setOpaquePointers(bool OP);
+
+ llvm::Any TargetDataStorage;
+
+private:
+ Optional<bool> OpaquePointers;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_IR_LLVMCONTEXTIMPL_H
diff --git a/contrib/llvm-project/llvm/lib/IR/LLVMRemarkStreamer.cpp b/contrib/llvm-project/llvm/lib/IR/LLVMRemarkStreamer.cpp
new file mode 100644
index 000000000000..f7e2aa4e9a35
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/LLVMRemarkStreamer.cpp
@@ -0,0 +1,173 @@
+//===- llvm/IR/LLVMRemarkStreamer.cpp - Remark Streamer --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the conversion between IR
+// Diagnostics and serializable remarks::Remark objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/LLVMRemarkStreamer.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Remarks/RemarkStreamer.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ToolOutputFile.h"
+
+using namespace llvm;
+
+/// DiagnosticKind -> remarks::Type
+static remarks::Type toRemarkType(enum DiagnosticKind Kind) {
+ switch (Kind) {
+ default:
+ return remarks::Type::Unknown;
+ case DK_OptimizationRemark:
+ case DK_MachineOptimizationRemark:
+ return remarks::Type::Passed;
+ case DK_OptimizationRemarkMissed:
+ case DK_MachineOptimizationRemarkMissed:
+ return remarks::Type::Missed;
+ case DK_OptimizationRemarkAnalysis:
+ case DK_MachineOptimizationRemarkAnalysis:
+ return remarks::Type::Analysis;
+ case DK_OptimizationRemarkAnalysisFPCommute:
+ return remarks::Type::AnalysisFPCommute;
+ case DK_OptimizationRemarkAnalysisAliasing:
+ return remarks::Type::AnalysisAliasing;
+ case DK_OptimizationFailure:
+ return remarks::Type::Failure;
+ }
+}
+
+/// DiagnosticLocation -> remarks::RemarkLocation.
+static Optional<remarks::RemarkLocation>
+toRemarkLocation(const DiagnosticLocation &DL) {
+ if (!DL.isValid())
+ return None;
+ StringRef File = DL.getRelativePath();
+ unsigned Line = DL.getLine();
+ unsigned Col = DL.getColumn();
+ return remarks::RemarkLocation{File, Line, Col};
+}
+
+/// LLVM Diagnostic -> Remark
+remarks::Remark
+LLVMRemarkStreamer::toRemark(const DiagnosticInfoOptimizationBase &Diag) const {
+ remarks::Remark R; // The result.
+ R.RemarkType = toRemarkType(static_cast<DiagnosticKind>(Diag.getKind()));
+ R.PassName = Diag.getPassName();
+ R.RemarkName = Diag.getRemarkName();
+ R.FunctionName =
+ GlobalValue::dropLLVMManglingEscape(Diag.getFunction().getName());
+ R.Loc = toRemarkLocation(Diag.getLocation());
+ R.Hotness = Diag.getHotness();
+
+ for (const DiagnosticInfoOptimizationBase::Argument &Arg : Diag.getArgs()) {
+ R.Args.emplace_back();
+ R.Args.back().Key = Arg.Key;
+ R.Args.back().Val = Arg.Val;
+ R.Args.back().Loc = toRemarkLocation(Arg.Loc);
+ }
+
+ return R;
+}
+
+void LLVMRemarkStreamer::emit(const DiagnosticInfoOptimizationBase &Diag) {
+ if (!RS.matchesFilter(Diag.getPassName()))
+ return;
+
+ // First, convert the diagnostic to a remark.
+ remarks::Remark R = toRemark(Diag);
+ // Then, emit the remark through the serializer.
+ RS.getSerializer().emit(R);
+}
+
+char LLVMRemarkSetupFileError::ID = 0;
+char LLVMRemarkSetupPatternError::ID = 0;
+char LLVMRemarkSetupFormatError::ID = 0;
+
+Expected<std::unique_ptr<ToolOutputFile>> llvm::setupLLVMOptimizationRemarks(
+ LLVMContext &Context, StringRef RemarksFilename, StringRef RemarksPasses,
+ StringRef RemarksFormat, bool RemarksWithHotness,
+ Optional<uint64_t> RemarksHotnessThreshold) {
+ if (RemarksWithHotness)
+ Context.setDiagnosticsHotnessRequested(true);
+
+ Context.setDiagnosticsHotnessThreshold(RemarksHotnessThreshold);
+
+ if (RemarksFilename.empty())
+ return nullptr;
+
+ Expected<remarks::Format> Format = remarks::parseFormat(RemarksFormat);
+ if (Error E = Format.takeError())
+ return make_error<LLVMRemarkSetupFormatError>(std::move(E));
+
+ std::error_code EC;
+ auto Flags = *Format == remarks::Format::YAML ? sys::fs::OF_TextWithCRLF
+ : sys::fs::OF_None;
+ auto RemarksFile =
+ std::make_unique<ToolOutputFile>(RemarksFilename, EC, Flags);
+ // We don't use llvm::FileError here because some diagnostics want the file
+ // name separately.
+ if (EC)
+ return make_error<LLVMRemarkSetupFileError>(errorCodeToError(EC));
+
+ Expected<std::unique_ptr<remarks::RemarkSerializer>> RemarkSerializer =
+ remarks::createRemarkSerializer(
+ *Format, remarks::SerializerMode::Separate, RemarksFile->os());
+ if (Error E = RemarkSerializer.takeError())
+ return make_error<LLVMRemarkSetupFormatError>(std::move(E));
+
+ // Create the main remark streamer.
+ Context.setMainRemarkStreamer(std::make_unique<remarks::RemarkStreamer>(
+ std::move(*RemarkSerializer), RemarksFilename));
+
+ // Create LLVM's optimization remarks streamer.
+ Context.setLLVMRemarkStreamer(
+ std::make_unique<LLVMRemarkStreamer>(*Context.getMainRemarkStreamer()));
+
+ if (!RemarksPasses.empty())
+ if (Error E = Context.getMainRemarkStreamer()->setFilter(RemarksPasses))
+ return make_error<LLVMRemarkSetupPatternError>(std::move(E));
+
+ return std::move(RemarksFile);
+}
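+
+// A typical call from a tool driver looks like the following sketch (error
+// handling elided; the file and format names are illustrative):
+//
+//   Expected<std::unique_ptr<ToolOutputFile>> FileOrErr =
+//       setupLLVMOptimizationRemarks(Ctx, "remarks.yaml",
+//                                    /*RemarksPasses=*/"",
+//                                    /*RemarksFormat=*/"yaml",
+//                                    /*RemarksWithHotness=*/true,
+//                                    /*RemarksHotnessThreshold=*/None);
+//   if (FileOrErr && *FileOrErr)
+//     (*FileOrErr)->keep(); // Keep the remarks file on success.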
+
+Error llvm::setupLLVMOptimizationRemarks(
+ LLVMContext &Context, raw_ostream &OS, StringRef RemarksPasses,
+ StringRef RemarksFormat, bool RemarksWithHotness,
+ Optional<uint64_t> RemarksHotnessThreshold) {
+ if (RemarksWithHotness)
+ Context.setDiagnosticsHotnessRequested(true);
+
+ Context.setDiagnosticsHotnessThreshold(RemarksHotnessThreshold);
+
+ Expected<remarks::Format> Format = remarks::parseFormat(RemarksFormat);
+ if (Error E = Format.takeError())
+ return make_error<LLVMRemarkSetupFormatError>(std::move(E));
+
+ Expected<std::unique_ptr<remarks::RemarkSerializer>> RemarkSerializer =
+ remarks::createRemarkSerializer(*Format,
+ remarks::SerializerMode::Separate, OS);
+ if (Error E = RemarkSerializer.takeError())
+ return make_error<LLVMRemarkSetupFormatError>(std::move(E));
+
+ // Create the main remark streamer.
+ Context.setMainRemarkStreamer(
+ std::make_unique<remarks::RemarkStreamer>(std::move(*RemarkSerializer)));
+
+ // Create LLVM's optimization remarks streamer.
+ Context.setLLVMRemarkStreamer(
+ std::make_unique<LLVMRemarkStreamer>(*Context.getMainRemarkStreamer()));
+
+ if (!RemarksPasses.empty())
+ if (Error E = Context.getMainRemarkStreamer()->setFilter(RemarksPasses))
+ return make_error<LLVMRemarkSetupPatternError>(std::move(E));
+
+ return Error::success();
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp b/contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp
new file mode 100644
index 000000000000..ef3465177647
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/LegacyPassManager.cpp
@@ -0,0 +1,1766 @@
+//===- LegacyPassManager.cpp - LLVM Pass Infrastructure Implementation ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the legacy LLVM Pass Manager infrastructure.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassTimingInfo.h"
+#include "llvm/IR/PrintPasses.h"
+#include "llvm/Support/Chrono.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace llvm;
+
+// See LegacyPassManagers.h for an overview of the pass manager infrastructure.
+
+//===----------------------------------------------------------------------===//
+// Pass debugging information. Often it is useful to find out what pass is
+// running when a crash occurs in a utility. When this library is compiled with
+// debugging on, a command line option (--debug-pass) is enabled that causes the
+// pass name to be printed before it executes.
+//
+
+namespace {
+// Different debug levels that can be enabled...
+enum PassDebugLevel {
+ Disabled, Arguments, Structure, Executions, Details
+};
+} // namespace
+
+static cl::opt<enum PassDebugLevel> PassDebugging(
+ "debug-pass", cl::Hidden,
+ cl::desc("Print legacy PassManager debugging information"),
+ cl::values(clEnumVal(Disabled, "disable debug output"),
+ clEnumVal(Arguments, "print pass arguments to pass to 'opt'"),
+ clEnumVal(Structure, "print pass structure before run()"),
+ clEnumVal(Executions, "print pass name before it is executed"),
+ clEnumVal(Details, "print pass details when it is executed")));
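+
+// For example, with the legacy pass manager in use, `opt -debug-pass=Executions
+// in.ll` prints each pass name immediately before it runs.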
+
+/// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
+/// or higher is specified.
+bool PMDataManager::isPassDebuggingExecutionsOrMore() const {
+ return PassDebugging >= Executions;
+}
+
+unsigned PMDataManager::initSizeRemarkInfo(
+ Module &M, StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount) {
+ // Only calculate getInstructionCount if the size-info remark is requested.
+ unsigned InstrCount = 0;
+
+ // Collect instruction counts for every function. We'll use this to emit
+ // per-function size remarks later.
+ for (Function &F : M) {
+ unsigned FCount = F.getInstructionCount();
+
+ // Insert a record into FunctionToInstrCount keeping track of the current
+ // size of the function as the first member of a pair. Set the second
+ // member to 0; if the function is deleted by the pass, then when we get
+ // here, we'll be able to let the user know that F no longer contributes to
+ // the module.
+ FunctionToInstrCount[F.getName().str()] =
+ std::pair<unsigned, unsigned>(FCount, 0);
+ InstrCount += FCount;
+ }
+ return InstrCount;
+}
+
+void PMDataManager::emitInstrCountChangedRemark(
+ Pass *P, Module &M, int64_t Delta, unsigned CountBefore,
+ StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount,
+ Function *F) {
+ // If it's a pass manager, don't emit a remark. (This hinges on the assumption
+ // that the only passes that return non-null with getAsPMDataManager are pass
+ // managers.) The reason we have to do this is to avoid emitting remarks for
+ // CGSCC passes.
+ if (P->getAsPMDataManager())
+ return;
+
+ // Set to true if this isn't a module pass or CGSCC pass.
+ bool CouldOnlyImpactOneFunction = (F != nullptr);
+
+ // Helper lambda that updates the changes to the size of some function.
+ auto UpdateFunctionChanges =
+ [&FunctionToInstrCount](Function &MaybeChangedFn) {
+ // Update the total module count.
+ unsigned FnSize = MaybeChangedFn.getInstructionCount();
+ auto It = FunctionToInstrCount.find(MaybeChangedFn.getName());
+
+ // If we created a new function, then we need to add it to the map and
+ // say that it changed from 0 instructions to FnSize.
+ if (It == FunctionToInstrCount.end()) {
+ FunctionToInstrCount[MaybeChangedFn.getName()] =
+ std::pair<unsigned, unsigned>(0, FnSize);
+ return;
+ }
+ // Insert the new function size into the second member of the pair. This
+ // tells us whether or not this function changed in size.
+ It->second.second = FnSize;
+ };
+
+ // We need to initially update all of the function sizes.
+ // If no function was passed in, then we're either a module pass or a
+ // CGSCC pass.
+ if (!CouldOnlyImpactOneFunction)
+ std::for_each(M.begin(), M.end(), UpdateFunctionChanges);
+ else
+ UpdateFunctionChanges(*F);
+
+ // Do we have a function we can use to emit a remark?
+ if (!CouldOnlyImpactOneFunction) {
+ // We need a function containing at least one basic block in order to output
+ // remarks. Since it's possible that the first function in the module
+ // doesn't actually contain a basic block, we have to go and find one that's
+ // suitable for emitting remarks.
+ auto It = llvm::find_if(M, [](const Function &Fn) { return !Fn.empty(); });
+
+ // Didn't find a function. Quit.
+ if (It == M.end())
+ return;
+
+ // We found a function containing at least one basic block.
+ F = &*It;
+ }
+ int64_t CountAfter = static_cast<int64_t>(CountBefore) + Delta;
+ BasicBlock &BB = *F->begin();
+ OptimizationRemarkAnalysis R("size-info", "IRSizeChange",
+ DiagnosticLocation(), &BB);
+ // FIXME: Move ore namespace to DiagnosticInfo so that we can use it. This
+ // would let us use NV instead of DiagnosticInfoOptimizationBase::Argument.
+ R << DiagnosticInfoOptimizationBase::Argument("Pass", P->getPassName())
+ << ": IR instruction count changed from "
+ << DiagnosticInfoOptimizationBase::Argument("IRInstrsBefore", CountBefore)
+ << " to "
+ << DiagnosticInfoOptimizationBase::Argument("IRInstrsAfter", CountAfter)
+ << "; Delta: "
+ << DiagnosticInfoOptimizationBase::Argument("DeltaInstrCount", Delta);
+ F->getContext().diagnose(R); // Not using ORE for layering reasons.
+
+ // Emit per-function size change remarks separately.
+ std::string PassName = P->getPassName().str();
+
+ // Helper lambda that emits a remark when the size of a function has changed.
+ auto EmitFunctionSizeChangedRemark = [&FunctionToInstrCount, &F, &BB,
+ &PassName](StringRef Fname) {
+ unsigned FnCountBefore, FnCountAfter;
+ std::pair<unsigned, unsigned> &Change = FunctionToInstrCount[Fname];
+ std::tie(FnCountBefore, FnCountAfter) = Change;
+ int64_t FnDelta = static_cast<int64_t>(FnCountAfter) -
+ static_cast<int64_t>(FnCountBefore);
+
+ if (FnDelta == 0)
+ return;
+
+ // FIXME: We shouldn't use BB for the location here. Unfortunately, because
+ // the function that we're looking at could have been deleted, we can't use
+ // it for the source location. We *want* remarks when a function is deleted
+ // though, so we're kind of stuck here as is. (This remark, along with the
+ // whole-module size change remarks really ought not to have source
+ // locations at all.)
+ OptimizationRemarkAnalysis FR("size-info", "FunctionIRSizeChange",
+ DiagnosticLocation(), &BB);
+ FR << DiagnosticInfoOptimizationBase::Argument("Pass", PassName)
+ << ": Function: "
+ << DiagnosticInfoOptimizationBase::Argument("Function", Fname)
+ << ": IR instruction count changed from "
+ << DiagnosticInfoOptimizationBase::Argument("IRInstrsBefore",
+ FnCountBefore)
+ << " to "
+ << DiagnosticInfoOptimizationBase::Argument("IRInstrsAfter",
+ FnCountAfter)
+ << "; Delta: "
+ << DiagnosticInfoOptimizationBase::Argument("DeltaInstrCount", FnDelta);
+ F->getContext().diagnose(FR);
+
+ // Update the function size.
+ Change.first = FnCountAfter;
+ };
+
+ // Are we looking at more than one function? If so, emit remarks for all of
+ // the functions in the module. Otherwise, only emit one remark.
+ if (!CouldOnlyImpactOneFunction)
+ std::for_each(FunctionToInstrCount.keys().begin(),
+ FunctionToInstrCount.keys().end(),
+ EmitFunctionSizeChangedRemark);
+ else
+ EmitFunctionSizeChangedRemark(F->getName().str());
+}
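+
+// These size-info remarks are enabled through the standard remark flags, e.g.
+// `opt -pass-remarks-analysis=size-info` (illustrative invocation).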
+
+void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
+ if (!V && !M)
+ OS << "Releasing pass '";
+ else
+ OS << "Running pass '";
+
+ OS << P->getPassName() << "'";
+
+ if (M) {
+ OS << " on module '" << M->getModuleIdentifier() << "'.\n";
+ return;
+ }
+ if (!V) {
+ OS << '\n';
+ return;
+ }
+
+ OS << " on ";
+ if (isa<Function>(V))
+ OS << "function";
+ else if (isa<BasicBlock>(V))
+ OS << "basic block";
+ else
+ OS << "value";
+
+ OS << " '";
+ V->printAsOperand(OS, /*PrintType=*/false, M);
+ OS << "'\n";
+}
+
+namespace llvm {
+namespace legacy {
+bool debugPassSpecified() { return PassDebugging != Disabled; }
+
+//===----------------------------------------------------------------------===//
+// FunctionPassManagerImpl
+//
+/// FunctionPassManagerImpl manages FPPassManagers
+class FunctionPassManagerImpl : public Pass,
+ public PMDataManager,
+ public PMTopLevelManager {
+ virtual void anchor();
+private:
+ bool wasRun;
+public:
+ static char ID;
+ explicit FunctionPassManagerImpl()
+ : Pass(PT_PassManager, ID), PMTopLevelManager(new FPPassManager()),
+ wasRun(false) {}
+
+ /// \copydoc FunctionPassManager::add()
+ void add(Pass *P) {
+ schedulePass(P);
+ }
+
+ /// createPrinterPass - Get a function printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintFunctionPass(O, Banner);
+ }
+
+ // Prepare for running an on-the-fly pass, freeing memory if needed
+ // from a previous run.
+ void releaseMemoryOnTheFly();
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool run(Function &F);
+
+ /// doInitialization - Run all of the initializers for the function passes.
+ ///
+ bool doInitialization(Module &M) override;
+
+ /// doFinalization - Run all of the finalizers for the function passes.
+ ///
+ bool doFinalization(Module &M) override;
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+ PassManagerType getTopLevelPassManagerType() override {
+ return PMT_FunctionPassManager;
+ }
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ FPPassManager *getContainedManager(unsigned N) {
+ assert(N < PassManagers.size() && "Pass number out of range!");
+ FPPassManager *FP = static_cast<FPPassManager *>(PassManagers[N]);
+ return FP;
+ }
+
+ void dumpPassStructure(unsigned Offset) override {
+ for (unsigned I = 0; I < getNumContainedManagers(); ++I)
+ getContainedManager(I)->dumpPassStructure(Offset);
+ }
+};
+
+void FunctionPassManagerImpl::anchor() {}
+
+char FunctionPassManagerImpl::ID = 0;
+
+//===----------------------------------------------------------------------===//
+// FunctionPassManagerImpl implementation
+//
+bool FunctionPassManagerImpl::doInitialization(Module &M) {
+ bool Changed = false;
+
+ dumpArguments();
+ dumpPasses();
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doInitialization(M);
+
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
+ Changed |= getContainedManager(Index)->doInitialization(M);
+
+ return Changed;
+}
+
+bool FunctionPassManagerImpl::doFinalization(Module &M) {
+ bool Changed = false;
+
+ for (int Index = getNumContainedManagers() - 1; Index >= 0; --Index)
+ Changed |= getContainedManager(Index)->doFinalization(M);
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doFinalization(M);
+
+ return Changed;
+}
+
+void FunctionPassManagerImpl::releaseMemoryOnTheFly() {
+ if (!wasRun)
+ return;
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
+ FPPassManager *FPPM = getContainedManager(Index);
+ for (unsigned Index = 0; Index < FPPM->getNumContainedPasses(); ++Index) {
+ FPPM->getContainedPass(Index)->releaseMemory();
+ }
+ }
+ wasRun = false;
+}
+
+// Execute all the passes managed by this top level manager.
+// Return true if any function is modified by a pass.
+bool FunctionPassManagerImpl::run(Function &F) {
+ bool Changed = false;
+
+ initializeAllAnalysisInfo();
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
+ Changed |= getContainedManager(Index)->runOnFunction(F);
+ F.getContext().yield();
+ }
+
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index)
+ getContainedManager(Index)->cleanup();
+
+ wasRun = true;
+ return Changed;
+}
+} // namespace legacy
+} // namespace llvm
+
+namespace {
+//===----------------------------------------------------------------------===//
+// MPPassManager
+//
+/// MPPassManager manages ModulePasses and function pass managers.
+/// It batches all Module passes and function pass managers together and
+/// sequences them to process one module.
+class MPPassManager : public Pass, public PMDataManager {
+public:
+ static char ID;
+ explicit MPPassManager() : Pass(PT_PassManager, ID) {}
+
+ // Delete the on-the-fly managers.
+ ~MPPassManager() override {
+ for (auto &OnTheFlyManager : OnTheFlyManagers) {
+ legacy::FunctionPassManagerImpl *FPP = OnTheFlyManager.second;
+ delete FPP;
+ }
+ }
+
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintModulePass(O, Banner);
+ }
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool runOnModule(Module &M);
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ /// Add RequiredPass into the list of lower level passes required by pass P.
+ /// RequiredPass is run on the fly by the pass manager when P requests it
+ /// through the getAnalysis interface.
+ void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) override;
+
+ /// Return the function pass corresponding to PassInfo PI that is required
+ /// by module pass MP. Instantiate the analysis pass by running its
+ /// runOnFunction() on function F.
+ std::tuple<Pass *, bool> getOnTheFlyPass(Pass *MP, AnalysisID PI,
+ Function &F) override;
+
+ StringRef getPassName() const override { return "Module Pass Manager"; }
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+
+ // Print passes managed by this manager
+ void dumpPassStructure(unsigned Offset) override {
+ dbgs().indent(Offset*2) << "ModulePass Manager\n";
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ ModulePass *MP = getContainedPass(Index);
+ MP->dumpPassStructure(Offset + 1);
+ MapVector<Pass *, legacy::FunctionPassManagerImpl *>::const_iterator I =
+ OnTheFlyManagers.find(MP);
+ if (I != OnTheFlyManagers.end())
+ I->second->dumpPassStructure(Offset + 2);
+ dumpLastUses(MP, Offset+1);
+ }
+ }
+
+ ModulePass *getContainedPass(unsigned N) {
+ assert(N < PassVector.size() && "Pass number out of range!");
+ return static_cast<ModulePass *>(PassVector[N]);
+ }
+
+ PassManagerType getPassManagerType() const override {
+ return PMT_ModulePassManager;
+ }
+
+ private:
+ /// Collection of on-the-fly FPPassManagers. These managers manage
+ /// function passes that are required by module passes.
+ MapVector<Pass *, legacy::FunctionPassManagerImpl *> OnTheFlyManagers;
+};
+
+char MPPassManager::ID = 0;
+} // End anonymous namespace
+
+namespace llvm {
+namespace legacy {
+//===----------------------------------------------------------------------===//
+// PassManagerImpl
+//
+
+/// PassManagerImpl manages MPPassManagers
+class PassManagerImpl : public Pass,
+ public PMDataManager,
+ public PMTopLevelManager {
+ virtual void anchor();
+
+public:
+ static char ID;
+ explicit PassManagerImpl()
+ : Pass(PT_PassManager, ID), PMTopLevelManager(new MPPassManager()) {}
+
+ /// \copydoc PassManager::add()
+ void add(Pass *P) {
+ schedulePass(P);
+ }
+
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override {
+ return createPrintModulePass(O, Banner);
+ }
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool run(Module &M);
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+ PassManagerType getTopLevelPassManagerType() override {
+ return PMT_ModulePassManager;
+ }
+
+ MPPassManager *getContainedManager(unsigned N) {
+ assert(N < PassManagers.size() && "Pass number out of range!");
+ MPPassManager *MP = static_cast<MPPassManager *>(PassManagers[N]);
+ return MP;
+ }
+};
+
+void PassManagerImpl::anchor() {}
+
+char PassManagerImpl::ID = 0;
+
+//===----------------------------------------------------------------------===//
+// PassManagerImpl implementation
+//
+/// run - Execute all of the passes scheduled for execution. Keep track of
+/// whether any of the passes modifies the module, and if so, return true.
+bool PassManagerImpl::run(Module &M) {
+ bool Changed = false;
+
+ dumpArguments();
+ dumpPasses();
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doInitialization(M);
+
+ initializeAllAnalysisInfo();
+ for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
+ Changed |= getContainedManager(Index)->runOnModule(M);
+ M.getContext().yield();
+ }
+
+ for (ImmutablePass *ImPass : getImmutablePasses())
+ Changed |= ImPass->doFinalization(M);
+
+ return Changed;
+}
+} // namespace legacy
+} // namespace llvm
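+
+// Illustrative driver usage of the legacy API implemented above, where
+// createSomePass() stands in for any real pass constructor:
+//
+//   legacy::PassManager PM;
+//   PM.add(createSomePass());
+//   bool Changed = PM.run(M);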
+
+//===----------------------------------------------------------------------===//
+// PMTopLevelManager implementation
+
+/// Initialize top level manager. Create first pass manager.
+PMTopLevelManager::PMTopLevelManager(PMDataManager *PMDM) {
+ PMDM->setTopLevelManager(this);
+ addPassManager(PMDM);
+ activeStack.push(PMDM);
+}
+
+/// Set pass P as the last user of the given analysis passes.
+void
+PMTopLevelManager::setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P) {
+ unsigned PDepth = 0;
+ if (P->getResolver())
+ PDepth = P->getResolver()->getPMDataManager().getDepth();
+
+ for (Pass *AP : AnalysisPasses) {
+ // Record P as the new last user of AP.
+ auto &LastUserOfAP = LastUser[AP];
+ if (LastUserOfAP)
+ InversedLastUser[LastUserOfAP].erase(AP);
+ LastUserOfAP = P;
+ InversedLastUser[P].insert(AP);
+
+ if (P == AP)
+ continue;
+
+ // Update the last users of passes that are transitively required by AP.
+ AnalysisUsage *AnUsage = findAnalysisUsage(AP);
+ const AnalysisUsage::VectorType &IDs = AnUsage->getRequiredTransitiveSet();
+ SmallVector<Pass *, 12> LastUses;
+ SmallVector<Pass *, 12> LastPMUses;
+ for (AnalysisID ID : IDs) {
+ Pass *AnalysisPass = findAnalysisPass(ID);
+ assert(AnalysisPass && "Expected analysis pass to exist.");
+ AnalysisResolver *AR = AnalysisPass->getResolver();
+ assert(AR && "Expected analysis resolver to exist.");
+ unsigned APDepth = AR->getPMDataManager().getDepth();
+
+ if (PDepth == APDepth)
+ LastUses.push_back(AnalysisPass);
+ else if (PDepth > APDepth)
+ LastPMUses.push_back(AnalysisPass);
+ }
+
+ setLastUser(LastUses, P);
+
+ // If this pass has a corresponding pass manager, push higher level
+ // analysis to this pass manager.
+ if (P->getResolver())
+ setLastUser(LastPMUses, P->getResolver()->getPMDataManager().getAsPass());
+
+    // If AP is the last user of other passes then make P the last user of
+    // those passes as well.
+ auto &LastUsedByAP = InversedLastUser[AP];
+ for (Pass *L : LastUsedByAP)
+ LastUser[L] = P;
+ InversedLastUser[P].insert(LastUsedByAP.begin(), LastUsedByAP.end());
+ LastUsedByAP.clear();
+ }
+}
+
+/// Collect passes whose last user is P
+void PMTopLevelManager::collectLastUses(SmallVectorImpl<Pass *> &LastUses,
+ Pass *P) {
+ auto DMI = InversedLastUser.find(P);
+ if (DMI == InversedLastUser.end())
+ return;
+
+ auto &LU = DMI->second;
+ LastUses.append(LU.begin(), LU.end());
+}
+
+AnalysisUsage *PMTopLevelManager::findAnalysisUsage(Pass *P) {
+ AnalysisUsage *AnUsage = nullptr;
+ auto DMI = AnUsageMap.find(P);
+ if (DMI != AnUsageMap.end())
+ AnUsage = DMI->second;
+ else {
+ // Look up the analysis usage from the pass instance (different instances
+ // of the same pass can produce different results), but unique the
+ // resulting object to reduce memory usage. This helps to greatly reduce
+ // memory usage when we have many instances of only a few pass types
+ // (e.g. instcombine, simplifycfg, etc...) which tend to share a fixed set
+ // of dependencies.
+ AnalysisUsage AU;
+ P->getAnalysisUsage(AU);
+
+ AUFoldingSetNode* Node = nullptr;
+ FoldingSetNodeID ID;
+ AUFoldingSetNode::Profile(ID, AU);
+ void *IP = nullptr;
+ if (auto *N = UniqueAnalysisUsages.FindNodeOrInsertPos(ID, IP))
+ Node = N;
+ else {
+ Node = new (AUFoldingSetNodeAllocator.Allocate()) AUFoldingSetNode(AU);
+ UniqueAnalysisUsages.InsertNode(Node, IP);
+ }
+ assert(Node && "cached analysis usage must be non null");
+
+ AnUsageMap[P] = &Node->AU;
+ AnUsage = &Node->AU;
+ }
+ return AnUsage;
+}
+
+/// Schedule pass P for execution. Make sure that passes required by
+/// P are run before P is run. Update analysis info maintained by
+/// the manager. Remove dead passes. This is a recursive function.
+void PMTopLevelManager::schedulePass(Pass *P) {
+
+  // TODO: Allocate a function manager for this pass; otherwise the required
+  // set may be inserted into the previous function manager.
+
+  // Give the pass a chance to prepare the stage.
+ P->preparePassManager(activeStack);
+
+ // If P is an analysis pass and it is available then do not
+ // generate the analysis again. Stale analysis info should not be
+ // available at this point.
+ const PassInfo *PI = findAnalysisPassInfo(P->getPassID());
+ if (PI && PI->isAnalysis() && findAnalysisPass(P->getPassID())) {
+ // Remove any cached AnalysisUsage information.
+ AnUsageMap.erase(P);
+ delete P;
+ return;
+ }
+
+ AnalysisUsage *AnUsage = findAnalysisUsage(P);
+
+ bool checkAnalysis = true;
+ while (checkAnalysis) {
+ checkAnalysis = false;
+
+ const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
+ for (const AnalysisID ID : RequiredSet) {
+
+ Pass *AnalysisPass = findAnalysisPass(ID);
+ if (!AnalysisPass) {
+ const PassInfo *PI = findAnalysisPassInfo(ID);
+
+ if (!PI) {
+ // Pass P is not in the global PassRegistry
+ dbgs() << "Pass '" << P->getPassName() << "' is not initialized." << "\n";
+ dbgs() << "Verify if there is a pass dependency cycle." << "\n";
+ dbgs() << "Required Passes:" << "\n";
+ for (const AnalysisID ID2 : RequiredSet) {
+ if (ID == ID2)
+ break;
+ Pass *AnalysisPass2 = findAnalysisPass(ID2);
+ if (AnalysisPass2) {
+ dbgs() << "\t" << AnalysisPass2->getPassName() << "\n";
+ } else {
+ dbgs() << "\t" << "Error: Required pass not found! Possible causes:" << "\n";
+ dbgs() << "\t\t" << "- Pass misconfiguration (e.g.: missing macros)" << "\n";
+ dbgs() << "\t\t" << "- Corruption of the global PassRegistry" << "\n";
+ }
+ }
+ }
+
+ assert(PI && "Expected required passes to be initialized");
+ AnalysisPass = PI->createPass();
+      if (P->getPotentialPassManagerType() ==
+          AnalysisPass->getPotentialPassManagerType())
+        // Schedule analysis pass that is managed by the same pass manager.
+        schedulePass(AnalysisPass);
+      else if (P->getPotentialPassManagerType() >
+               AnalysisPass->getPotentialPassManagerType()) {
+ // Schedule analysis pass that is managed by a new manager.
+ schedulePass(AnalysisPass);
+ // Recheck analysis passes to ensure that required analyses that
+ // are already checked are still available.
+ checkAnalysis = true;
+ } else
+ // Do not schedule this analysis. Lower level analysis
+ // passes are run on the fly.
+ delete AnalysisPass;
+ }
+ }
+ }
+
+ // Now all required passes are available.
+ if (ImmutablePass *IP = P->getAsImmutablePass()) {
+    // P is an immutable pass and it will be managed by this
+ // top level manager. Set up analysis resolver to connect them.
+ PMDataManager *DM = getAsPMDataManager();
+ AnalysisResolver *AR = new AnalysisResolver(*DM);
+ P->setResolver(AR);
+ DM->initializeAnalysisImpl(P);
+ addImmutablePass(IP);
+ DM->recordAvailableAnalysis(IP);
+ return;
+ }
+
+ if (PI && !PI->isAnalysis() && shouldPrintBeforePass(PI->getPassArgument())) {
+ Pass *PP =
+ P->createPrinterPass(dbgs(), ("*** IR Dump Before " + P->getPassName() +
+ " (" + PI->getPassArgument() + ") ***")
+ .str());
+ PP->assignPassManager(activeStack, getTopLevelPassManagerType());
+ }
+
+ // Add the requested pass to the best available pass manager.
+ P->assignPassManager(activeStack, getTopLevelPassManagerType());
+
+ if (PI && !PI->isAnalysis() && shouldPrintAfterPass(PI->getPassArgument())) {
+ Pass *PP =
+ P->createPrinterPass(dbgs(), ("*** IR Dump After " + P->getPassName() +
+ " (" + PI->getPassArgument() + ") ***")
+ .str());
+ PP->assignPassManager(activeStack, getTopLevelPassManagerType());
+ }
+}
+
+/// Find the pass that implements Analysis AID. Search immutable
+/// passes and all pass managers. If the desired pass is not found,
+/// return NULL.
+Pass *PMTopLevelManager::findAnalysisPass(AnalysisID AID) {
+ // For immutable passes we have a direct mapping from ID to pass, so check
+ // that first.
+ if (Pass *P = ImmutablePassMap.lookup(AID))
+ return P;
+
+ // Check pass managers
+ for (PMDataManager *PassManager : PassManagers)
+ if (Pass *P = PassManager->findAnalysisPass(AID, false))
+ return P;
+
+  // Check indirect pass managers
+ for (PMDataManager *IndirectPassManager : IndirectPassManagers)
+ if (Pass *P = IndirectPassManager->findAnalysisPass(AID, false))
+ return P;
+
+ return nullptr;
+}
+
+const PassInfo *PMTopLevelManager::findAnalysisPassInfo(AnalysisID AID) const {
+ const PassInfo *&PI = AnalysisPassInfos[AID];
+ if (!PI)
+ PI = PassRegistry::getPassRegistry()->getPassInfo(AID);
+ else
+ assert(PI == PassRegistry::getPassRegistry()->getPassInfo(AID) &&
+ "The pass info pointer changed for an analysis ID!");
+
+ return PI;
+}
+
+void PMTopLevelManager::addImmutablePass(ImmutablePass *P) {
+ P->initializePass();
+ ImmutablePasses.push_back(P);
+
+ // Add this pass to the map from its analysis ID. We clobber any prior runs
+ // of the pass in the map so that the last one added is the one found when
+ // doing lookups.
+ AnalysisID AID = P->getPassID();
+ ImmutablePassMap[AID] = P;
+
+ // Also add any interfaces implemented by the immutable pass to the map for
+ // fast lookup.
+ const PassInfo *PassInf = findAnalysisPassInfo(AID);
+ assert(PassInf && "Expected all immutable passes to be initialized");
+ for (const PassInfo *ImmPI : PassInf->getInterfacesImplemented())
+ ImmutablePassMap[ImmPI->getTypeInfo()] = P;
+}
+
+// Print passes managed by this top level manager.
+void PMTopLevelManager::dumpPasses() const {
+
+ if (PassDebugging < Structure)
+ return;
+
+ // Print out the immutable passes
+ for (unsigned i = 0, e = ImmutablePasses.size(); i != e; ++i) {
+ ImmutablePasses[i]->dumpPassStructure(0);
+ }
+
+ // Every class that derives from PMDataManager also derives from Pass
+ // (sometimes indirectly), but there's no inheritance relationship
+ // between PMDataManager and Pass, so we have to getAsPass to get
+ // from a PMDataManager* to a Pass*.
+ for (PMDataManager *Manager : PassManagers)
+ Manager->getAsPass()->dumpPassStructure(1);
+}
+
+void PMTopLevelManager::dumpArguments() const {
+
+ if (PassDebugging < Arguments)
+ return;
+
+ dbgs() << "Pass Arguments: ";
+ for (ImmutablePass *P : ImmutablePasses)
+ if (const PassInfo *PI = findAnalysisPassInfo(P->getPassID())) {
+ assert(PI && "Expected all immutable passes to be initialized");
+ if (!PI->isAnalysisGroup())
+ dbgs() << " -" << PI->getPassArgument();
+ }
+ for (PMDataManager *PM : PassManagers)
+ PM->dumpPassArguments();
+ dbgs() << "\n";
+}
+
+void PMTopLevelManager::initializeAllAnalysisInfo() {
+ for (PMDataManager *PM : PassManagers)
+ PM->initializeAnalysisInfo();
+
+  // Initialize the indirect pass managers
+ for (PMDataManager *IPM : IndirectPassManagers)
+ IPM->initializeAnalysisInfo();
+}
+
+/// Destructor
+PMTopLevelManager::~PMTopLevelManager() {
+ for (PMDataManager *PM : PassManagers)
+ delete PM;
+
+ for (ImmutablePass *P : ImmutablePasses)
+ delete P;
+}
+
+//===----------------------------------------------------------------------===//
+// PMDataManager implementation
+
+/// Augment AvailableAnalysis by adding analysis made available by pass P.
+void PMDataManager::recordAvailableAnalysis(Pass *P) {
+ AnalysisID PI = P->getPassID();
+
+ AvailableAnalysis[PI] = P;
+
+ assert(!AvailableAnalysis.empty());
+
+ // This pass is the current implementation of all of the interfaces it
+ // implements as well.
+ const PassInfo *PInf = TPM->findAnalysisPassInfo(PI);
+ if (!PInf) return;
+ for (const PassInfo *PI : PInf->getInterfacesImplemented())
+ AvailableAnalysis[PI->getTypeInfo()] = P;
+}
+
+// Return true if P preserves the high-level analyses used by other
+// passes managed by this manager.
+bool PMDataManager::preserveHigherLevelAnalysis(Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+ if (AnUsage->getPreservesAll())
+ return true;
+
+ const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
+ for (Pass *P1 : HigherLevelAnalysis) {
+ if (P1->getAsImmutablePass() == nullptr &&
+ !is_contained(PreservedSet, P1->getPassID()))
+ return false;
+ }
+
+ return true;
+}
+
+/// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
+void PMDataManager::verifyPreservedAnalysis(Pass *P) {
+ // Don't do this unless assertions are enabled.
+#ifdef NDEBUG
+ return;
+#endif
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+ const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
+
+ // Verify preserved analysis
+ for (AnalysisID AID : PreservedSet) {
+ if (Pass *AP = findAnalysisPass(AID, true)) {
+ TimeRegion PassTimer(getPassTimer(AP));
+ AP->verifyAnalysis();
+ }
+ }
+}
+
+/// Remove Analysis not preserved by Pass P
+void PMDataManager::removeNotPreservedAnalysis(Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+ if (AnUsage->getPreservesAll())
+ return;
+
+ const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
+ for (DenseMap<AnalysisID, Pass*>::iterator I = AvailableAnalysis.begin(),
+ E = AvailableAnalysis.end(); I != E; ) {
+ DenseMap<AnalysisID, Pass*>::iterator Info = I++;
+ if (Info->second->getAsImmutablePass() == nullptr &&
+ !is_contained(PreservedSet, Info->first)) {
+ // Remove this analysis
+ if (PassDebugging >= Details) {
+ Pass *S = Info->second;
+ dbgs() << " -- '" << P->getPassName() << "' is not preserving '";
+ dbgs() << S->getPassName() << "'\n";
+ }
+ AvailableAnalysis.erase(Info);
+ }
+ }
+
+ // Check inherited analysis also. If P is not preserving analysis
+ // provided by parent manager then remove it here.
+ for (DenseMap<AnalysisID, Pass *> *IA : InheritedAnalysis) {
+ if (!IA)
+ continue;
+
+ for (DenseMap<AnalysisID, Pass *>::iterator I = IA->begin(),
+ E = IA->end();
+ I != E;) {
+ DenseMap<AnalysisID, Pass *>::iterator Info = I++;
+ if (Info->second->getAsImmutablePass() == nullptr &&
+ !is_contained(PreservedSet, Info->first)) {
+ // Remove this analysis
+ if (PassDebugging >= Details) {
+ Pass *S = Info->second;
+ dbgs() << " -- '" << P->getPassName() << "' is not preserving '";
+ dbgs() << S->getPassName() << "'\n";
+ }
+ IA->erase(Info);
+ }
+ }
+ }
+}
+
+/// Remove analysis passes that are not used any longer
+void PMDataManager::removeDeadPasses(Pass *P, StringRef Msg,
+ enum PassDebuggingString DBG_STR) {
+
+ SmallVector<Pass *, 12> DeadPasses;
+
+  // If this is an on-the-fly manager then it does not have a TPM.
+ if (!TPM)
+ return;
+
+ TPM->collectLastUses(DeadPasses, P);
+
+ if (PassDebugging >= Details && !DeadPasses.empty()) {
+ dbgs() << " -*- '" << P->getPassName();
+ dbgs() << "' is the last user of following pass instances.";
+ dbgs() << " Free these instances\n";
+ }
+
+ for (Pass *P : DeadPasses)
+ freePass(P, Msg, DBG_STR);
+}
+
+void PMDataManager::freePass(Pass *P, StringRef Msg,
+ enum PassDebuggingString DBG_STR) {
+ dumpPassInfo(P, FREEING_MSG, DBG_STR, Msg);
+
+ {
+ // If the pass crashes releasing memory, remember this.
+ PassManagerPrettyStackEntry X(P);
+ TimeRegion PassTimer(getPassTimer(P));
+
+ P->releaseMemory();
+ }
+
+ AnalysisID PI = P->getPassID();
+ if (const PassInfo *PInf = TPM->findAnalysisPassInfo(PI)) {
+ // Remove the pass itself (if it is not already removed).
+ AvailableAnalysis.erase(PI);
+
+ // Remove all interfaces this pass implements, for which it is also
+ // listed as the available implementation.
+ for (const PassInfo *PI : PInf->getInterfacesImplemented()) {
+ DenseMap<AnalysisID, Pass *>::iterator Pos =
+ AvailableAnalysis.find(PI->getTypeInfo());
+ if (Pos != AvailableAnalysis.end() && Pos->second == P)
+ AvailableAnalysis.erase(Pos);
+ }
+ }
+}
+
+/// Add pass P into the PassVector. Update
+/// AvailableAnalysis appropriately if ProcessAnalysis is true.
+void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
+ // This manager is going to manage pass P. Set up analysis resolver
+ // to connect them.
+ AnalysisResolver *AR = new AnalysisResolver(*this);
+ P->setResolver(AR);
+
+  // If a FunctionPass F is the last user of ModulePass info M,
+  // then F's manager, not F, records itself as the last user of M.
+ SmallVector<Pass *, 12> TransferLastUses;
+
+ if (!ProcessAnalysis) {
+ // Add pass
+ PassVector.push_back(P);
+ return;
+ }
+
+ // At the moment, this pass is the last user of all required passes.
+ SmallVector<Pass *, 12> LastUses;
+ SmallVector<Pass *, 8> UsedPasses;
+ SmallVector<AnalysisID, 8> ReqAnalysisNotAvailable;
+
+ unsigned PDepth = this->getDepth();
+
+ collectRequiredAndUsedAnalyses(UsedPasses, ReqAnalysisNotAvailable, P);
+ for (Pass *PUsed : UsedPasses) {
+ unsigned RDepth = 0;
+
+ assert(PUsed->getResolver() && "Analysis Resolver is not set");
+ PMDataManager &DM = PUsed->getResolver()->getPMDataManager();
+ RDepth = DM.getDepth();
+
+ if (PDepth == RDepth)
+ LastUses.push_back(PUsed);
+ else if (PDepth > RDepth) {
+ // Let the parent claim responsibility of last use
+ TransferLastUses.push_back(PUsed);
+ // Keep track of higher level analysis used by this manager.
+ HigherLevelAnalysis.push_back(PUsed);
+ } else
+ llvm_unreachable("Unable to accommodate Used Pass");
+ }
+
+ // Set P as P's last user until someone starts using P.
+ // However, if P is a Pass Manager then it does not need
+ // to record its last user.
+ if (!P->getAsPMDataManager())
+ LastUses.push_back(P);
+ TPM->setLastUser(LastUses, P);
+
+ if (!TransferLastUses.empty()) {
+ Pass *My_PM = getAsPass();
+ TPM->setLastUser(TransferLastUses, My_PM);
+ TransferLastUses.clear();
+ }
+
+ // Now, take care of required analyses that are not available.
+ for (AnalysisID ID : ReqAnalysisNotAvailable) {
+ const PassInfo *PI = TPM->findAnalysisPassInfo(ID);
+ Pass *AnalysisPass = PI->createPass();
+ this->addLowerLevelRequiredPass(P, AnalysisPass);
+ }
+
+  // Take note of the analyses required and made available by this pass,
+  // and remove the analyses it does not preserve.
+ removeNotPreservedAnalysis(P);
+ recordAvailableAnalysis(P);
+
+ // Add pass
+ PassVector.push_back(P);
+}
+
+
+/// Populate UP with the analysis passes that are used or required by
+/// pass P and are available. Populate RP_NotAvail with the analysis
+/// passes that are required by pass P but are not available.
+void PMDataManager::collectRequiredAndUsedAnalyses(
+ SmallVectorImpl<Pass *> &UP, SmallVectorImpl<AnalysisID> &RP_NotAvail,
+ Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+
+ for (const auto &UsedID : AnUsage->getUsedSet())
+ if (Pass *AnalysisPass = findAnalysisPass(UsedID, true))
+ UP.push_back(AnalysisPass);
+
+ for (const auto &RequiredID : AnUsage->getRequiredSet())
+ if (Pass *AnalysisPass = findAnalysisPass(RequiredID, true))
+ UP.push_back(AnalysisPass);
+ else
+ RP_NotAvail.push_back(RequiredID);
+}
+
+// All Required analyses should be available to the pass as it runs! Here
+// we fill in the AnalysisImpls member of the pass so that it can
+// successfully use the getAnalysis() method to retrieve the
+// implementations it needs.
+//
+void PMDataManager::initializeAnalysisImpl(Pass *P) {
+ AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
+
+ for (const AnalysisID ID : AnUsage->getRequiredSet()) {
+ Pass *Impl = findAnalysisPass(ID, true);
+ if (!Impl)
+      // This may be an analysis pass that is initialized on the fly.
+      // If that is not the case, an assert will fire when it is used.
+ continue;
+ AnalysisResolver *AR = P->getResolver();
+ assert(AR && "Analysis Resolver is not set");
+ AR->addAnalysisImplsPair(ID, Impl);
+ }
+}
+
+/// Find the pass that implements Analysis AID. If the desired pass is not
+/// found, return NULL.
+Pass *PMDataManager::findAnalysisPass(AnalysisID AID, bool SearchParent) {
+
+  // Check if the AvailableAnalysis map has an entry for AID.
+ DenseMap<AnalysisID, Pass*>::const_iterator I = AvailableAnalysis.find(AID);
+
+ if (I != AvailableAnalysis.end())
+ return I->second;
+
+ // Search Parents through TopLevelManager
+ if (SearchParent)
+ return TPM->findAnalysisPass(AID);
+
+ return nullptr;
+}
+
+// Print list of passes that are last used by P.
+void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{
+ if (PassDebugging < Details)
+ return;
+
+ SmallVector<Pass *, 12> LUses;
+
+  // If this is an on-the-fly manager then it does not have a TPM.
+ if (!TPM)
+ return;
+
+ TPM->collectLastUses(LUses, P);
+
+ for (Pass *P : LUses) {
+ dbgs() << "--" << std::string(Offset*2, ' ');
+ P->dumpPassStructure(0);
+ }
+}
+
+void PMDataManager::dumpPassArguments() const {
+ for (Pass *P : PassVector) {
+ if (PMDataManager *PMD = P->getAsPMDataManager())
+ PMD->dumpPassArguments();
+ else
+ if (const PassInfo *PI =
+ TPM->findAnalysisPassInfo(P->getPassID()))
+ if (!PI->isAnalysisGroup())
+ dbgs() << " -" << PI->getPassArgument();
+ }
+}
+
+void PMDataManager::dumpPassInfo(Pass *P, enum PassDebuggingString S1,
+ enum PassDebuggingString S2,
+ StringRef Msg) {
+ if (PassDebugging < Executions)
+ return;
+ dbgs() << "[" << std::chrono::system_clock::now() << "] " << (void *)this
+ << std::string(getDepth() * 2 + 1, ' ');
+ switch (S1) {
+ case EXECUTION_MSG:
+ dbgs() << "Executing Pass '" << P->getPassName();
+ break;
+ case MODIFICATION_MSG:
+ dbgs() << "Made Modification '" << P->getPassName();
+ break;
+ case FREEING_MSG:
+ dbgs() << " Freeing Pass '" << P->getPassName();
+ break;
+ default:
+ break;
+ }
+ switch (S2) {
+ case ON_FUNCTION_MSG:
+ dbgs() << "' on Function '" << Msg << "'...\n";
+ break;
+ case ON_MODULE_MSG:
+ dbgs() << "' on Module '" << Msg << "'...\n";
+ break;
+ case ON_REGION_MSG:
+ dbgs() << "' on Region '" << Msg << "'...\n";
+ break;
+ case ON_LOOP_MSG:
+ dbgs() << "' on Loop '" << Msg << "'...\n";
+ break;
+ case ON_CG_MSG:
+ dbgs() << "' on Call Graph Nodes '" << Msg << "'...\n";
+ break;
+ default:
+ break;
+ }
+}
+
+void PMDataManager::dumpRequiredSet(const Pass *P) const {
+ if (PassDebugging < Details)
+ return;
+
+ AnalysisUsage analysisUsage;
+ P->getAnalysisUsage(analysisUsage);
+ dumpAnalysisUsage("Required", P, analysisUsage.getRequiredSet());
+}
+
+void PMDataManager::dumpPreservedSet(const Pass *P) const {
+ if (PassDebugging < Details)
+ return;
+
+ AnalysisUsage analysisUsage;
+ P->getAnalysisUsage(analysisUsage);
+ dumpAnalysisUsage("Preserved", P, analysisUsage.getPreservedSet());
+}
+
+void PMDataManager::dumpUsedSet(const Pass *P) const {
+ if (PassDebugging < Details)
+ return;
+
+ AnalysisUsage analysisUsage;
+ P->getAnalysisUsage(analysisUsage);
+ dumpAnalysisUsage("Used", P, analysisUsage.getUsedSet());
+}
+
+void PMDataManager::dumpAnalysisUsage(StringRef Msg, const Pass *P,
+ const AnalysisUsage::VectorType &Set) const {
+ assert(PassDebugging >= Details);
+ if (Set.empty())
+ return;
+ dbgs() << (const void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:";
+ for (unsigned i = 0; i != Set.size(); ++i) {
+ if (i) dbgs() << ',';
+ const PassInfo *PInf = TPM->findAnalysisPassInfo(Set[i]);
+ if (!PInf) {
+ // Some preserved passes, such as AliasAnalysis, may not be initialized by
+ // all drivers.
+ dbgs() << " Uninitialized Pass";
+ continue;
+ }
+ dbgs() << ' ' << PInf->getPassName();
+ }
+ dbgs() << '\n';
+}
+
+/// Add RequiredPass into the list of lower-level passes required by pass P.
+/// RequiredPass is run on the fly by the pass manager when P requests it
+/// through the getAnalysis interface.
+/// This should be handled by a specific pass manager.
+void PMDataManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
+ if (TPM) {
+ TPM->dumpArguments();
+ TPM->dumpPasses();
+ }
+
+  // A module-level pass may require function-level analysis info
+  // (e.g. dominator info). The pass manager uses an on-the-fly function pass
+  // manager to provide this on demand. In pass manager terminology, the
+  // module-level pass is then requiring lower-level analysis info managed by
+  // a lower-level pass manager.
+
+  // When the pass manager is unable to order the required analysis info, it
+  // checks whether any lower-level manager will be able to provide this
+  // analysis info on demand.
+#ifndef NDEBUG
+ dbgs() << "Unable to schedule '" << RequiredPass->getPassName();
+ dbgs() << "' required by '" << P->getPassName() << "'\n";
+#endif
+ llvm_unreachable("Unable to schedule pass");
+}
+
+std::tuple<Pass *, bool> PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI,
+ Function &F) {
+ llvm_unreachable("Unable to find on the fly pass");
+}
+
+// Destructor
+PMDataManager::~PMDataManager() {
+ for (Pass *P : PassVector)
+ delete P;
+}
+
+//===----------------------------------------------------------------------===//
+// NOTE: Is this the right place to define this method?
+// getAnalysisIfAvailable - Return analysis result or null if it doesn't exist.
+Pass *AnalysisResolver::getAnalysisIfAvailable(AnalysisID ID) const {
+ return PM.findAnalysisPass(ID, true);
+}
+
+std::tuple<Pass *, bool>
+AnalysisResolver::findImplPass(Pass *P, AnalysisID AnalysisPI, Function &F) {
+ return PM.getOnTheFlyPass(P, AnalysisPI, F);
+}
+
+namespace llvm {
+namespace legacy {
+
+//===----------------------------------------------------------------------===//
+// FunctionPassManager implementation
+
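+// Illustrative usage sketch (comment only, not upstream API docs): a driver
+// that owns Module M typically runs each function through the manager.
+// createSomeFunctionPass() below is a hypothetical pass factory.
+// \code
+//   legacy::FunctionPassManager FPM(&M);
+//   FPM.add(createSomeFunctionPass());
+//   FPM.doInitialization();
+//   for (Function &F : M)
+//     FPM.run(F);
+//   FPM.doFinalization();
+// \endcode
+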
+/// Create a new function pass manager.
+FunctionPassManager::FunctionPassManager(Module *m) : M(m) {
+ FPM = new legacy::FunctionPassManagerImpl();
+ // FPM is the top level manager.
+ FPM->setTopLevelManager(FPM);
+
+ AnalysisResolver *AR = new AnalysisResolver(*FPM);
+ FPM->setResolver(AR);
+}
+
+FunctionPassManager::~FunctionPassManager() {
+ delete FPM;
+}
+
+void FunctionPassManager::add(Pass *P) {
+ FPM->add(P);
+}
+
+/// run - Execute all of the passes scheduled for execution. Keep
+/// track of whether any of the passes modifies the function, and if
+/// so, return true.
+///
+bool FunctionPassManager::run(Function &F) {
+ handleAllErrors(F.materialize(), [&](ErrorInfoBase &EIB) {
+ report_fatal_error(Twine("Error reading bitcode file: ") + EIB.message());
+ });
+ return FPM->run(F);
+}
+
+
+/// doInitialization - Run all of the initializers for the function passes.
+///
+bool FunctionPassManager::doInitialization() {
+ return FPM->doInitialization(*M);
+}
+
+/// doFinalization - Run all of the finalizers for the function passes.
+///
+bool FunctionPassManager::doFinalization() {
+ return FPM->doFinalization(*M);
+}
+} // namespace legacy
+} // namespace llvm
+
+/// cleanup - After running all passes, clean up pass manager cache.
+void FPPassManager::cleanup() {
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ FunctionPass *FP = getContainedPass(Index);
+ AnalysisResolver *AR = FP->getResolver();
+ assert(AR && "Analysis Resolver is not set");
+ AR->clearAnalysisImpls();
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// FPPassManager implementation
+
+char FPPassManager::ID = 0;
+/// Print passes managed by this manager
+void FPPassManager::dumpPassStructure(unsigned Offset) {
+ dbgs().indent(Offset*2) << "FunctionPass Manager\n";
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ FunctionPass *FP = getContainedPass(Index);
+ FP->dumpPassStructure(Offset + 1);
+ dumpLastUses(FP, Offset+1);
+ }
+}
+
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnFunction method. Keep track of whether any of the passes modifies
+/// the function, and if so, return true.
+bool FPPassManager::runOnFunction(Function &F) {
+ if (F.isDeclaration())
+ return false;
+
+ bool Changed = false;
+ Module &M = *F.getParent();
+ // Collect inherited analysis from Module level pass manager.
+ populateInheritedAnalysis(TPM->activeStack);
+
+  unsigned InstrCount = 0, FunctionSize = 0;
+ StringMap<std::pair<unsigned, unsigned>> FunctionToInstrCount;
+ bool EmitICRemark = M.shouldEmitInstrCountChangedRemark();
+ // Collect the initial size of the module.
+ if (EmitICRemark) {
+ InstrCount = initSizeRemarkInfo(M, FunctionToInstrCount);
+ FunctionSize = F.getInstructionCount();
+ }
+
+ llvm::TimeTraceScope FunctionScope("OptFunction", F.getName());
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ FunctionPass *FP = getContainedPass(Index);
+ bool LocalChanged = false;
+
+ llvm::TimeTraceScope PassScope("RunPass", FP->getPassName());
+
+ dumpPassInfo(FP, EXECUTION_MSG, ON_FUNCTION_MSG, F.getName());
+ dumpRequiredSet(FP);
+
+ initializeAnalysisImpl(FP);
+
+ {
+ PassManagerPrettyStackEntry X(FP, F);
+ TimeRegion PassTimer(getPassTimer(FP));
+#ifdef EXPENSIVE_CHECKS
+ uint64_t RefHash = FP->structuralHash(F);
+#endif
+ LocalChanged |= FP->runOnFunction(F);
+
+#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
+ if (!LocalChanged && (RefHash != FP->structuralHash(F))) {
+ llvm::errs() << "Pass modifies its input and doesn't report it: "
+ << FP->getPassName() << "\n";
+ llvm_unreachable("Pass modifies its input and doesn't report it");
+ }
+#endif
+
+ if (EmitICRemark) {
+ unsigned NewSize = F.getInstructionCount();
+
+ // Update the size of the function, emit a remark, and update the size
+ // of the module.
+ if (NewSize != FunctionSize) {
+ int64_t Delta = static_cast<int64_t>(NewSize) -
+ static_cast<int64_t>(FunctionSize);
+ emitInstrCountChangedRemark(FP, M, Delta, InstrCount,
+ FunctionToInstrCount, &F);
+ InstrCount = static_cast<int64_t>(InstrCount) + Delta;
+ FunctionSize = NewSize;
+ }
+ }
+ }
+
+ Changed |= LocalChanged;
+ if (LocalChanged)
+ dumpPassInfo(FP, MODIFICATION_MSG, ON_FUNCTION_MSG, F.getName());
+ dumpPreservedSet(FP);
+ dumpUsedSet(FP);
+
+ verifyPreservedAnalysis(FP);
+ if (LocalChanged)
+ removeNotPreservedAnalysis(FP);
+ recordAvailableAnalysis(FP);
+ removeDeadPasses(FP, F.getName(), ON_FUNCTION_MSG);
+ }
+
+ return Changed;
+}
+
+bool FPPassManager::runOnModule(Module &M) {
+ bool Changed = false;
+
+ for (Function &F : M)
+ Changed |= runOnFunction(F);
+
+ return Changed;
+}
+
+bool FPPassManager::doInitialization(Module &M) {
+ bool Changed = false;
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index)
+ Changed |= getContainedPass(Index)->doInitialization(M);
+
+ return Changed;
+}
+
+bool FPPassManager::doFinalization(Module &M) {
+ bool Changed = false;
+
+ for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index)
+ Changed |= getContainedPass(Index)->doFinalization(M);
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// MPPassManager implementation
+
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnModule method. Keep track of whether any of the passes modifies
+/// the module, and if so, return true.
+bool
+MPPassManager::runOnModule(Module &M) {
+ llvm::TimeTraceScope TimeScope("OptModule", M.getName());
+
+ bool Changed = false;
+
+ // Initialize on-the-fly passes
+ for (auto &OnTheFlyManager : OnTheFlyManagers) {
+ legacy::FunctionPassManagerImpl *FPP = OnTheFlyManager.second;
+ Changed |= FPP->doInitialization(M);
+ }
+
+ // Initialize module passes
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index)
+ Changed |= getContainedPass(Index)->doInitialization(M);
+
+  unsigned InstrCount = 0;
+ StringMap<std::pair<unsigned, unsigned>> FunctionToInstrCount;
+ bool EmitICRemark = M.shouldEmitInstrCountChangedRemark();
+ // Collect the initial size of the module.
+ if (EmitICRemark)
+ InstrCount = initSizeRemarkInfo(M, FunctionToInstrCount);
+
+ for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
+ ModulePass *MP = getContainedPass(Index);
+ bool LocalChanged = false;
+
+ dumpPassInfo(MP, EXECUTION_MSG, ON_MODULE_MSG, M.getModuleIdentifier());
+ dumpRequiredSet(MP);
+
+ initializeAnalysisImpl(MP);
+
+ {
+ PassManagerPrettyStackEntry X(MP, M);
+ TimeRegion PassTimer(getPassTimer(MP));
+
+#ifdef EXPENSIVE_CHECKS
+ uint64_t RefHash = MP->structuralHash(M);
+#endif
+
+ LocalChanged |= MP->runOnModule(M);
+
+#ifdef EXPENSIVE_CHECKS
+ assert((LocalChanged || (RefHash == MP->structuralHash(M))) &&
+ "Pass modifies its input and doesn't report it.");
+#endif
+
+ if (EmitICRemark) {
+ // Update the size of the module.
+ unsigned ModuleCount = M.getInstructionCount();
+ if (ModuleCount != InstrCount) {
+ int64_t Delta = static_cast<int64_t>(ModuleCount) -
+ static_cast<int64_t>(InstrCount);
+ emitInstrCountChangedRemark(MP, M, Delta, InstrCount,
+ FunctionToInstrCount);
+ InstrCount = ModuleCount;
+ }
+ }
+ }
+
+ Changed |= LocalChanged;
+ if (LocalChanged)
+ dumpPassInfo(MP, MODIFICATION_MSG, ON_MODULE_MSG,
+ M.getModuleIdentifier());
+ dumpPreservedSet(MP);
+ dumpUsedSet(MP);
+
+ verifyPreservedAnalysis(MP);
+ if (LocalChanged)
+ removeNotPreservedAnalysis(MP);
+ recordAvailableAnalysis(MP);
+ removeDeadPasses(MP, M.getModuleIdentifier(), ON_MODULE_MSG);
+ }
+
+ // Finalize module passes
+ for (int Index = getNumContainedPasses() - 1; Index >= 0; --Index)
+ Changed |= getContainedPass(Index)->doFinalization(M);
+
+ // Finalize on-the-fly passes
+ for (auto &OnTheFlyManager : OnTheFlyManagers) {
+ legacy::FunctionPassManagerImpl *FPP = OnTheFlyManager.second;
+    // We don't know when an on-the-fly pass runs for the last time,
+    // so we need to release memory / finalize here.
+ FPP->releaseMemoryOnTheFly();
+ Changed |= FPP->doFinalization(M);
+ }
+
+ return Changed;
+}
+
+/// Add RequiredPass into the list of lower-level passes required by pass P.
+/// RequiredPass is run on the fly by the pass manager when P requests it
+/// through the getAnalysis interface.
+void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
+ assert(RequiredPass && "No required pass?");
+ assert(P->getPotentialPassManagerType() == PMT_ModulePassManager &&
+ "Unable to handle Pass that requires lower level Analysis pass");
+ assert((P->getPotentialPassManagerType() <
+ RequiredPass->getPotentialPassManagerType()) &&
+ "Unable to handle Pass that requires lower level Analysis pass");
+
+ legacy::FunctionPassManagerImpl *FPP = OnTheFlyManagers[P];
+ if (!FPP) {
+ FPP = new legacy::FunctionPassManagerImpl();
+ // FPP is the top level manager.
+ FPP->setTopLevelManager(FPP);
+
+ OnTheFlyManagers[P] = FPP;
+ }
+ const PassInfo *RequiredPassPI =
+ TPM->findAnalysisPassInfo(RequiredPass->getPassID());
+
+ Pass *FoundPass = nullptr;
+ if (RequiredPassPI && RequiredPassPI->isAnalysis()) {
+ FoundPass =
+ ((PMTopLevelManager*)FPP)->findAnalysisPass(RequiredPass->getPassID());
+ }
+ if (!FoundPass) {
+ FoundPass = RequiredPass;
+ // This should be guaranteed to add RequiredPass to the passmanager given
+ // that we checked for an available analysis above.
+ FPP->add(RequiredPass);
+ }
+ // Register P as the last user of FoundPass or RequiredPass.
+ SmallVector<Pass *, 1> LU;
+ LU.push_back(FoundPass);
+ FPP->setLastUser(LU, P);
+}
+
+/// Return the function pass corresponding to PassInfo PI that is required by
+/// module pass MP. Instantiate the analysis pass and run it on function F
+/// via its runOnFunction().
+std::tuple<Pass *, bool> MPPassManager::getOnTheFlyPass(Pass *MP, AnalysisID PI,
+ Function &F) {
+ legacy::FunctionPassManagerImpl *FPP = OnTheFlyManagers[MP];
+ assert(FPP && "Unable to find on the fly pass");
+
+ FPP->releaseMemoryOnTheFly();
+ bool Changed = FPP->run(F);
+ return std::make_tuple(((PMTopLevelManager *)FPP)->findAnalysisPass(PI),
+ Changed);
+}
+
+namespace llvm {
+namespace legacy {
+
+//===----------------------------------------------------------------------===//
+// PassManager implementation
+
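+// Illustrative usage sketch (comment only): build and run a module-level
+// pipeline; createPrintModulePass is used here just as a readily available
+// module pass.
+// \code
+//   legacy::PassManager PM;
+//   PM.add(createPrintModulePass(outs()));
+//   bool Changed = PM.run(M);
+// \endcode
+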
+/// Create a new pass manager.
+PassManager::PassManager() {
+ PM = new PassManagerImpl();
+ // PM is the top level manager
+ PM->setTopLevelManager(PM);
+}
+
+PassManager::~PassManager() {
+ delete PM;
+}
+
+void PassManager::add(Pass *P) {
+ PM->add(P);
+}
+
+/// run - Execute all of the passes scheduled for execution. Keep track of
+/// whether any of the passes modifies the module, and if so, return true.
+bool PassManager::run(Module &M) {
+ return PM->run(M);
+}
+} // namespace legacy
+} // namespace llvm
+
+//===----------------------------------------------------------------------===//
+// PMStack implementation
+//
+
+// Pop Pass Manager from the stack and clear its analysis info.
+void PMStack::pop() {
+
+ PMDataManager *Top = this->top();
+ Top->initializeAnalysisInfo();
+
+ S.pop_back();
+}
+
+// Push PM on the stack and set its top level manager.
+void PMStack::push(PMDataManager *PM) {
+ assert(PM && "Unable to push. Pass Manager expected");
+ assert(PM->getDepth()==0 && "Pass Manager depth set too early");
+
+ if (!this->empty()) {
+ assert(PM->getPassManagerType() > this->top()->getPassManagerType()
+ && "pushing bad pass manager to PMStack");
+ PMTopLevelManager *TPM = this->top()->getTopLevelManager();
+
+ assert(TPM && "Unable to find top level manager");
+ TPM->addIndirectPassManager(PM);
+ PM->setTopLevelManager(TPM);
+ PM->setDepth(this->top()->getDepth()+1);
+ } else {
+ assert((PM->getPassManagerType() == PMT_ModulePassManager
+ || PM->getPassManagerType() == PMT_FunctionPassManager)
+ && "pushing bad pass manager to PMStack");
+ PM->setDepth(1);
+ }
+
+ S.push_back(PM);
+}
+
+// Dump content of the pass manager stack.
+LLVM_DUMP_METHOD void PMStack::dump() const {
+ for (PMDataManager *Manager : S)
+ dbgs() << Manager->getAsPass()->getPassName() << ' ';
+
+ if (!S.empty())
+ dbgs() << '\n';
+}
+
+/// Find appropriate Module Pass Manager in the PM Stack and
+/// add self into that manager.
+void ModulePass::assignPassManager(PMStack &PMS,
+ PassManagerType PreferredType) {
+ // Find Module Pass Manager
+ PassManagerType T;
+ while ((T = PMS.top()->getPassManagerType()) > PMT_ModulePassManager &&
+ T != PreferredType)
+ PMS.pop();
+ PMS.top()->add(this);
+}
+
+/// Find appropriate Function Pass Manager or Call Graph Pass Manager
+/// in the PM Stack and add self into that manager.
+void FunctionPass::assignPassManager(PMStack &PMS,
+ PassManagerType /*PreferredType*/) {
+ // Find Function Pass Manager
+ PMDataManager *PM;
+ while (PM = PMS.top(), PM->getPassManagerType() > PMT_FunctionPassManager)
+ PMS.pop();
+
+ // Create new Function Pass Manager if needed.
+ if (PM->getPassManagerType() != PMT_FunctionPassManager) {
+ // [1] Create new Function Pass Manager
+ auto *FPP = new FPPassManager;
+ FPP->populateInheritedAnalysis(PMS);
+
+ // [2] Set up new manager's top level manager
+ PM->getTopLevelManager()->addIndirectPassManager(FPP);
+
+ // [3] Assign manager to manage this new manager. This may create
+ // and push new managers into PMS
+ FPP->assignPassManager(PMS, PM->getPassManagerType());
+
+ // [4] Push new manager into PMS
+ PMS.push(FPP);
+ PM = FPP;
+ }
+
+  // Assign PM as the manager of this pass.
+ PM->add(this);
+}
+
+legacy::PassManagerBase::~PassManagerBase() = default;
diff --git a/contrib/llvm-project/llvm/lib/IR/MDBuilder.cpp b/contrib/llvm-project/llvm/lib/IR/MDBuilder.cpp
new file mode 100644
index 000000000000..fc59fda9fe22
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/MDBuilder.cpp
@@ -0,0 +1,325 @@
+//===---- llvm/MDBuilder.cpp - Builder for LLVM metadata ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MDBuilder class, which is used as a convenient way to
+// create LLVM metadata with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Metadata.h"
+using namespace llvm;
+
+MDString *MDBuilder::createString(StringRef Str) {
+ return MDString::get(Context, Str);
+}
+
+ConstantAsMetadata *MDBuilder::createConstant(Constant *C) {
+ return ConstantAsMetadata::get(C);
+}
+
+MDNode *MDBuilder::createFPMath(float Accuracy) {
+ if (Accuracy == 0.0)
+ return nullptr;
+ assert(Accuracy > 0.0 && "Invalid fpmath accuracy!");
+ auto *Op =
+ createConstant(ConstantFP::get(Type::getFloatTy(Context), Accuracy));
+ return MDNode::get(Context, Op);
+}
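+
+// Illustrative use (assumes LLVMContext &Ctx and an existing IRBuilder B):
+// install the node as the default !fpmath tag so subsequent FP operations
+// carry the accuracy bound.
+//   MDBuilder MDB(Ctx);
+//   B.setDefaultFPMathTag(MDB.createFPMath(2.5f));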
+
+MDNode *MDBuilder::createBranchWeights(uint32_t TrueWeight,
+ uint32_t FalseWeight) {
+ return createBranchWeights({TrueWeight, FalseWeight});
+}
+
+MDNode *MDBuilder::createBranchWeights(ArrayRef<uint32_t> Weights) {
+  assert(Weights.size() >= 1 && "Need at least one branch weight!");
+
+ SmallVector<Metadata *, 4> Vals(Weights.size() + 1);
+ Vals[0] = createString("branch_weights");
+
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ for (unsigned i = 0, e = Weights.size(); i != e; ++i)
+ Vals[i + 1] = createConstant(ConstantInt::get(Int32Ty, Weights[i]));
+
+ return MDNode::get(Context, Vals);
+}
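+
+// Illustrative use (assumes MDBuilder MDB and a conditional BranchInst *BI):
+// mark the true edge as taken roughly 90% of the time.
+//   BI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(90, 10));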
+
+MDNode *MDBuilder::createUnpredictable() {
+ return MDNode::get(Context, None);
+}
+
+MDNode *MDBuilder::createFunctionEntryCount(
+ uint64_t Count, bool Synthetic,
+ const DenseSet<GlobalValue::GUID> *Imports) {
+ Type *Int64Ty = Type::getInt64Ty(Context);
+ SmallVector<Metadata *, 8> Ops;
+ if (Synthetic)
+ Ops.push_back(createString("synthetic_function_entry_count"));
+ else
+ Ops.push_back(createString("function_entry_count"));
+ Ops.push_back(createConstant(ConstantInt::get(Int64Ty, Count)));
+ if (Imports) {
+ SmallVector<GlobalValue::GUID, 2> OrderID(Imports->begin(), Imports->end());
+ llvm::sort(OrderID);
+ for (auto ID : OrderID)
+ Ops.push_back(createConstant(ConstantInt::get(Int64Ty, ID)));
+ }
+ return MDNode::get(Context, Ops);
+}
+
+MDNode *MDBuilder::createFunctionSectionPrefix(StringRef Prefix) {
+ return MDNode::get(Context,
+ {createString("function_section_prefix"),
+ createString(Prefix)});
+}
+
+MDNode *MDBuilder::createRange(const APInt &Lo, const APInt &Hi) {
+ assert(Lo.getBitWidth() == Hi.getBitWidth() && "Mismatched bitwidths!");
+
+ Type *Ty = IntegerType::get(Context, Lo.getBitWidth());
+ return createRange(ConstantInt::get(Ty, Lo), ConstantInt::get(Ty, Hi));
+}
+
+MDNode *MDBuilder::createRange(Constant *Lo, Constant *Hi) {
+ // If the range is everything then it is useless.
+ if (Hi == Lo)
+ return nullptr;
+
+ // Return the range [Lo, Hi).
+ return MDNode::get(Context, {createConstant(Lo), createConstant(Hi)});
+}
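+
+// Illustrative use (assumes MDBuilder MDB and a LoadInst *LI of an i8 that
+// holds a boolean): assert the loaded value lies in [0, 2).
+//   LI->setMetadata(LLVMContext::MD_range,
+//                   MDB.createRange(APInt(8, 0), APInt(8, 2)));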
+
+MDNode *MDBuilder::createCallees(ArrayRef<Function *> Callees) {
+ SmallVector<Metadata *, 4> Ops;
+ for (Function *F : Callees)
+ Ops.push_back(createConstant(F));
+ return MDNode::get(Context, Ops);
+}
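+
+// Illustrative use (assumes MDBuilder MDB and a call CB whose callee is known
+// to be one of F1 or F2):
+//   CB->setMetadata(LLVMContext::MD_callees, MDB.createCallees({F1, F2}));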
+
+MDNode *MDBuilder::createCallbackEncoding(unsigned CalleeArgNo,
+ ArrayRef<int> Arguments,
+ bool VarArgArePassed) {
+ SmallVector<Metadata *, 4> Ops;
+
+ Type *Int64 = Type::getInt64Ty(Context);
+ Ops.push_back(createConstant(ConstantInt::get(Int64, CalleeArgNo)));
+
+ for (int ArgNo : Arguments)
+ Ops.push_back(createConstant(ConstantInt::get(Int64, ArgNo, true)));
+
+ Type *Int1 = Type::getInt1Ty(Context);
+ Ops.push_back(createConstant(ConstantInt::get(Int1, VarArgArePassed)));
+
+ return MDNode::get(Context, Ops);
+}
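+
+// For example, pthread_create's callback is its third argument (index 2) and
+// is invoked with the fourth argument (index 3) as its payload; that encoding
+// would be created as:
+//   MDB.createCallbackEncoding(/*CalleeArgNo=*/2, /*Arguments=*/{3},
+//                              /*VarArgArePassed=*/false);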
+
+MDNode *MDBuilder::mergeCallbackEncodings(MDNode *ExistingCallbacks,
+ MDNode *NewCB) {
+ if (!ExistingCallbacks)
+ return MDNode::get(Context, {NewCB});
+
+ auto *NewCBCalleeIdxAsCM = cast<ConstantAsMetadata>(NewCB->getOperand(0));
+ uint64_t NewCBCalleeIdx =
+ cast<ConstantInt>(NewCBCalleeIdxAsCM->getValue())->getZExtValue();
+ (void)NewCBCalleeIdx;
+
+ SmallVector<Metadata *, 4> Ops;
+ unsigned NumExistingOps = ExistingCallbacks->getNumOperands();
+ Ops.resize(NumExistingOps + 1);
+
+ for (unsigned u = 0; u < NumExistingOps; u++) {
+ Ops[u] = ExistingCallbacks->getOperand(u);
+
+ auto *OldCBCalleeIdxAsCM = cast<ConstantAsMetadata>(Ops[u]);
+ uint64_t OldCBCalleeIdx =
+ cast<ConstantInt>(OldCBCalleeIdxAsCM->getValue())->getZExtValue();
+ (void)OldCBCalleeIdx;
+ assert(NewCBCalleeIdx != OldCBCalleeIdx &&
+ "Cannot map a callback callee index twice!");
+ }
+
+ Ops[NumExistingOps] = NewCB;
+ return MDNode::get(Context, Ops);
+}
+
+MDNode *MDBuilder::createRTTIPointerPrologue(Constant *PrologueSig,
+ Constant *RTTI) {
+ SmallVector<Metadata *, 4> Ops;
+ Ops.push_back(createConstant(PrologueSig));
+ Ops.push_back(createConstant(RTTI));
+ return MDNode::get(Context, Ops);
+}
+
+MDNode *MDBuilder::createAnonymousAARoot(StringRef Name, MDNode *Extra) {
+ SmallVector<Metadata *, 3> Args(1, nullptr);
+ if (Extra)
+ Args.push_back(Extra);
+ if (!Name.empty())
+ Args.push_back(createString(Name));
+ MDNode *Root = MDNode::getDistinct(Context, Args);
+
+ // At this point we have
+ // !0 = distinct !{null} <- root
+ // Replace the reserved operand with the root node itself.
+ Root->replaceOperandWith(0, Root);
+
+ // We now have
+ // !0 = distinct !{!0} <- root
+ return Root;
+}
+
+MDNode *MDBuilder::createTBAARoot(StringRef Name) {
+ return MDNode::get(Context, createString(Name));
+}
+
+/// Return metadata for a non-root TBAA node with the given name,
+/// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
+MDNode *MDBuilder::createTBAANode(StringRef Name, MDNode *Parent,
+ bool isConstant) {
+ if (isConstant) {
+ Constant *Flags = ConstantInt::get(Type::getInt64Ty(Context), 1);
+ return MDNode::get(Context,
+ {createString(Name), Parent, createConstant(Flags)});
+ }
+ return MDNode::get(Context, {createString(Name), Parent});
+}
+
+MDNode *MDBuilder::createAliasScopeDomain(StringRef Name) {
+ return MDNode::get(Context, createString(Name));
+}
+
+MDNode *MDBuilder::createAliasScope(StringRef Name, MDNode *Domain) {
+ return MDNode::get(Context, {createString(Name), Domain});
+}
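+
+// Illustrative use (assumes MDBuilder MDB and an Instruction *I): the
+// !alias.scope attachment is a list of scopes, so wrap the scope in its own
+// MDNode.
+//   MDNode *Domain = MDB.createAliasScopeDomain("my.domain");
+//   MDNode *Scope = MDB.createAliasScope("my.scope", Domain);
+//   I->setMetadata(LLVMContext::MD_alias_scope, MDNode::get(Ctx, Scope));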
+
+/// Return metadata for a tbaa.struct node with the given
+/// struct field descriptions.
+MDNode *MDBuilder::createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
+ SmallVector<Metadata *, 4> Vals(Fields.size() * 3);
+ Type *Int64 = Type::getInt64Ty(Context);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Vals[i * 3 + 0] = createConstant(ConstantInt::get(Int64, Fields[i].Offset));
+ Vals[i * 3 + 1] = createConstant(ConstantInt::get(Int64, Fields[i].Size));
+ Vals[i * 3 + 2] = Fields[i].Type;
+ }
+ return MDNode::get(Context, Vals);
+}
+
+/// Return metadata for a TBAA struct node in the type DAG
+/// with the given name, a list of pairs (offset, field type in the type DAG).
+MDNode *MDBuilder::createTBAAStructTypeNode(
+ StringRef Name, ArrayRef<std::pair<MDNode *, uint64_t>> Fields) {
+ SmallVector<Metadata *, 4> Ops(Fields.size() * 2 + 1);
+ Type *Int64 = Type::getInt64Ty(Context);
+ Ops[0] = createString(Name);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Ops[i * 2 + 1] = Fields[i].first;
+ Ops[i * 2 + 2] = createConstant(ConstantInt::get(Int64, Fields[i].second));
+ }
+ return MDNode::get(Context, Ops);
+}
+
+/// Return metadata for a TBAA scalar type node with the
+/// given name, an offset and a parent in the TBAA type DAG.
+MDNode *MDBuilder::createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
+ uint64_t Offset) {
+ ConstantInt *Off = ConstantInt::get(Type::getInt64Ty(Context), Offset);
+ return MDNode::get(Context,
+ {createString(Name), Parent, createConstant(Off)});
+}
+
+/// Return metadata for a TBAA tag node with the given
+/// base type, access type and offset relative to the base type.
+MDNode *MDBuilder::createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
+ uint64_t Offset, bool IsConstant) {
+ IntegerType *Int64 = Type::getInt64Ty(Context);
+ ConstantInt *Off = ConstantInt::get(Int64, Offset);
+ if (IsConstant) {
+ return MDNode::get(Context, {BaseType, AccessType, createConstant(Off),
+ createConstant(ConstantInt::get(Int64, 1))});
+ }
+ return MDNode::get(Context, {BaseType, AccessType, createConstant(Off)});
+}
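+
+// Illustrative use (old-format scalar TBAA; assumes MDBuilder MDB and a
+// LoadInst *LI):
+//   MDNode *Root = MDB.createTBAARoot("example tbaa");
+//   MDNode *IntTy = MDB.createTBAAScalarTypeNode("int", Root, 0);
+//   LI->setMetadata(LLVMContext::MD_tbaa,
+//                   MDB.createTBAAStructTagNode(IntTy, IntTy, 0));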
+
+MDNode *MDBuilder::createTBAATypeNode(MDNode *Parent, uint64_t Size,
+ Metadata *Id,
+ ArrayRef<TBAAStructField> Fields) {
+ SmallVector<Metadata *, 4> Ops(3 + Fields.size() * 3);
+ Type *Int64 = Type::getInt64Ty(Context);
+ Ops[0] = Parent;
+ Ops[1] = createConstant(ConstantInt::get(Int64, Size));
+ Ops[2] = Id;
+ for (unsigned I = 0, E = Fields.size(); I != E; ++I) {
+ Ops[I * 3 + 3] = Fields[I].Type;
+ Ops[I * 3 + 4] = createConstant(ConstantInt::get(Int64, Fields[I].Offset));
+ Ops[I * 3 + 5] = createConstant(ConstantInt::get(Int64, Fields[I].Size));
+ }
+ return MDNode::get(Context, Ops);
+}
+
+MDNode *MDBuilder::createTBAAAccessTag(MDNode *BaseType, MDNode *AccessType,
+ uint64_t Offset, uint64_t Size,
+ bool IsImmutable) {
+ IntegerType *Int64 = Type::getInt64Ty(Context);
+ auto *OffsetNode = createConstant(ConstantInt::get(Int64, Offset));
+ auto *SizeNode = createConstant(ConstantInt::get(Int64, Size));
+ if (IsImmutable) {
+ auto *ImmutabilityFlagNode = createConstant(ConstantInt::get(Int64, 1));
+ return MDNode::get(Context, {BaseType, AccessType, OffsetNode, SizeNode,
+ ImmutabilityFlagNode});
+ }
+ return MDNode::get(Context, {BaseType, AccessType, OffsetNode, SizeNode});
+}
+
+MDNode *MDBuilder::createMutableTBAAAccessTag(MDNode *Tag) {
+ MDNode *BaseType = cast<MDNode>(Tag->getOperand(0));
+ MDNode *AccessType = cast<MDNode>(Tag->getOperand(1));
+ Metadata *OffsetNode = Tag->getOperand(2);
+ uint64_t Offset = mdconst::extract<ConstantInt>(OffsetNode)->getZExtValue();
+
+ bool NewFormat = isa<MDNode>(AccessType->getOperand(0));
+
+ // See if the tag is already mutable.
+ unsigned ImmutabilityFlagOp = NewFormat ? 4 : 3;
+ if (Tag->getNumOperands() <= ImmutabilityFlagOp)
+ return Tag;
+
+ // If Tag is already mutable then return it.
+ Metadata *ImmutabilityFlagNode = Tag->getOperand(ImmutabilityFlagOp);
+ if (!mdconst::extract<ConstantInt>(ImmutabilityFlagNode)->getValue())
+ return Tag;
+
+ // Otherwise, create another node.
+ if (!NewFormat)
+ return createTBAAStructTagNode(BaseType, AccessType, Offset);
+
+ Metadata *SizeNode = Tag->getOperand(3);
+ uint64_t Size = mdconst::extract<ConstantInt>(SizeNode)->getZExtValue();
+ return createTBAAAccessTag(BaseType, AccessType, Offset, Size);
+}
+
+MDNode *MDBuilder::createIrrLoopHeaderWeight(uint64_t Weight) {
+ Metadata *Vals[] = {
+ createString("loop_header_weight"),
+ createConstant(ConstantInt::get(Type::getInt64Ty(Context), Weight)),
+ };
+ return MDNode::get(Context, Vals);
+}
+
+MDNode *MDBuilder::createPseudoProbeDesc(uint64_t GUID, uint64_t Hash,
+ Function *F) {
+ auto *Int64Ty = Type::getInt64Ty(Context);
+ SmallVector<Metadata *, 3> Ops(3);
+ Ops[0] = createConstant(ConstantInt::get(Int64Ty, GUID));
+ Ops[1] = createConstant(ConstantInt::get(Int64Ty, Hash));
+ Ops[2] = createString(F->getName());
+ return MDNode::get(Context, Ops);
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/Mangler.cpp b/contrib/llvm-project/llvm/lib/IR/Mangler.cpp
new file mode 100644
index 000000000000..b8e3e40e4c1d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Mangler.cpp
@@ -0,0 +1,260 @@
+//===-- Mangler.cpp - Self-contained c/asm llvm name mangler --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Unified name mangler for assembly backends.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Mangler.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+enum ManglerPrefixTy {
+ Default, ///< Emit default string before each symbol.
+ Private, ///< Emit "private" prefix before each symbol.
+ LinkerPrivate ///< Emit "linker private" prefix before each symbol.
+};
+}
+
+static void getNameWithPrefixImpl(raw_ostream &OS, const Twine &GVName,
+ ManglerPrefixTy PrefixTy,
+ const DataLayout &DL, char Prefix) {
+ SmallString<256> TmpData;
+ StringRef Name = GVName.toStringRef(TmpData);
+ assert(!Name.empty() && "getNameWithPrefix requires non-empty name");
+
+ // No need to do anything special if the global has the special "do not
+ // mangle" flag in the name.
+ if (Name[0] == '\1') {
+ OS << Name.substr(1);
+ return;
+ }
+
+ if (DL.doNotMangleLeadingQuestionMark() && Name[0] == '?')
+ Prefix = '\0';
+
+ if (PrefixTy == Private)
+ OS << DL.getPrivateGlobalPrefix();
+ else if (PrefixTy == LinkerPrivate)
+ OS << DL.getLinkerPrivateGlobalPrefix();
+
+ if (Prefix != '\0')
+ OS << Prefix;
+
+  // Append the name itself.
+ OS << Name;
+}
+
+static void getNameWithPrefixImpl(raw_ostream &OS, const Twine &GVName,
+ const DataLayout &DL,
+ ManglerPrefixTy PrefixTy) {
+ char Prefix = DL.getGlobalPrefix();
+ return getNameWithPrefixImpl(OS, GVName, PrefixTy, DL, Prefix);
+}
+
+void Mangler::getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
+ const DataLayout &DL) {
+ return getNameWithPrefixImpl(OS, GVName, DL, Default);
+}
+
+void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const Twine &GVName, const DataLayout &DL) {
+ raw_svector_ostream OS(OutName);
+ char Prefix = DL.getGlobalPrefix();
+ return getNameWithPrefixImpl(OS, GVName, Default, DL, Prefix);
+}
+
+static bool hasByteCountSuffix(CallingConv::ID CC) {
+ switch (CC) {
+ case CallingConv::X86_FastCall:
+ case CallingConv::X86_StdCall:
+ case CallingConv::X86_VectorCall:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Microsoft fastcall and stdcall functions require a suffix on their name
+/// indicating the number of words of arguments they take.
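+/// For example (illustrative), a 32-bit x86 stdcall function
+/// void f(int, int) is emitted as "_f@8", and the fastcall variant as "@f@8".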
+static void addByteCountSuffix(raw_ostream &OS, const Function *F,
+ const DataLayout &DL) {
+ // Calculate arguments size total.
+ unsigned ArgWords = 0;
+
+ const unsigned PtrSize = DL.getPointerSize();
+
+ for (const Argument &A : F->args()) {
+ // For the purposes of the byte count suffix, structs returned by pointer
+ // do not count as function arguments.
+ if (A.hasStructRetAttr())
+ continue;
+
+ // 'Dereference' type in case of byval or inalloca parameter attribute.
+ uint64_t AllocSize = A.hasPassPointeeByValueCopyAttr() ?
+ A.getPassPointeeByValueCopySize(DL) :
+ DL.getTypeAllocSize(A.getType());
+
+ // Size should be aligned to pointer size.
+ ArgWords += alignTo(AllocSize, PtrSize);
+ }
+
+ OS << '@' << ArgWords;
+}
+
+void Mangler::getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const {
+ ManglerPrefixTy PrefixTy = Default;
+ if (GV->hasPrivateLinkage()) {
+ if (CannotUsePrivateLabel)
+ PrefixTy = LinkerPrivate;
+ else
+ PrefixTy = Private;
+ }
+
+ const DataLayout &DL = GV->getParent()->getDataLayout();
+ if (!GV->hasName()) {
+ // Get the ID for the global, assigning a new one if we haven't got one
+ // already.
+ unsigned &ID = AnonGlobalIDs[GV];
+ if (ID == 0)
+ ID = AnonGlobalIDs.size();
+
+ // Must mangle the global into a unique ID.
+ getNameWithPrefixImpl(OS, "__unnamed_" + Twine(ID), DL, PrefixTy);
+ return;
+ }
+
+ StringRef Name = GV->getName();
+ char Prefix = DL.getGlobalPrefix();
+
+ // Mangle functions with Microsoft calling conventions specially. Only do
+ // this mangling for x86_64 vectorcall and 32-bit x86.
+ const Function *MSFunc = dyn_cast_or_null<Function>(GV->getAliaseeObject());
+
+  // Don't add byte count suffixes when the first character is '\01' or '?'.
+ if (Name.startswith("\01") ||
+ (DL.doNotMangleLeadingQuestionMark() && Name.startswith("?")))
+ MSFunc = nullptr;
+
+ CallingConv::ID CC =
+ MSFunc ? MSFunc->getCallingConv() : (unsigned)CallingConv::C;
+ if (!DL.hasMicrosoftFastStdCallMangling() &&
+ CC != CallingConv::X86_VectorCall)
+ MSFunc = nullptr;
+ if (MSFunc) {
+ if (CC == CallingConv::X86_FastCall)
+ Prefix = '@'; // fastcall functions have an @ prefix instead of _.
+ else if (CC == CallingConv::X86_VectorCall)
+ Prefix = '\0'; // vectorcall functions have no prefix.
+ }
+
+ getNameWithPrefixImpl(OS, Name, PrefixTy, DL, Prefix);
+
+ if (!MSFunc)
+ return;
+
+ // If we are supposed to add a microsoft-style suffix for stdcall, fastcall,
+ // or vectorcall, add it. These functions have a suffix of @N where N is the
+ // cumulative byte size of all of the parameters to the function in decimal.
+ if (CC == CallingConv::X86_VectorCall)
+ OS << '@'; // vectorcall functions use a double @ suffix.
+ FunctionType *FT = MSFunc->getFunctionType();
+ if (hasByteCountSuffix(CC) &&
+ // "Pure" variadic functions do not receive @0 suffix.
+ (!FT->isVarArg() || FT->getNumParams() == 0 ||
+ (FT->getNumParams() == 1 && MSFunc->hasStructRetAttr())))
+ addByteCountSuffix(OS, MSFunc, DL);
+}
+
+void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const {
+ raw_svector_ostream OS(OutName);
+ getNameWithPrefix(OS, GV, CannotUsePrivateLabel);
+}
+
+// Check if the name needs quotes to be safe for the linker to interpret.
+static bool canBeUnquotedInDirective(char C) {
+ return isAlnum(C) || C == '_' || C == '@';
+}
+
+static bool canBeUnquotedInDirective(StringRef Name) {
+ if (Name.empty())
+ return false;
+
+ // If any of the characters in the string is an unacceptable character, force
+ // quotes.
+ for (char C : Name) {
+ if (!canBeUnquotedInDirective(C))
+ return false;
+ }
+
+ return true;
+}
+
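+// For a dllexport'ed global foo this emits, e.g., " /EXPORT:foo" on MSVC
+// targets and " -export:foo" on MinGW/Cygwin, appending ",DATA"/",data" for
+// non-function globals.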
+void llvm::emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
+ const Triple &TT, Mangler &Mangler) {
+ if (!GV->hasDLLExportStorageClass() || GV->isDeclaration())
+ return;
+
+ if (TT.isWindowsMSVCEnvironment())
+ OS << " /EXPORT:";
+ else
+ OS << " -export:";
+
+ bool NeedQuotes = GV->hasName() && !canBeUnquotedInDirective(GV->getName());
+ if (NeedQuotes)
+ OS << "\"";
+ if (TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment()) {
+ std::string Flag;
+ raw_string_ostream FlagOS(Flag);
+ Mangler.getNameWithPrefix(FlagOS, GV, false);
+ FlagOS.flush();
+ if (Flag[0] == GV->getParent()->getDataLayout().getGlobalPrefix())
+ OS << Flag.substr(1);
+ else
+ OS << Flag;
+ } else {
+ Mangler.getNameWithPrefix(OS, GV, false);
+ }
+ if (NeedQuotes)
+ OS << "\"";
+
+ if (!GV->getValueType()->isFunctionTy()) {
+ if (TT.isWindowsMSVCEnvironment())
+ OS << ",DATA";
+ else
+ OS << ",data";
+ }
+}
+
+void llvm::emitLinkerFlagsForUsedCOFF(raw_ostream &OS, const GlobalValue *GV,
+ const Triple &T, Mangler &M) {
+ if (!T.isWindowsMSVCEnvironment())
+ return;
+
+ OS << " /INCLUDE:";
+ bool NeedQuotes = GV->hasName() && !canBeUnquotedInDirective(GV->getName());
+ if (NeedQuotes)
+ OS << "\"";
+ M.getNameWithPrefix(OS, GV, false);
+ if (NeedQuotes)
+ OS << "\"";
+}
+
diff --git a/contrib/llvm-project/llvm/lib/IR/Metadata.cpp b/contrib/llvm-project/llvm/lib/IR/Metadata.cpp
new file mode 100644
index 000000000000..2a1a514922fd
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Metadata.cpp
@@ -0,0 +1,1655 @@
+//===- Metadata.cpp - Implement Metadata classes --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Metadata classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Metadata.h"
+#include "LLVMContextImpl.h"
+#include "MetadataImpl.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+MetadataAsValue::MetadataAsValue(Type *Ty, Metadata *MD)
+ : Value(Ty, MetadataAsValueVal), MD(MD) {
+ track();
+}
+
+MetadataAsValue::~MetadataAsValue() {
+ getType()->getContext().pImpl->MetadataAsValues.erase(MD);
+ untrack();
+}
+
+/// Canonicalize metadata arguments to intrinsics.
+///
+/// To support bitcode upgrades (and assembly semantic sugar) for \a
+/// MetadataAsValue, we need to canonicalize certain metadata.
+///
+/// - nullptr is replaced by an empty MDNode.
+/// - An MDNode with a single null operand is replaced by an empty MDNode.
+/// - An MDNode whose only operand is a \a ConstantAsMetadata gets skipped.
+///
+/// This maintains readability of bitcode from when metadata was a type of
+/// value, and these bridges were unnecessary.
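+///
+/// For example (illustrative): a null argument becomes !{}, !{null} likewise
+/// becomes !{}, and !{i32 42} is looked through to the ConstantAsMetadata for
+/// i32 42.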
+static Metadata *canonicalizeMetadataForValue(LLVMContext &Context,
+ Metadata *MD) {
+ if (!MD)
+ // !{}
+ return MDNode::get(Context, None);
+
+ // Return early if this isn't a single-operand MDNode.
+ auto *N = dyn_cast<MDNode>(MD);
+ if (!N || N->getNumOperands() != 1)
+ return MD;
+
+ if (!N->getOperand(0))
+ // !{}
+ return MDNode::get(Context, None);
+
+ if (auto *C = dyn_cast<ConstantAsMetadata>(N->getOperand(0)))
+ // Look through the MDNode.
+ return C;
+
+ return MD;
+}
+
+MetadataAsValue *MetadataAsValue::get(LLVMContext &Context, Metadata *MD) {
+ MD = canonicalizeMetadataForValue(Context, MD);
+ auto *&Entry = Context.pImpl->MetadataAsValues[MD];
+ if (!Entry)
+ Entry = new MetadataAsValue(Type::getMetadataTy(Context), MD);
+ return Entry;
+}
+
+MetadataAsValue *MetadataAsValue::getIfExists(LLVMContext &Context,
+ Metadata *MD) {
+ MD = canonicalizeMetadataForValue(Context, MD);
+ auto &Store = Context.pImpl->MetadataAsValues;
+ return Store.lookup(MD);
+}
+
+void MetadataAsValue::handleChangedMetadata(Metadata *MD) {
+ LLVMContext &Context = getContext();
+ MD = canonicalizeMetadataForValue(Context, MD);
+ auto &Store = Context.pImpl->MetadataAsValues;
+
+ // Stop tracking the old metadata.
+ Store.erase(this->MD);
+ untrack();
+ this->MD = nullptr;
+
+ // Start tracking MD, or RAUW if necessary.
+ auto *&Entry = Store[MD];
+ if (Entry) {
+ replaceAllUsesWith(Entry);
+ delete this;
+ return;
+ }
+
+ this->MD = MD;
+ track();
+ Entry = this;
+}
+
+void MetadataAsValue::track() {
+ if (MD)
+ MetadataTracking::track(&MD, *MD, *this);
+}
+
+void MetadataAsValue::untrack() {
+ if (MD)
+ MetadataTracking::untrack(MD);
+}
+
+bool MetadataTracking::track(void *Ref, Metadata &MD, OwnerTy Owner) {
+ assert(Ref && "Expected live reference");
+ assert((Owner || *static_cast<Metadata **>(Ref) == &MD) &&
+ "Reference without owner must be direct");
+ if (auto *R = ReplaceableMetadataImpl::getOrCreate(MD)) {
+ R->addRef(Ref, Owner);
+ return true;
+ }
+ if (auto *PH = dyn_cast<DistinctMDOperandPlaceholder>(&MD)) {
+ assert(!PH->Use && "Placeholders can only be used once");
+ assert(!Owner && "Unexpected callback to owner");
+ PH->Use = static_cast<Metadata **>(Ref);
+ return true;
+ }
+ return false;
+}
+
+void MetadataTracking::untrack(void *Ref, Metadata &MD) {
+ assert(Ref && "Expected live reference");
+ if (auto *R = ReplaceableMetadataImpl::getIfExists(MD))
+ R->dropRef(Ref);
+ else if (auto *PH = dyn_cast<DistinctMDOperandPlaceholder>(&MD))
+ PH->Use = nullptr;
+}
+
+bool MetadataTracking::retrack(void *Ref, Metadata &MD, void *New) {
+ assert(Ref && "Expected live reference");
+ assert(New && "Expected live reference");
+ assert(Ref != New && "Expected change");
+ if (auto *R = ReplaceableMetadataImpl::getIfExists(MD)) {
+ R->moveRef(Ref, New, MD);
+ return true;
+ }
+ assert(!isa<DistinctMDOperandPlaceholder>(MD) &&
+ "Unexpected move of an MDOperand");
+ assert(!isReplaceable(MD) &&
+ "Expected un-replaceable metadata, since we didn't move a reference");
+ return false;
+}
+
+bool MetadataTracking::isReplaceable(const Metadata &MD) {
+ return ReplaceableMetadataImpl::isReplaceable(MD);
+}
+
+SmallVector<Metadata *> ReplaceableMetadataImpl::getAllArgListUsers() {
+ SmallVector<std::pair<OwnerTy, uint64_t> *> MDUsersWithID;
+ for (auto Pair : UseMap) {
+ OwnerTy Owner = Pair.second.first;
+ if (!Owner.is<Metadata *>())
+ continue;
+ Metadata *OwnerMD = Owner.get<Metadata *>();
+ if (OwnerMD->getMetadataID() == Metadata::DIArgListKind)
+ MDUsersWithID.push_back(&UseMap[Pair.first]);
+ }
+ llvm::sort(MDUsersWithID, [](auto UserA, auto UserB) {
+ return UserA->second < UserB->second;
+ });
+ SmallVector<Metadata *> MDUsers;
+ for (auto UserWithID : MDUsersWithID)
+ MDUsers.push_back(UserWithID->first.get<Metadata *>());
+ return MDUsers;
+}
+
+void ReplaceableMetadataImpl::addRef(void *Ref, OwnerTy Owner) {
+ bool WasInserted =
+ UseMap.insert(std::make_pair(Ref, std::make_pair(Owner, NextIndex)))
+ .second;
+ (void)WasInserted;
+ assert(WasInserted && "Expected to add a reference");
+
+ ++NextIndex;
+ assert(NextIndex != 0 && "Unexpected overflow");
+}
+
+void ReplaceableMetadataImpl::dropRef(void *Ref) {
+ bool WasErased = UseMap.erase(Ref);
+ (void)WasErased;
+ assert(WasErased && "Expected to drop a reference");
+}
+
+void ReplaceableMetadataImpl::moveRef(void *Ref, void *New,
+ const Metadata &MD) {
+ auto I = UseMap.find(Ref);
+ assert(I != UseMap.end() && "Expected to move a reference");
+ auto OwnerAndIndex = I->second;
+ UseMap.erase(I);
+ bool WasInserted = UseMap.insert(std::make_pair(New, OwnerAndIndex)).second;
+ (void)WasInserted;
+ assert(WasInserted && "Expected to add a reference");
+
+ // Check that the references are direct if there's no owner.
+ (void)MD;
+ assert((OwnerAndIndex.first || *static_cast<Metadata **>(Ref) == &MD) &&
+ "Reference without owner must be direct");
+ assert((OwnerAndIndex.first || *static_cast<Metadata **>(New) == &MD) &&
+ "Reference without owner must be direct");
+}
+
+void ReplaceableMetadataImpl::SalvageDebugInfo(const Constant &C) {
+ if (!C.isUsedByMetadata()) {
+ return;
+ }
+
+ LLVMContext &Context = C.getType()->getContext();
+ auto &Store = Context.pImpl->ValuesAsMetadata;
+ auto I = Store.find(&C);
+ ValueAsMetadata *MD = I->second;
+ using UseTy =
+ std::pair<void *, std::pair<MetadataTracking::OwnerTy, uint64_t>>;
+ // Copy out the uses first, since UseMap is modified below when each use of
+ // the Constant from debug info metadata is replaced with undef.
+ SmallVector<UseTy, 8> Uses(MD->UseMap.begin(), MD->UseMap.end());
+
+ for (const auto &Pair : Uses) {
+ MetadataTracking::OwnerTy Owner = Pair.second.first;
+ if (!Owner)
+ continue;
+ if (!Owner.is<Metadata *>())
+ continue;
+ auto *OwnerMD = dyn_cast<MDNode>(Owner.get<Metadata *>());
+ if (!OwnerMD)
+ continue;
+ if (isa<DINode>(OwnerMD)) {
+ OwnerMD->handleChangedOperand(
+ Pair.first, ValueAsMetadata::get(UndefValue::get(C.getType())));
+ }
+ }
+}
+
+void ReplaceableMetadataImpl::replaceAllUsesWith(Metadata *MD) {
+ if (UseMap.empty())
+ return;
+
+ // Copy out uses since UseMap will get touched below.
+ using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
+ SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
+ llvm::sort(Uses, llvm::less_second());
+ for (const auto &Pair : Uses) {
+ // Check that this Ref hasn't disappeared after RAUW (when updating a
+ // previous Ref).
+ if (!UseMap.count(Pair.first))
+ continue;
+
+ OwnerTy Owner = Pair.second.first;
+ if (!Owner) {
+ // Update unowned tracking references directly.
+ Metadata *&Ref = *static_cast<Metadata **>(Pair.first);
+ Ref = MD;
+ if (MD)
+ MetadataTracking::track(Ref);
+ UseMap.erase(Pair.first);
+ continue;
+ }
+
+ // Check for MetadataAsValue.
+ if (Owner.is<MetadataAsValue *>()) {
+ Owner.get<MetadataAsValue *>()->handleChangedMetadata(MD);
+ continue;
+ }
+
+ // There's a Metadata owner -- dispatch.
+ Metadata *OwnerMD = Owner.get<Metadata *>();
+ switch (OwnerMD->getMetadataID()) {
+#define HANDLE_METADATA_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ cast<CLASS>(OwnerMD)->handleChangedOperand(Pair.first, MD); \
+ continue;
+#include "llvm/IR/Metadata.def"
+ default:
+ llvm_unreachable("Invalid metadata subclass");
+ }
+ }
+ assert(UseMap.empty() && "Expected all uses to be replaced");
+}
+
+void ReplaceableMetadataImpl::resolveAllUses(bool ResolveUsers) {
+ if (UseMap.empty())
+ return;
+
+ if (!ResolveUsers) {
+ UseMap.clear();
+ return;
+ }
+
+ // Copy out uses since UseMap could get touched below.
+ using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
+ SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
+ llvm::sort(Uses, [](const UseTy &L, const UseTy &R) {
+ return L.second.second < R.second.second;
+ });
+ UseMap.clear();
+ for (const auto &Pair : Uses) {
+ auto Owner = Pair.second.first;
+ if (!Owner)
+ continue;
+ if (Owner.is<MetadataAsValue *>())
+ continue;
+
+ // Resolve MDNodes that point at this.
+ auto *OwnerMD = dyn_cast<MDNode>(Owner.get<Metadata *>());
+ if (!OwnerMD)
+ continue;
+ if (OwnerMD->isResolved())
+ continue;
+ OwnerMD->decrementUnresolvedOperandCount();
+ }
+}
+
+ReplaceableMetadataImpl *ReplaceableMetadataImpl::getOrCreate(Metadata &MD) {
+ if (auto *N = dyn_cast<MDNode>(&MD))
+ return N->isResolved() ? nullptr : N->Context.getOrCreateReplaceableUses();
+ return dyn_cast<ValueAsMetadata>(&MD);
+}
+
+ReplaceableMetadataImpl *ReplaceableMetadataImpl::getIfExists(Metadata &MD) {
+ if (auto *N = dyn_cast<MDNode>(&MD))
+ return N->isResolved() ? nullptr : N->Context.getReplaceableUses();
+ return dyn_cast<ValueAsMetadata>(&MD);
+}
+
+bool ReplaceableMetadataImpl::isReplaceable(const Metadata &MD) {
+ if (auto *N = dyn_cast<MDNode>(&MD))
+ return !N->isResolved();
+ return isa<ValueAsMetadata>(&MD);
+}
+
+static DISubprogram *getLocalFunctionMetadata(Value *V) {
+ assert(V && "Expected value");
+ if (auto *A = dyn_cast<Argument>(V)) {
+ if (auto *Fn = A->getParent())
+ return Fn->getSubprogram();
+ return nullptr;
+ }
+
+ if (BasicBlock *BB = cast<Instruction>(V)->getParent()) {
+ if (auto *Fn = BB->getParent())
+ return Fn->getSubprogram();
+ return nullptr;
+ }
+
+ return nullptr;
+}
+
+ValueAsMetadata *ValueAsMetadata::get(Value *V) {
+ assert(V && "Unexpected null Value");
+
+ auto &Context = V->getContext();
+ auto *&Entry = Context.pImpl->ValuesAsMetadata[V];
+ if (!Entry) {
+ assert((isa<Constant>(V) || isa<Argument>(V) || isa<Instruction>(V)) &&
+ "Expected constant or function-local value");
+ assert(!V->IsUsedByMD && "Expected this to be the only metadata use");
+ V->IsUsedByMD = true;
+ if (auto *C = dyn_cast<Constant>(V))
+ Entry = new ConstantAsMetadata(C);
+ else
+ Entry = new LocalAsMetadata(V);
+ }
+
+ return Entry;
+}
+
+ValueAsMetadata *ValueAsMetadata::getIfExists(Value *V) {
+ assert(V && "Unexpected null Value");
+ return V->getContext().pImpl->ValuesAsMetadata.lookup(V);
+}
+
+void ValueAsMetadata::handleDeletion(Value *V) {
+ assert(V && "Expected valid value");
+
+ auto &Store = V->getType()->getContext().pImpl->ValuesAsMetadata;
+ auto I = Store.find(V);
+ if (I == Store.end())
+ return;
+
+ // Remove old entry from the map.
+ ValueAsMetadata *MD = I->second;
+ assert(MD && "Expected valid metadata");
+ assert(MD->getValue() == V && "Expected valid mapping");
+ Store.erase(I);
+
+ // Delete the metadata.
+ MD->replaceAllUsesWith(nullptr);
+ delete MD;
+}
+
+void ValueAsMetadata::handleRAUW(Value *From, Value *To) {
+ assert(From && "Expected valid value");
+ assert(To && "Expected valid value");
+ assert(From != To && "Expected changed value");
+ assert(From->getType() == To->getType() && "Unexpected type change");
+
+ LLVMContext &Context = From->getType()->getContext();
+ auto &Store = Context.pImpl->ValuesAsMetadata;
+ auto I = Store.find(From);
+ if (I == Store.end()) {
+ assert(!From->IsUsedByMD && "Expected From not to be used by metadata");
+ return;
+ }
+
+ // Remove old entry from the map.
+ assert(From->IsUsedByMD && "Expected From to be used by metadata");
+ From->IsUsedByMD = false;
+ ValueAsMetadata *MD = I->second;
+ assert(MD && "Expected valid metadata");
+ assert(MD->getValue() == From && "Expected valid mapping");
+ Store.erase(I);
+
+ if (isa<LocalAsMetadata>(MD)) {
+ if (auto *C = dyn_cast<Constant>(To)) {
+ // Local became a constant.
+ MD->replaceAllUsesWith(ConstantAsMetadata::get(C));
+ delete MD;
+ return;
+ }
+ if (getLocalFunctionMetadata(From) && getLocalFunctionMetadata(To) &&
+ getLocalFunctionMetadata(From) != getLocalFunctionMetadata(To)) {
+ // DISubprogram changed.
+ MD->replaceAllUsesWith(nullptr);
+ delete MD;
+ return;
+ }
+ } else if (!isa<Constant>(To)) {
+ // Changed to function-local value.
+ MD->replaceAllUsesWith(nullptr);
+ delete MD;
+ return;
+ }
+
+ auto *&Entry = Store[To];
+ if (Entry) {
+ // The target already exists.
+ MD->replaceAllUsesWith(Entry);
+ delete MD;
+ return;
+ }
+
+ // Update MD in place (and update the map entry).
+ assert(!To->IsUsedByMD && "Expected this to be the only metadata use");
+ To->IsUsedByMD = true;
+ MD->V = To;
+ Entry = MD;
+}
+
+//===----------------------------------------------------------------------===//
+// MDString implementation.
+//
+
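+// MDStrings are uniqued by their contents: repeated lookups of equal strings
+// return the same object, so (illustrative) MDString::get(Ctx, "x") ==
+// MDString::get(Ctx, "x") always holds.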
+MDString *MDString::get(LLVMContext &Context, StringRef Str) {
+ auto &Store = Context.pImpl->MDStringCache;
+ auto I = Store.try_emplace(Str);
+ auto &MapEntry = I.first->getValue();
+ if (!I.second)
+ return &MapEntry;
+ MapEntry.Entry = &*I.first;
+ return &MapEntry;
+}
+
+StringRef MDString::getString() const {
+ assert(Entry && "Expected to find string map entry");
+ return Entry->first();
+}
+
+//===----------------------------------------------------------------------===//
+// MDNode implementation.
+//
+
+// Assert that the MDNode types will not be unaligned by the objects
+// prepended to them.
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ static_assert( \
+ alignof(uint64_t) >= alignof(CLASS), \
+ "Alignment is insufficient after objects prepended to " #CLASS);
+#include "llvm/IR/Metadata.def"
+
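+// Illustrative sketch of the co-allocation performed below for a small node
+// (addresses increase to the right):
+//
+//   [MDOperand 0 ... MDOperand N-1][Header][MDNode subclass object]
+//
+// operator new returns the address just past the Header, so operator delete
+// can recover it with "reinterpret_cast<Header *>(N) - 1".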
+void *MDNode::operator new(size_t Size, size_t NumOps, StorageType Storage) {
+ // uint64_t is the most aligned type we need to support (ensured by the
+ // static_assert above).
+ size_t AllocSize =
+ alignTo(Header::getAllocSize(Storage, NumOps), alignof(uint64_t));
+ char *Mem = reinterpret_cast<char *>(::operator new(AllocSize + Size));
+ Header *H = new (Mem + AllocSize - sizeof(Header)) Header(NumOps, Storage);
+ return reinterpret_cast<void *>(H + 1);
+}
+
+void MDNode::operator delete(void *N) {
+ Header *H = reinterpret_cast<Header *>(N) - 1;
+ void *Mem = H->getAllocation();
+ H->~Header();
+ ::operator delete(Mem);
+}
+
+MDNode::MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
+ ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2)
+ : Metadata(ID, Storage), Context(Context) {
+ unsigned Op = 0;
+ for (Metadata *MD : Ops1)
+ setOperand(Op++, MD);
+ for (Metadata *MD : Ops2)
+ setOperand(Op++, MD);
+
+ if (!isUniqued())
+ return;
+
+ // Count the unresolved operands. If there are any, RAUW support will be
+ // added lazily on first reference.
+ countUnresolvedOperands();
+}
+
+TempMDNode MDNode::clone() const {
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid MDNode subclass");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case CLASS##Kind: \
+ return cast<CLASS>(this)->cloneImpl();
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+MDNode::Header::Header(size_t NumOps, StorageType Storage) {
+ IsLarge = isLarge(NumOps);
+ IsResizable = isResizable(Storage);
+ SmallSize = getSmallSize(NumOps, IsResizable, IsLarge);
+ if (IsLarge) {
+ SmallNumOps = 0;
+ new (getLargePtr()) LargeStorageVector();
+ getLarge().resize(NumOps);
+ return;
+ }
+ SmallNumOps = NumOps;
+ MDOperand *O = reinterpret_cast<MDOperand *>(this) - SmallSize;
+ for (MDOperand *E = O + SmallSize; O != E;)
+ (void)new (O++) MDOperand();
+}
+
+MDNode::Header::~Header() {
+ if (IsLarge) {
+ getLarge().~LargeStorageVector();
+ return;
+ }
+ MDOperand *O = reinterpret_cast<MDOperand *>(this);
+ for (MDOperand *E = O - SmallSize; O != E; --O)
+ (void)(O - 1)->~MDOperand();
+}
+
+void *MDNode::Header::getSmallPtr() {
+ static_assert(alignof(MDOperand) <= alignof(Header),
+ "MDOperand too strongly aligned");
+ return reinterpret_cast<char *>(const_cast<Header *>(this)) -
+ sizeof(MDOperand) * SmallSize;
+}
+
+void MDNode::Header::resize(size_t NumOps) {
+ assert(IsResizable && "Node is not resizable");
+ if (operands().size() == NumOps)
+ return;
+
+ if (IsLarge)
+ getLarge().resize(NumOps);
+ else if (NumOps <= SmallSize)
+ resizeSmall(NumOps);
+ else
+ resizeSmallToLarge(NumOps);
+}
+
+void MDNode::Header::resizeSmall(size_t NumOps) {
+ assert(!IsLarge && "Expected a small MDNode");
+ assert(NumOps <= SmallSize && "NumOps too large for small resize");
+
+ MutableArrayRef<MDOperand> ExistingOps = operands();
+ assert(NumOps != ExistingOps.size() && "Expected a different size");
+
+ int NumNew = (int)NumOps - (int)ExistingOps.size();
+ MDOperand *O = ExistingOps.end();
+ for (int I = 0, E = NumNew; I < E; ++I)
+ (O++)->reset();
+ for (int I = 0, E = NumNew; I > E; --I)
+ (--O)->reset();
+ SmallNumOps = NumOps;
+ assert(O == operands().end() && "Operands not (un)initialized until the end");
+}
+
+void MDNode::Header::resizeSmallToLarge(size_t NumOps) {
+ assert(!IsLarge && "Expected a small MDNode");
+ assert(NumOps > SmallSize && "Expected NumOps to be larger than allocation");
+ LargeStorageVector NewOps;
+ NewOps.resize(NumOps);
+ llvm::move(operands(), NewOps.begin());
+ resizeSmall(0);
+ new (getLargePtr()) LargeStorageVector(std::move(NewOps));
+ IsLarge = true;
+}
+
+static bool isOperandUnresolved(Metadata *Op) {
+ if (auto *N = dyn_cast_or_null<MDNode>(Op))
+ return !N->isResolved();
+ return false;
+}
+
+void MDNode::countUnresolvedOperands() {
+ assert(getNumUnresolved() == 0 && "Expected unresolved ops to be uncounted");
+ assert(isUniqued() && "Expected this to be uniqued");
+ setNumUnresolved(count_if(operands(), isOperandUnresolved));
+}
+
+void MDNode::makeUniqued() {
+ assert(isTemporary() && "Expected this to be temporary");
+ assert(!isResolved() && "Expected this to be unresolved");
+
+ // Enable uniquing callbacks.
+ for (auto &Op : mutable_operands())
+ Op.reset(Op.get(), this);
+
+ // Make this 'uniqued'.
+ Storage = Uniqued;
+ countUnresolvedOperands();
+ if (!getNumUnresolved()) {
+ dropReplaceableUses();
+ assert(isResolved() && "Expected this to be resolved");
+ }
+
+ assert(isUniqued() && "Expected this to be uniqued");
+}
+
+void MDNode::makeDistinct() {
+ assert(isTemporary() && "Expected this to be temporary");
+ assert(!isResolved() && "Expected this to be unresolved");
+
+ // Drop RAUW support and store as a distinct node.
+ dropReplaceableUses();
+ storeDistinctInContext();
+
+ assert(isDistinct() && "Expected this to be distinct");
+ assert(isResolved() && "Expected this to be resolved");
+}
+
+void MDNode::resolve() {
+ assert(isUniqued() && "Expected this to be uniqued");
+ assert(!isResolved() && "Expected this to be unresolved");
+
+ setNumUnresolved(0);
+ dropReplaceableUses();
+
+ assert(isResolved() && "Expected this to be resolved");
+}
+
+void MDNode::dropReplaceableUses() {
+ assert(!getNumUnresolved() && "Unexpected unresolved operand");
+
+ // Drop any RAUW support.
+ if (Context.hasReplaceableUses())
+ Context.takeReplaceableUses()->resolveAllUses();
+}
+
+void MDNode::resolveAfterOperandChange(Metadata *Old, Metadata *New) {
+ assert(isUniqued() && "Expected this to be uniqued");
+ assert(getNumUnresolved() != 0 && "Expected unresolved operands");
+
+ // Check if an operand was resolved.
+ if (!isOperandUnresolved(Old)) {
+ if (isOperandUnresolved(New))
+ // An operand was un-resolved!
+ setNumUnresolved(getNumUnresolved() + 1);
+ } else if (!isOperandUnresolved(New))
+ decrementUnresolvedOperandCount();
+}
+
+void MDNode::decrementUnresolvedOperandCount() {
+ assert(!isResolved() && "Expected this to be unresolved");
+ if (isTemporary())
+ return;
+
+ assert(isUniqued() && "Expected this to be uniqued");
+ setNumUnresolved(getNumUnresolved() - 1);
+ if (getNumUnresolved())
+ return;
+
+ // Last unresolved operand has just been resolved.
+ dropReplaceableUses();
+ assert(isResolved() && "Expected this to become resolved");
+}
+
+void MDNode::resolveCycles() {
+ if (isResolved())
+ return;
+
+ // Resolve this node immediately.
+ resolve();
+
+ // Resolve all operands.
+ for (const auto &Op : operands()) {
+ auto *N = dyn_cast_or_null<MDNode>(Op);
+ if (!N)
+ continue;
+
+ assert(!N->isTemporary() &&
+ "Expected all forward declarations to be resolved");
+ if (!N->isResolved())
+ N->resolveCycles();
+ }
+}
+
+static bool hasSelfReference(MDNode *N) {
+ return llvm::is_contained(N->operands(), N);
+}
+
+MDNode *MDNode::replaceWithPermanentImpl() {
+ switch (getMetadataID()) {
+ default:
+ // If this type isn't uniquable, replace with a distinct node.
+ return replaceWithDistinctImpl();
+
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ case CLASS##Kind: \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+
+ // Even if this type is uniquable, self-references have to be distinct.
+ if (hasSelfReference(this))
+ return replaceWithDistinctImpl();
+ return replaceWithUniquedImpl();
+}
+
+MDNode *MDNode::replaceWithUniquedImpl() {
+ // Try to uniquify in place.
+ MDNode *UniquedNode = uniquify();
+
+ if (UniquedNode == this) {
+ makeUniqued();
+ return this;
+ }
+
+ // Collision, so RAUW instead.
+ replaceAllUsesWith(UniquedNode);
+ deleteAsSubclass();
+ return UniquedNode;
+}
+
+MDNode *MDNode::replaceWithDistinctImpl() {
+ makeDistinct();
+ return this;
+}
+
+void MDTuple::recalculateHash() {
+ setHash(MDTupleInfo::KeyTy::calculateHash(this));
+}
+
+void MDNode::dropAllReferences() {
+ for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
+ setOperand(I, nullptr);
+ if (Context.hasReplaceableUses()) {
+ Context.getReplaceableUses()->resolveAllUses(/* ResolveUsers */ false);
+ (void)Context.takeReplaceableUses();
+ }
+}
+
+void MDNode::handleChangedOperand(void *Ref, Metadata *New) {
+ unsigned Op = static_cast<MDOperand *>(Ref) - op_begin();
+ assert(Op < getNumOperands() && "Expected valid operand");
+
+ if (!isUniqued()) {
+ // This node is not uniqued. Just set the operand and be done with it.
+ setOperand(Op, New);
+ return;
+ }
+
+ // This node is uniqued.
+ eraseFromStore();
+
+ Metadata *Old = getOperand(Op);
+ setOperand(Op, New);
+
+ // Drop uniquing for self-reference cycles and deleted constants.
+ if (New == this || (!New && Old && isa<ConstantAsMetadata>(Old))) {
+ if (!isResolved())
+ resolve();
+ storeDistinctInContext();
+ return;
+ }
+
+ // Re-unique the node.
+ auto *Uniqued = uniquify();
+ if (Uniqued == this) {
+ if (!isResolved())
+ resolveAfterOperandChange(Old, New);
+ return;
+ }
+
+ // Collision.
+ if (!isResolved()) {
+ // Still unresolved, so RAUW.
+ //
+ // First, clear out all operands to prevent any recursion (similar to
+ // dropAllReferences(), but we still need the use-list).
+ for (unsigned O = 0, E = getNumOperands(); O != E; ++O)
+ setOperand(O, nullptr);
+ if (Context.hasReplaceableUses())
+ Context.getReplaceableUses()->replaceAllUsesWith(Uniqued);
+ deleteAsSubclass();
+ return;
+ }
+
+ // Store in non-uniqued form if RAUW isn't possible.
+ storeDistinctInContext();
+}
+
+void MDNode::deleteAsSubclass() {
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid subclass of MDNode");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case CLASS##Kind: \
+ delete cast<CLASS>(this); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+template <class T, class InfoT>
+static T *uniquifyImpl(T *N, DenseSet<T *, InfoT> &Store) {
+ if (T *U = getUniqued(Store, N))
+ return U;
+
+ Store.insert(N);
+ return N;
+}
+
+template <class NodeTy> struct MDNode::HasCachedHash {
+ using Yes = char[1];
+ using No = char[2];
+ template <class U, U Val> struct SFINAE {};
+
+ template <class U>
+ static Yes &check(SFINAE<void (U::*)(unsigned), &U::setHash> *);
+ template <class U> static No &check(...);
+
+ static const bool value = sizeof(check<NodeTy>(nullptr)) == sizeof(Yes);
+};
+
+MDNode *MDNode::uniquify() {
+ assert(!hasSelfReference(this) && "Cannot uniquify a self-referencing node");
+
+ // Try to insert into uniquing store.
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid or non-uniquable subclass of MDNode");
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ case CLASS##Kind: { \
+ CLASS *SubclassThis = cast<CLASS>(this); \
+ std::integral_constant<bool, HasCachedHash<CLASS>::value> \
+ ShouldRecalculateHash; \
+ dispatchRecalculateHash(SubclassThis, ShouldRecalculateHash); \
+ return uniquifyImpl(SubclassThis, getContext().pImpl->CLASS##s); \
+ }
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+void MDNode::eraseFromStore() {
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid or non-uniquable subclass of MDNode");
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ case CLASS##Kind: \
+ getContext().pImpl->CLASS##s.erase(cast<CLASS>(this)); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+}
+
+MDTuple *MDTuple::getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs,
+ StorageType Storage, bool ShouldCreate) {
+ unsigned Hash = 0;
+ if (Storage == Uniqued) {
+ MDTupleInfo::KeyTy Key(MDs);
+ if (auto *N = getUniqued(Context.pImpl->MDTuples, Key))
+ return N;
+ if (!ShouldCreate)
+ return nullptr;
+ Hash = Key.getHash();
+ } else {
+ assert(ShouldCreate && "Expected non-uniqued nodes to always be created");
+ }
+
+ return storeImpl(new (MDs.size(), Storage)
+ MDTuple(Context, Storage, Hash, MDs),
+ Storage, Context.pImpl->MDTuples);
+}
+
+void MDNode::deleteTemporary(MDNode *N) {
+ assert(N->isTemporary() && "Expected temporary node");
+ N->replaceAllUsesWith(nullptr);
+ N->deleteAsSubclass();
+}
+
+void MDNode::storeDistinctInContext() {
+ assert(!Context.hasReplaceableUses() && "Unexpected replaceable uses");
+ assert(!getNumUnresolved() && "Unexpected unresolved nodes");
+ Storage = Distinct;
+ assert(isResolved() && "Expected this to be resolved");
+
+ // Reset the hash.
+ switch (getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid subclass of MDNode");
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case CLASS##Kind: { \
+ std::integral_constant<bool, HasCachedHash<CLASS>::value> ShouldResetHash; \
+ dispatchResetHash(cast<CLASS>(this), ShouldResetHash); \
+ break; \
+ }
+#include "llvm/IR/Metadata.def"
+ }
+
+ getContext().pImpl->DistinctMDNodes.push_back(this);
+}
+
+void MDNode::replaceOperandWith(unsigned I, Metadata *New) {
+ if (getOperand(I) == New)
+ return;
+
+ if (!isUniqued()) {
+ setOperand(I, New);
+ return;
+ }
+
+ handleChangedOperand(mutable_begin() + I, New);
+}
+
+void MDNode::setOperand(unsigned I, Metadata *New) {
+ assert(I < getNumOperands());
+ mutable_begin()[I].reset(New, isUniqued() ? this : nullptr);
+}
+
+/// Get a node or a self-reference that looks like it.
+///
+/// Special handling for finding self-references, for use by \a
+/// MDNode::concatenate() and \a MDNode::intersect() to maintain behaviour from
+/// when self-referencing nodes were still uniqued. If the first operand has
+/// the same operands as \c Ops, return the first operand instead.
+static MDNode *getOrSelfReference(LLVMContext &Context,
+ ArrayRef<Metadata *> Ops) {
+ if (!Ops.empty())
+ if (MDNode *N = dyn_cast_or_null<MDNode>(Ops[0]))
+ if (N->getNumOperands() == Ops.size() && N == N->getOperand(0)) {
+ for (unsigned I = 1, E = Ops.size(); I != E; ++I)
+ if (Ops[I] != N->getOperand(I))
+ return MDNode::get(Context, Ops);
+ return N;
+ }
+
+ return MDNode::get(Context, Ops);
+}
+
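+// For example (illustrative): concatenating !{!"a", !"b"} with !{!"b", !"c"}
+// produces !{!"a", !"b", !"c"}; the SetVector drops the duplicate operand.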
+MDNode *MDNode::concatenate(MDNode *A, MDNode *B) {
+ if (!A)
+ return B;
+ if (!B)
+ return A;
+
+ SmallSetVector<Metadata *, 4> MDs(A->op_begin(), A->op_end());
+ MDs.insert(B->op_begin(), B->op_end());
+
+ // FIXME: This preserves long-standing behaviour, but is it really the right
+ // behaviour? Or was that an unintended side-effect of node uniquing?
+ return getOrSelfReference(A->getContext(), MDs.getArrayRef());
+}
+
+MDNode *MDNode::intersect(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ SmallSetVector<Metadata *, 4> MDs(A->op_begin(), A->op_end());
+ SmallPtrSet<Metadata *, 4> BSet(B->op_begin(), B->op_end());
+ MDs.remove_if([&](Metadata *MD) { return !BSet.count(MD); });
+
+ // FIXME: This preserves long-standing behaviour, but is it really the right
+ // behaviour? Or was that an unintended side-effect of node uniquing?
+ return getOrSelfReference(A->getContext(), MDs.getArrayRef());
+}
+
+MDNode *MDNode::getMostGenericAliasScope(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ // Take the intersection of domains then union the scopes
+ // within those domains
+ SmallPtrSet<const MDNode *, 16> ADomains;
+ SmallPtrSet<const MDNode *, 16> IntersectDomains;
+ SmallSetVector<Metadata *, 4> MDs;
+ for (const MDOperand &MDOp : A->operands())
+ if (const MDNode *NAMD = dyn_cast<MDNode>(MDOp))
+ if (const MDNode *Domain = AliasScopeNode(NAMD).getDomain())
+ ADomains.insert(Domain);
+
+ for (const MDOperand &MDOp : B->operands())
+ if (const MDNode *NAMD = dyn_cast<MDNode>(MDOp))
+ if (const MDNode *Domain = AliasScopeNode(NAMD).getDomain())
+ if (ADomains.contains(Domain)) {
+ IntersectDomains.insert(Domain);
+ MDs.insert(MDOp);
+ }
+
+ for (const MDOperand &MDOp : A->operands())
+ if (const MDNode *NAMD = dyn_cast<MDNode>(MDOp))
+ if (const MDNode *Domain = AliasScopeNode(NAMD).getDomain())
+ if (IntersectDomains.contains(Domain))
+ MDs.insert(MDOp);
+
+ return MDs.empty() ? nullptr
+ : getOrSelfReference(A->getContext(), MDs.getArrayRef());
+}
+
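+// !fpmath metadata carries the maximum acceptable ULP error as its operand;
+// the stricter (smaller) bound is the safe common ground, so merging
+// !{float 2.5} with !{float 5.0} keeps !{float 2.5} (illustrative).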
+MDNode *MDNode::getMostGenericFPMath(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ APFloat AVal = mdconst::extract<ConstantFP>(A->getOperand(0))->getValueAPF();
+ APFloat BVal = mdconst::extract<ConstantFP>(B->getOperand(0))->getValueAPF();
+ if (AVal < BVal)
+ return A;
+ return B;
+}
+
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+ return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
+static bool canBeMerged(const ConstantRange &A, const ConstantRange &B) {
+ return !A.intersectWith(B).isEmptySet() || isContiguous(A, B);
+}
+
+static bool tryMergeRange(SmallVectorImpl<ConstantInt *> &EndPoints,
+ ConstantInt *Low, ConstantInt *High) {
+ ConstantRange NewRange(Low->getValue(), High->getValue());
+ unsigned Size = EndPoints.size();
+ APInt LB = EndPoints[Size - 2]->getValue();
+ APInt LE = EndPoints[Size - 1]->getValue();
+ ConstantRange LastRange(LB, LE);
+ if (canBeMerged(NewRange, LastRange)) {
+ ConstantRange Union = LastRange.unionWith(NewRange);
+ Type *Ty = High->getType();
+ EndPoints[Size - 2] =
+ cast<ConstantInt>(ConstantInt::get(Ty, Union.getLower()));
+ EndPoints[Size - 1] =
+ cast<ConstantInt>(ConstantInt::get(Ty, Union.getUpper()));
+ return true;
+ }
+ return false;
+}
+
+static void addRange(SmallVectorImpl<ConstantInt *> &EndPoints,
+ ConstantInt *Low, ConstantInt *High) {
+ if (!EndPoints.empty())
+ if (tryMergeRange(EndPoints, Low, High))
+ return;
+
+ EndPoints.push_back(Low);
+ EndPoints.push_back(High);
+}
+
+MDNode *MDNode::getMostGenericRange(MDNode *A, MDNode *B) {
+ // Given two ranges, we want to compute the union of the ranges. This
+ // is slightly complicated by having to combine the intervals and merge
+ // the ones that overlap.
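+ // For example (illustrative): !{i32 0, i32 10} merged with !{i32 5, i32 20}
+ // yields !{i32 0, i32 20}, while disjoint intervals are kept as separate
+ // pairs sorted by their lower bound.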
+
+ if (!A || !B)
+ return nullptr;
+
+ if (A == B)
+ return A;
+
+ // First, walk both lists in order of the lower boundary of each interval.
+ // At each step, try to merge the new interval with the last one we added.
+ SmallVector<ConstantInt *, 4> EndPoints;
+ int AI = 0;
+ int BI = 0;
+ int AN = A->getNumOperands() / 2;
+ int BN = B->getNumOperands() / 2;
+ while (AI < AN && BI < BN) {
+ ConstantInt *ALow = mdconst::extract<ConstantInt>(A->getOperand(2 * AI));
+ ConstantInt *BLow = mdconst::extract<ConstantInt>(B->getOperand(2 * BI));
+
+ if (ALow->getValue().slt(BLow->getValue())) {
+ addRange(EndPoints, ALow,
+ mdconst::extract<ConstantInt>(A->getOperand(2 * AI + 1)));
+ ++AI;
+ } else {
+ addRange(EndPoints, BLow,
+ mdconst::extract<ConstantInt>(B->getOperand(2 * BI + 1)));
+ ++BI;
+ }
+ }
+ while (AI < AN) {
+ addRange(EndPoints, mdconst::extract<ConstantInt>(A->getOperand(2 * AI)),
+ mdconst::extract<ConstantInt>(A->getOperand(2 * AI + 1)));
+ ++AI;
+ }
+ while (BI < BN) {
+ addRange(EndPoints, mdconst::extract<ConstantInt>(B->getOperand(2 * BI)),
+ mdconst::extract<ConstantInt>(B->getOperand(2 * BI + 1)));
+ ++BI;
+ }
+
+ // If we have more than 2 ranges (4 endpoints) we have to try to merge
+ // the last and first ones.
+ unsigned Size = EndPoints.size();
+ if (Size > 4) {
+ ConstantInt *FB = EndPoints[0];
+ ConstantInt *FE = EndPoints[1];
+ if (tryMergeRange(EndPoints, FB, FE)) {
+ for (unsigned i = 0; i < Size - 2; ++i) {
+ EndPoints[i] = EndPoints[i + 2];
+ }
+ EndPoints.resize(Size - 2);
+ }
+ }
+
+ // If in the end we have a single range, it is possible that it is now the
+ // full range. Just drop the metadata in that case.
+ if (EndPoints.size() == 2) {
+ ConstantRange Range(EndPoints[0]->getValue(), EndPoints[1]->getValue());
+ if (Range.isFullSet())
+ return nullptr;
+ }
+
+ SmallVector<Metadata *, 4> MDs;
+ MDs.reserve(EndPoints.size());
+ for (auto *I : EndPoints)
+ MDs.push_back(ConstantAsMetadata::get(I));
+ return MDNode::get(A->getContext(), MDs);
+}
+
+MDNode *MDNode::getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B) {
+ if (!A || !B)
+ return nullptr;
+
+ ConstantInt *AVal = mdconst::extract<ConstantInt>(A->getOperand(0));
+ ConstantInt *BVal = mdconst::extract<ConstantInt>(B->getOperand(0));
+ if (AVal->getZExtValue() < BVal->getZExtValue())
+ return A;
+ return B;
+}
+
+//===----------------------------------------------------------------------===//
+// NamedMDNode implementation.
+//
+
+static SmallVector<TrackingMDRef, 4> &getNMDOps(void *Operands) {
+ return *(SmallVector<TrackingMDRef, 4> *)Operands;
+}
+
+NamedMDNode::NamedMDNode(const Twine &N)
+ : Name(N.str()), Operands(new SmallVector<TrackingMDRef, 4>()) {}
+
+NamedMDNode::~NamedMDNode() {
+ dropAllReferences();
+ delete &getNMDOps(Operands);
+}
+
+unsigned NamedMDNode::getNumOperands() const {
+ return (unsigned)getNMDOps(Operands).size();
+}
+
+MDNode *NamedMDNode::getOperand(unsigned i) const {
+ assert(i < getNumOperands() && "Invalid Operand number!");
+ auto *N = getNMDOps(Operands)[i].get();
+ return cast_or_null<MDNode>(N);
+}
+
+void NamedMDNode::addOperand(MDNode *M) { getNMDOps(Operands).emplace_back(M); }
+
+void NamedMDNode::setOperand(unsigned I, MDNode *New) {
+ assert(I < getNumOperands() && "Invalid operand number");
+ getNMDOps(Operands)[I].reset(New);
+}
+
+void NamedMDNode::eraseFromParent() { getParent()->eraseNamedMetadata(this); }
+
+void NamedMDNode::clearOperands() { getNMDOps(Operands).clear(); }
+
+StringRef NamedMDNode::getName() const { return StringRef(Name); }
+
+//===----------------------------------------------------------------------===//
+// Instruction Metadata method implementations.
+//
+
+MDNode *MDAttachments::lookup(unsigned ID) const {
+ for (const auto &A : Attachments)
+ if (A.MDKind == ID)
+ return A.Node;
+ return nullptr;
+}
+
+void MDAttachments::get(unsigned ID, SmallVectorImpl<MDNode *> &Result) const {
+ for (const auto &A : Attachments)
+ if (A.MDKind == ID)
+ Result.push_back(A.Node);
+}
+
+void MDAttachments::getAll(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const {
+ for (const auto &A : Attachments)
+ Result.emplace_back(A.MDKind, A.Node);
+
+ // Sort the resulting array so it is stable with respect to metadata IDs; the
+ // stable sort preserves the original insertion order among equal IDs.
+ if (Result.size() > 1)
+ llvm::stable_sort(Result, less_first());
+}
+
+void MDAttachments::set(unsigned ID, MDNode *MD) {
+ erase(ID);
+ if (MD)
+ insert(ID, *MD);
+}
+
+void MDAttachments::insert(unsigned ID, MDNode &MD) {
+ Attachments.push_back({ID, TrackingMDNodeRef(&MD)});
+}
+
+bool MDAttachments::erase(unsigned ID) {
+ if (empty())
+ return false;
+
+ // Common case is one value.
+ if (Attachments.size() == 1 && Attachments.back().MDKind == ID) {
+ Attachments.pop_back();
+ return true;
+ }
+
+ auto OldSize = Attachments.size();
+ llvm::erase_if(Attachments,
+ [ID](const Attachment &A) { return A.MDKind == ID; });
+ return OldSize != Attachments.size();
+}
+
+MDNode *Value::getMetadata(unsigned KindID) const {
+ if (!hasMetadata())
+ return nullptr;
+ const auto &Info = getContext().pImpl->ValueMetadata[this];
+ assert(!Info.empty() && "bit out of sync with hash table");
+ return Info.lookup(KindID);
+}
+
+MDNode *Value::getMetadata(StringRef Kind) const {
+ if (!hasMetadata())
+ return nullptr;
+ const auto &Info = getContext().pImpl->ValueMetadata[this];
+ assert(!Info.empty() && "bit out of sync with hash table");
+ return Info.lookup(getContext().getMDKindID(Kind));
+}
+
+void Value::getMetadata(unsigned KindID, SmallVectorImpl<MDNode *> &MDs) const {
+ if (hasMetadata())
+ getContext().pImpl->ValueMetadata[this].get(KindID, MDs);
+}
+
+void Value::getMetadata(StringRef Kind, SmallVectorImpl<MDNode *> &MDs) const {
+ if (hasMetadata())
+ getMetadata(getContext().getMDKindID(Kind), MDs);
+}
+
+void Value::getAllMetadata(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
+ if (hasMetadata()) {
+ assert(getContext().pImpl->ValueMetadata.count(this) &&
+ "bit out of sync with hash table");
+ const auto &Info = getContext().pImpl->ValueMetadata.find(this)->second;
+ assert(!Info.empty() && "Shouldn't have called this");
+ Info.getAll(MDs);
+ }
+}
+
+void Value::setMetadata(unsigned KindID, MDNode *Node) {
+ assert(isa<Instruction>(this) || isa<GlobalObject>(this));
+
+ // Handle the case when we're adding/updating metadata on a value.
+ if (Node) {
+ auto &Info = getContext().pImpl->ValueMetadata[this];
+ assert(!Info.empty() == HasMetadata && "bit out of sync with hash table");
+ if (Info.empty())
+ HasMetadata = true;
+ Info.set(KindID, Node);
+ return;
+ }
+
+ // Otherwise, we're removing metadata from an instruction.
+ assert((HasMetadata == (getContext().pImpl->ValueMetadata.count(this) > 0)) &&
+ "bit out of sync with hash table");
+ if (!HasMetadata)
+ return; // Nothing to remove!
+ auto &Info = getContext().pImpl->ValueMetadata[this];
+
+ // Handle removal of an existing value.
+ Info.erase(KindID);
+ if (!Info.empty())
+ return;
+ getContext().pImpl->ValueMetadata.erase(this);
+ HasMetadata = false;
+}
+
+void Value::setMetadata(StringRef Kind, MDNode *Node) {
+ if (!Node && !HasMetadata)
+ return;
+ setMetadata(getContext().getMDKindID(Kind), Node);
+}
+
+void Value::addMetadata(unsigned KindID, MDNode &MD) {
+ assert(isa<Instruction>(this) || isa<GlobalObject>(this));
+ if (!HasMetadata)
+ HasMetadata = true;
+ getContext().pImpl->ValueMetadata[this].insert(KindID, MD);
+}
+
+void Value::addMetadata(StringRef Kind, MDNode &MD) {
+ addMetadata(getContext().getMDKindID(Kind), MD);
+}
+
+bool Value::eraseMetadata(unsigned KindID) {
+ // Nothing to unset.
+ if (!HasMetadata)
+ return false;
+
+ auto &Store = getContext().pImpl->ValueMetadata[this];
+ bool Changed = Store.erase(KindID);
+ if (Store.empty())
+ clearMetadata();
+ return Changed;
+}
+
+void Value::clearMetadata() {
+ if (!HasMetadata)
+ return;
+ assert(getContext().pImpl->ValueMetadata.count(this) &&
+ "bit out of sync with hash table");
+ getContext().pImpl->ValueMetadata.erase(this);
+ HasMetadata = false;
+}
+
+void Instruction::setMetadata(StringRef Kind, MDNode *Node) {
+ if (!Node && !hasMetadata())
+ return;
+ setMetadata(getContext().getMDKindID(Kind), Node);
+}
+
+MDNode *Instruction::getMetadataImpl(StringRef Kind) const {
+ return getMetadataImpl(getContext().getMDKindID(Kind));
+}
+
+void Instruction::dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs) {
+ if (!Value::hasMetadata())
+ return; // Nothing to remove!
+
+ if (KnownIDs.empty()) {
+ // Just drop our entry at the store.
+ clearMetadata();
+ return;
+ }
+
+ SmallSet<unsigned, 4> KnownSet;
+ KnownSet.insert(KnownIDs.begin(), KnownIDs.end());
+
+ auto &MetadataStore = getContext().pImpl->ValueMetadata;
+ auto &Info = MetadataStore[this];
+ assert(!Info.empty() && "bit out of sync with hash table");
+ Info.remove_if([&KnownSet](const MDAttachments::Attachment &I) {
+ return !KnownSet.count(I.MDKind);
+ });
+
+ if (Info.empty()) {
+ // Drop our entry at the store.
+ clearMetadata();
+ }
+}
+
+void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
+ if (!Node && !hasMetadata())
+ return;
+
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (KindID == LLVMContext::MD_dbg) {
+ DbgLoc = DebugLoc(Node);
+ return;
+ }
+
+ Value::setMetadata(KindID, Node);
+}
+
+void Instruction::addAnnotationMetadata(StringRef Name) {
+ MDBuilder MDB(getContext());
+
+ auto *Existing = getMetadata(LLVMContext::MD_annotation);
+ SmallVector<Metadata *, 4> Names;
+ bool AppendName = true;
+ if (Existing) {
+ auto *Tuple = cast<MDTuple>(Existing);
+ for (auto &N : Tuple->operands()) {
+ if (cast<MDString>(N.get())->getString() == Name)
+ AppendName = false;
+ Names.push_back(N.get());
+ }
+ }
+ if (AppendName)
+ Names.push_back(MDB.createString(Name));
+
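+ // The resulting attachment is a tuple of unique strings, e.g.
+ // (illustrative): !annotation !{!"remark-a", !"remark-b"}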
+ MDNode *MD = MDTuple::get(getContext(), Names);
+ setMetadata(LLVMContext::MD_annotation, MD);
+}
+
+AAMDNodes Instruction::getAAMetadata() const {
+ AAMDNodes Result;
+ Result.TBAA = getMetadata(LLVMContext::MD_tbaa);
+ Result.TBAAStruct = getMetadata(LLVMContext::MD_tbaa_struct);
+ Result.Scope = getMetadata(LLVMContext::MD_alias_scope);
+ Result.NoAlias = getMetadata(LLVMContext::MD_noalias);
+ return Result;
+}
+
+void Instruction::setAAMetadata(const AAMDNodes &N) {
+ setMetadata(LLVMContext::MD_tbaa, N.TBAA);
+ setMetadata(LLVMContext::MD_tbaa_struct, N.TBAAStruct);
+ setMetadata(LLVMContext::MD_alias_scope, N.Scope);
+ setMetadata(LLVMContext::MD_noalias, N.NoAlias);
+}
+
+MDNode *Instruction::getMetadataImpl(unsigned KindID) const {
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (KindID == LLVMContext::MD_dbg)
+ return DbgLoc.getAsMDNode();
+ return Value::getMetadata(KindID);
+}
+
+void Instruction::getAllMetadataImpl(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &Result) const {
+ Result.clear();
+
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (DbgLoc) {
+ Result.push_back(
+ std::make_pair((unsigned)LLVMContext::MD_dbg, DbgLoc.getAsMDNode()));
+ }
+ Value::getAllMetadata(Result);
+}
+
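+// Branch weights are expected in the MDBuilder form, e.g. (illustrative)
+// !prof !{!"branch_weights", i32 20, i32 10} for a conditional branch taken
+// 20 times and not taken 10 times.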
+bool Instruction::extractProfMetadata(uint64_t &TrueVal,
+ uint64_t &FalseVal) const {
+ assert(
+ (getOpcode() == Instruction::Br || getOpcode() == Instruction::Select) &&
+ "Looking for branch weights on something besides branch or select");
+
+ auto *ProfileData = getMetadata(LLVMContext::MD_prof);
+ if (!ProfileData || ProfileData->getNumOperands() != 3)
+ return false;
+
+ auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
+ if (!ProfDataName || !ProfDataName->getString().equals("branch_weights"))
+ return false;
+
+ auto *CITrue = mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1));
+ auto *CIFalse = mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2));
+ if (!CITrue || !CIFalse)
+ return false;
+
+ TrueVal = CITrue->getValue().getZExtValue();
+ FalseVal = CIFalse->getValue().getZExtValue();
+
+ return true;
+}
+
+bool Instruction::extractProfTotalWeight(uint64_t &TotalVal) const {
+ assert(
+ (getOpcode() == Instruction::Br || getOpcode() == Instruction::Select ||
+ getOpcode() == Instruction::Call || getOpcode() == Instruction::Invoke ||
+ getOpcode() == Instruction::IndirectBr ||
+ getOpcode() == Instruction::Switch) &&
+ "Looking for branch weights on something besides branch");
+
+ TotalVal = 0;
+ auto *ProfileData = getMetadata(LLVMContext::MD_prof);
+ if (!ProfileData)
+ return false;
+
+ auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
+ if (!ProfDataName)
+ return false;
+
+ if (ProfDataName->getString().equals("branch_weights")) {
+ TotalVal = 0;
+ for (unsigned i = 1; i < ProfileData->getNumOperands(); i++) {
+ auto *V = mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i));
+ if (!V)
+ return false;
+ TotalVal += V->getValue().getZExtValue();
+ }
+ return true;
+ } else if (ProfDataName->getString().equals("VP") &&
+ ProfileData->getNumOperands() > 3) {
+ TotalVal = mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2))
+ ->getValue()
+ .getZExtValue();
+ return true;
+ }
+ return false;
+}
+
+void GlobalObject::copyMetadata(const GlobalObject *Other, unsigned Offset) {
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
+ Other->getAllMetadata(MDs);
+ for (auto &MD : MDs) {
+ // We need to adjust the type metadata offset.
+ if (Offset != 0 && MD.first == LLVMContext::MD_type) {
+ auto *OffsetConst = cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MD.second->getOperand(0))->getValue());
+ Metadata *TypeId = MD.second->getOperand(1);
+ auto *NewOffsetMD = ConstantAsMetadata::get(ConstantInt::get(
+ OffsetConst->getType(), OffsetConst->getValue() + Offset));
+ addMetadata(LLVMContext::MD_type,
+ *MDNode::get(getContext(), {NewOffsetMD, TypeId}));
+ continue;
+ }
+ // If an offset adjustment was specified we need to modify the DIExpression
+ // to prepend the adjustment:
+ // !DIExpression(DW_OP_plus, Offset, [original expr])
+ auto *Attachment = MD.second;
+ if (Offset != 0 && MD.first == LLVMContext::MD_dbg) {
+ DIGlobalVariable *GV = dyn_cast<DIGlobalVariable>(Attachment);
+ DIExpression *E = nullptr;
+ if (!GV) {
+ auto *GVE = cast<DIGlobalVariableExpression>(Attachment);
+ GV = GVE->getVariable();
+ E = GVE->getExpression();
+ }
+ ArrayRef<uint64_t> OrigElements;
+ if (E)
+ OrigElements = E->getElements();
+ std::vector<uint64_t> Elements(OrigElements.size() + 2);
+ Elements[0] = dwarf::DW_OP_plus_uconst;
+ Elements[1] = Offset;
+ llvm::copy(OrigElements, Elements.begin() + 2);
+ E = DIExpression::get(getContext(), Elements);
+ Attachment = DIGlobalVariableExpression::get(getContext(), GV, E);
+ }
+ addMetadata(MD.first, *Attachment);
+ }
+}
+
+void GlobalObject::addTypeMetadata(unsigned Offset, Metadata *TypeID) {
+ addMetadata(
+ LLVMContext::MD_type,
+ *MDTuple::get(getContext(),
+ {ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt64Ty(getContext()), Offset)),
+ TypeID}));
+}
+
+void GlobalObject::setVCallVisibilityMetadata(VCallVisibility Visibility) {
+ // Remove any existing vcall visibility metadata first in case we are
+ // updating.
+ eraseMetadata(LLVMContext::MD_vcall_visibility);
+ addMetadata(LLVMContext::MD_vcall_visibility,
+ *MDNode::get(getContext(),
+ {ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt64Ty(getContext()), Visibility))}));
+}
+
+GlobalObject::VCallVisibility GlobalObject::getVCallVisibility() const {
+ if (MDNode *MD = getMetadata(LLVMContext::MD_vcall_visibility)) {
+ uint64_t Val = cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MD->getOperand(0))->getValue())
+ ->getZExtValue();
+ assert(Val <= 2 && "unknown vcall visibility!");
+ return (VCallVisibility)Val;
+ }
+ return VCallVisibility::VCallVisibilityPublic;
+}
+
+void Function::setSubprogram(DISubprogram *SP) {
+ setMetadata(LLVMContext::MD_dbg, SP);
+}
+
+DISubprogram *Function::getSubprogram() const {
+ return cast_or_null<DISubprogram>(getMetadata(LLVMContext::MD_dbg));
+}
+
+bool Function::isDebugInfoForProfiling() const {
+ if (DISubprogram *SP = getSubprogram()) {
+ if (DICompileUnit *CU = SP->getUnit()) {
+ return CU->getDebugInfoForProfiling();
+ }
+ }
+ return false;
+}
+
+void GlobalVariable::addDebugInfo(DIGlobalVariableExpression *GV) {
+ addMetadata(LLVMContext::MD_dbg, *GV);
+}
+
+void GlobalVariable::getDebugInfo(
+ SmallVectorImpl<DIGlobalVariableExpression *> &GVs) const {
+ SmallVector<MDNode *, 1> MDs;
+ getMetadata(LLVMContext::MD_dbg, MDs);
+ for (MDNode *MD : MDs)
+ GVs.push_back(cast<DIGlobalVariableExpression>(MD));
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/MetadataImpl.h b/contrib/llvm-project/llvm/lib/IR/MetadataImpl.h
new file mode 100644
index 000000000000..b4188dd7d3ee
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/MetadataImpl.h
@@ -0,0 +1,58 @@
+//===- MetadataImpl.h - Helpers for implementing metadata -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has private helpers for implementing metadata types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_METADATAIMPL_H
+#define LLVM_IR_METADATAIMPL_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Metadata.h"
+
+namespace llvm {
+
+template <class T, class InfoT>
+static T *getUniqued(DenseSet<T *, InfoT> &Store,
+ const typename InfoT::KeyTy &Key) {
+ auto I = Store.find_as(Key);
+ return I == Store.end() ? nullptr : *I;
+}
+
+template <class T> T *MDNode::storeImpl(T *N, StorageType Storage) {
+ switch (Storage) {
+ case Uniqued:
+ llvm_unreachable("Cannot unique without a uniquing-store");
+ case Distinct:
+ N->storeDistinctInContext();
+ break;
+ case Temporary:
+ break;
+ }
+ return N;
+}
+
+template <class T, class StoreT>
+T *MDNode::storeImpl(T *N, StorageType Storage, StoreT &Store) {
+ switch (Storage) {
+ case Uniqued:
+ Store.insert(N);
+ break;
+ case Distinct:
+ N->storeDistinctInContext();
+ break;
+ case Temporary:
+ break;
+ }
+ return N;
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/IR/Module.cpp b/contrib/llvm-project/llvm/lib/IR/Module.cpp
new file mode 100644
index 000000000000..b51ea45f651a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Module.cpp
@@ -0,0 +1,850 @@
+//===- Module.cpp - Implement the Module class ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Module class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Module.h"
+#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/TypeFinder.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/RandomNumberGenerator.h"
+#include "llvm/Support/VersionTuple.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Methods to implement the globals and functions lists.
+//
+
+// Explicit instantiations of SymbolTableListTraits since some of the methods
+// are not in the public header file.
+template class llvm::SymbolTableListTraits<Function>;
+template class llvm::SymbolTableListTraits<GlobalVariable>;
+template class llvm::SymbolTableListTraits<GlobalAlias>;
+template class llvm::SymbolTableListTraits<GlobalIFunc>;
+
+//===----------------------------------------------------------------------===//
+// Primitive Module methods.
+//
+
+Module::Module(StringRef MID, LLVMContext &C)
+ : Context(C), ValSymTab(std::make_unique<ValueSymbolTable>(-1)),
+ ModuleID(std::string(MID)), SourceFileName(std::string(MID)), DL("") {
+ Context.addModule(this);
+}
+
+Module::~Module() {
+ Context.removeModule(this);
+ dropAllReferences();
+ GlobalList.clear();
+ FunctionList.clear();
+ AliasList.clear();
+ IFuncList.clear();
+}
+
+std::unique_ptr<RandomNumberGenerator>
+Module::createRNG(const StringRef Name) const {
+ SmallString<32> Salt(Name);
+
+ // This RNG is guaranteed to produce the same random stream only
+ // when the Module ID and thus the input filename is the same. This
+ // might be problematic if the input filename extension changes
+ // (e.g. from .c to .bc or .ll).
+ //
+ // We could store this salt in NamedMetadata, but this would make
+ // the parameter non-const. This would unfortunately make this
+ // interface unusable by any Machine passes, since they only have a
+ // const reference to their IR Module. Alternatively we can always
+ // store salt metadata from the Module constructor.
+ Salt += sys::path::filename(getModuleIdentifier());
+
+ return std::unique_ptr<RandomNumberGenerator>(
+ new RandomNumberGenerator(Salt));
+}
+
+/// getNamedValue - Return the first global value in the module with
+/// the specified name, of arbitrary type. This method returns null
+/// if a global with the specified name is not found.
+GlobalValue *Module::getNamedValue(StringRef Name) const {
+ return cast_or_null<GlobalValue>(getValueSymbolTable().lookup(Name));
+}
+
+unsigned Module::getNumNamedValues() const {
+ return getValueSymbolTable().size();
+}
+
+/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
+/// This ID is uniqued across modules in the current LLVMContext.
+unsigned Module::getMDKindID(StringRef Name) const {
+ return Context.getMDKindID(Name);
+}
+
+/// getMDKindNames - Populate client supplied SmallVector with the name for
+/// custom metadata IDs registered in this LLVMContext. ID #0 is not used,
+/// so it is filled in as an empty string.
+void Module::getMDKindNames(SmallVectorImpl<StringRef> &Result) const {
+ return Context.getMDKindNames(Result);
+}
+
+void Module::getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const {
+ return Context.getOperandBundleTags(Result);
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for easy access to the functions in the module.
+//
+
+// getOrInsertFunction - Look up the specified function in the module symbol
+// table. If it does not exist, add a prototype for the function and return
+// it. This is nice because it allows most passes to get away with not handling
+// the symbol table directly for this common task.
+//
+FunctionCallee Module::getOrInsertFunction(StringRef Name, FunctionType *Ty,
+ AttributeList AttributeList) {
+ // See if we have a definition for the specified function already.
+ GlobalValue *F = getNamedValue(Name);
+ if (!F) {
+ // Nope, add it
+ Function *New = Function::Create(Ty, GlobalVariable::ExternalLinkage,
+ DL.getProgramAddressSpace(), Name);
+ if (!New->isIntrinsic()) // Intrinsics get attrs set on construction
+ New->setAttributes(AttributeList);
+ FunctionList.push_back(New);
+ return {Ty, New}; // Return the new prototype.
+ }
+
+ // If the function exists but has the wrong type, return a bitcast to the
+ // right type.
+ auto *PTy = PointerType::get(Ty, F->getAddressSpace());
+ if (F->getType() != PTy)
+ return {Ty, ConstantExpr::getBitCast(F, PTy)};
+
+ // Otherwise, we just found the existing function or a prototype.
+ return {Ty, F};
+}
+
+FunctionCallee Module::getOrInsertFunction(StringRef Name, FunctionType *Ty) {
+ return getOrInsertFunction(Name, Ty, AttributeList());
+}
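+// Example use of getOrInsertFunction (illustrative sketch; `M`, `Ctx`,
+// `Builder` and `Arg` are assumed to be in scope):
+//   FunctionCallee Hook = M.getOrInsertFunction(
+//       "my_hook", FunctionType::get(Type::getVoidTy(Ctx),
+//                                    {Type::getInt32Ty(Ctx)},
+//                                    /*isVarArg=*/false));
+//   Builder.CreateCall(Hook, {Arg});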
+
+// getFunction - Look up the specified function in the module symbol table.
+// If it does not exist, return null.
+//
+Function *Module::getFunction(StringRef Name) const {
+ return dyn_cast_or_null<Function>(getNamedValue(Name));
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for easy access to the global variables in the module.
+//
+
+/// getGlobalVariable - Look up the specified global variable in the module
+/// symbol table. If it does not exist, return null. The type argument
+/// should be the underlying type of the global, i.e., it should not have
+/// the top-level PointerType, which represents the address of the global.
+/// If AllowLocal is set to true, this function will also return globals
+/// that have local linkage. By default, such globals are not returned.
+///
+GlobalVariable *Module::getGlobalVariable(StringRef Name,
+ bool AllowLocal) const {
+ if (GlobalVariable *Result =
+ dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)))
+ if (AllowLocal || !Result->hasLocalLinkage())
+ return Result;
+ return nullptr;
+}
+
+/// getOrInsertGlobal - Look up the specified global in the module symbol table.
+/// 1. If it does not exist, add a declaration of the global and return it.
+/// 2. Else, the global exists but has the wrong type: return the global
+/// with a constantexpr cast to the right type.
+/// 3. Finally, if the existing global is the correct declaration, return the
+/// existing global.
+Constant *Module::getOrInsertGlobal(
+ StringRef Name, Type *Ty,
+ function_ref<GlobalVariable *()> CreateGlobalCallback) {
+ // See if we have a definition for the specified global already.
+ GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name));
+ if (!GV)
+ GV = CreateGlobalCallback();
+ assert(GV && "The CreateGlobalCallback is expected to create a global");
+
+ // If the variable exists but has the wrong type, return a bitcast to the
+ // right type.
+ Type *GVTy = GV->getType();
+ PointerType *PTy = PointerType::get(Ty, GVTy->getPointerAddressSpace());
+ if (GVTy != PTy)
+ return ConstantExpr::getBitCast(GV, PTy);
+
+  // Otherwise, we just found the existing variable or a declaration.
+ return GV;
+}
+
+// Overload to construct a global variable using its constructor's defaults.
+Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
+ return getOrInsertGlobal(Name, Ty, [&] {
+ return new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage,
+ nullptr, Name);
+ });
+}
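+// Example use of getOrInsertGlobal (illustrative sketch; `M` and `Ctx` are
+// assumed to be in scope):
+//   Constant *C = M.getOrInsertGlobal("my_counter", Type::getInt64Ty(Ctx));
+// C is either the existing global, a newly created external declaration,
+// or a bitcast of a differently-typed existing global.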
+
+//===----------------------------------------------------------------------===//
+// Methods for easy access to the aliases and ifuncs in the module.
+//
+
+// getNamedAlias - Look up the specified alias in the module symbol table.
+// If it does not exist, return null.
+//
+GlobalAlias *Module::getNamedAlias(StringRef Name) const {
+ return dyn_cast_or_null<GlobalAlias>(getNamedValue(Name));
+}
+
+GlobalIFunc *Module::getNamedIFunc(StringRef Name) const {
+ return dyn_cast_or_null<GlobalIFunc>(getNamedValue(Name));
+}
+
+/// getNamedMetadata - Return the first NamedMDNode in the module with the
+/// specified name. This method returns null if a NamedMDNode with the
+/// specified name is not found.
+NamedMDNode *Module::getNamedMetadata(const Twine &Name) const {
+ SmallString<256> NameData;
+ StringRef NameRef = Name.toStringRef(NameData);
+ return NamedMDSymTab.lookup(NameRef);
+}
+
+/// getOrInsertNamedMetadata - Return the first named MDNode in the module
+/// with the specified name. This method returns a new NamedMDNode if a
+/// NamedMDNode with the specified name is not found.
+NamedMDNode *Module::getOrInsertNamedMetadata(StringRef Name) {
+ NamedMDNode *&NMD = NamedMDSymTab[Name];
+ if (!NMD) {
+ NMD = new NamedMDNode(Name);
+ NMD->setParent(this);
+ NamedMDList.push_back(NMD);
+ }
+ return NMD;
+}
+
+/// eraseNamedMetadata - Remove the given NamedMDNode from this module and
+/// delete it.
+void Module::eraseNamedMetadata(NamedMDNode *NMD) {
+ NamedMDSymTab.erase(NMD->getName());
+ NamedMDList.erase(NMD->getIterator());
+}
+
+bool Module::isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB) {
+ if (ConstantInt *Behavior = mdconst::dyn_extract_or_null<ConstantInt>(MD)) {
+ uint64_t Val = Behavior->getLimitedValue();
+ if (Val >= ModFlagBehaviorFirstVal && Val <= ModFlagBehaviorLastVal) {
+ MFB = static_cast<ModFlagBehavior>(Val);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool Module::isValidModuleFlag(const MDNode &ModFlag, ModFlagBehavior &MFB,
+ MDString *&Key, Metadata *&Val) {
+ if (ModFlag.getNumOperands() < 3)
+ return false;
+ if (!isValidModFlagBehavior(ModFlag.getOperand(0), MFB))
+ return false;
+ MDString *K = dyn_cast_or_null<MDString>(ModFlag.getOperand(1));
+ if (!K)
+ return false;
+ Key = K;
+ Val = ModFlag.getOperand(2);
+ return true;
+}
+
+/// getModuleFlagsMetadata - Returns the module flags in the provided vector.
+void Module::
+getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const {
+ const NamedMDNode *ModFlags = getModuleFlagsMetadata();
+ if (!ModFlags) return;
+
+ for (const MDNode *Flag : ModFlags->operands()) {
+ ModFlagBehavior MFB;
+ MDString *Key = nullptr;
+ Metadata *Val = nullptr;
+ if (isValidModuleFlag(*Flag, MFB, Key, Val)) {
+      // Check the operands of the MDNode before accessing them; the
+      // verifier will catch malformed module flags as well.
+ Flags.push_back(ModuleFlagEntry(MFB, Key, Val));
+ }
+ }
+}
+
+/// Return the corresponding value if Key appears in module flags, otherwise
+/// return null.
+Metadata *Module::getModuleFlag(StringRef Key) const {
+ SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
+ getModuleFlagsMetadata(ModuleFlags);
+ for (const ModuleFlagEntry &MFE : ModuleFlags) {
+ if (Key == MFE.Key->getString())
+ return MFE.Val;
+ }
+ return nullptr;
+}
+
+/// getModuleFlagsMetadata - Returns the NamedMDNode in the module that
+/// represents module-level flags. This method returns null if there are no
+/// module-level flags.
+NamedMDNode *Module::getModuleFlagsMetadata() const {
+ return getNamedMetadata("llvm.module.flags");
+}
+
+/// getOrInsertModuleFlagsMetadata - Returns the NamedMDNode in the module that
+/// represents module-level flags. If module-level flags aren't found, it
+/// creates the named metadata that contains them.
+NamedMDNode *Module::getOrInsertModuleFlagsMetadata() {
+ return getOrInsertNamedMetadata("llvm.module.flags");
+}
+
+/// addModuleFlag - Add a module-level flag to the module-level flags
+/// metadata. It will create the module-level flags named metadata if it doesn't
+/// already exist.
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ Metadata *Val) {
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ Metadata *Ops[3] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Behavior)),
+ MDString::get(Context, Key), Val};
+ getOrInsertModuleFlagsMetadata()->addOperand(MDNode::get(Context, Ops));
+}
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ Constant *Val) {
+ addModuleFlag(Behavior, Key, ConstantAsMetadata::get(Val));
+}
+void Module::addModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ uint32_t Val) {
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ addModuleFlag(Behavior, Key, ConstantInt::get(Int32Ty, Val));
+}
+void Module::addModuleFlag(MDNode *Node) {
+ assert(Node->getNumOperands() == 3 &&
+ "Invalid number of operands for module flag!");
+ assert(mdconst::hasa<ConstantInt>(Node->getOperand(0)) &&
+ isa<MDString>(Node->getOperand(1)) &&
+ "Invalid operand types for module flag!");
+ getOrInsertModuleFlagsMetadata()->addOperand(Node);
+}
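+// In textual IR the flags added above take the form (illustrative):
+//   !llvm.module.flags = !{!0}
+//   !0 = !{i32 7, !"Dwarf Version", i32 5}
+// where the first operand is the behavior (here 7 == Max), the second the
+// key, and the third the value.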
+
+void Module::setModuleFlag(ModFlagBehavior Behavior, StringRef Key,
+ Metadata *Val) {
+ NamedMDNode *ModFlags = getOrInsertModuleFlagsMetadata();
+ // Replace the flag if it already exists.
+ for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
+ MDNode *Flag = ModFlags->getOperand(I);
+ ModFlagBehavior MFB;
+ MDString *K = nullptr;
+ Metadata *V = nullptr;
+ if (isValidModuleFlag(*Flag, MFB, K, V) && K->getString() == Key) {
+ Flag->replaceOperandWith(2, Val);
+ return;
+ }
+ }
+ addModuleFlag(Behavior, Key, Val);
+}
+
+void Module::setDataLayout(StringRef Desc) {
+ DL.reset(Desc);
+}
+
+void Module::setDataLayout(const DataLayout &Other) { DL = Other; }
+
+const DataLayout &Module::getDataLayout() const { return DL; }
+
+DICompileUnit *Module::debug_compile_units_iterator::operator*() const {
+ return cast<DICompileUnit>(CUs->getOperand(Idx));
+}
+DICompileUnit *Module::debug_compile_units_iterator::operator->() const {
+ return cast<DICompileUnit>(CUs->getOperand(Idx));
+}
+
+void Module::debug_compile_units_iterator::SkipNoDebugCUs() {
+ while (CUs && (Idx < CUs->getNumOperands()) &&
+ ((*this)->getEmissionKind() == DICompileUnit::NoDebug))
+ ++Idx;
+}
+
+iterator_range<Module::global_object_iterator> Module::global_objects() {
+ return concat<GlobalObject>(functions(), globals());
+}
+iterator_range<Module::const_global_object_iterator>
+Module::global_objects() const {
+ return concat<const GlobalObject>(functions(), globals());
+}
+
+iterator_range<Module::global_value_iterator> Module::global_values() {
+ return concat<GlobalValue>(functions(), globals(), aliases(), ifuncs());
+}
+iterator_range<Module::const_global_value_iterator>
+Module::global_values() const {
+ return concat<const GlobalValue>(functions(), globals(), aliases(), ifuncs());
+}
+
+//===----------------------------------------------------------------------===//
+// Methods to control the materialization of GlobalValues in the Module.
+//
+void Module::setMaterializer(GVMaterializer *GVM) {
+ assert(!Materializer &&
+ "Module already has a GVMaterializer. Call materializeAll"
+ " to clear it out before setting another one.");
+ Materializer.reset(GVM);
+}
+
+Error Module::materialize(GlobalValue *GV) {
+ if (!Materializer)
+ return Error::success();
+
+ return Materializer->materialize(GV);
+}
+
+Error Module::materializeAll() {
+ if (!Materializer)
+ return Error::success();
+ std::unique_ptr<GVMaterializer> M = std::move(Materializer);
+ return M->materializeModule();
+}
+
+Error Module::materializeMetadata() {
+ if (!Materializer)
+ return Error::success();
+ return Materializer->materializeMetadata();
+}
+
+//===----------------------------------------------------------------------===//
+// Other module related stuff.
+//
+
+std::vector<StructType *> Module::getIdentifiedStructTypes() const {
+ // If we have a materializer, it is possible that some unread function
+ // uses a type that is currently not visible to a TypeFinder, so ask
+ // the materializer which types it created.
+ if (Materializer)
+ return Materializer->getIdentifiedStructTypes();
+
+ std::vector<StructType *> Ret;
+ TypeFinder SrcStructTypes;
+ SrcStructTypes.run(*this, true);
+ Ret.assign(SrcStructTypes.begin(), SrcStructTypes.end());
+ return Ret;
+}
+
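+// For an overloaded intrinsic this produces suffixed names of the form
+// "<BaseName>.<N>" (illustratively "llvm.foo.0", "llvm.foo.1", ...),
+// reusing the suffix of an existing declaration whose prototype matches
+// and otherwise reserving the first unused suffix.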
+std::string Module::getUniqueIntrinsicName(StringRef BaseName, Intrinsic::ID Id,
+ const FunctionType *Proto) {
+ auto Encode = [&BaseName](unsigned Suffix) {
+ return (Twine(BaseName) + "." + Twine(Suffix)).str();
+ };
+
+ {
+ // fast path - the prototype is already known
+ auto UinItInserted = UniquedIntrinsicNames.insert({{Id, Proto}, 0});
+ if (!UinItInserted.second)
+ return Encode(UinItInserted.first->second);
+ }
+
+ // Not known yet. A new entry was created with index 0. Check if there already
+ // exists a matching declaration, or select a new entry.
+
+ // Start looking for names with the current known maximum count (or 0).
+ auto NiidItInserted = CurrentIntrinsicIds.insert({BaseName, 0});
+ unsigned Count = NiidItInserted.first->second;
+
+  // This might be slow if a whole population of intrinsics already exists,
+  // but we cache the values for later use.
+ std::string NewName;
+ while (true) {
+ NewName = Encode(Count);
+ GlobalValue *F = getNamedValue(NewName);
+ if (!F) {
+ // Reserve this entry for the new proto
+ UniquedIntrinsicNames[{Id, Proto}] = Count;
+ break;
+ }
+
+ // A declaration with this name already exists. Remember it.
+ FunctionType *FT = dyn_cast<FunctionType>(F->getValueType());
+ auto UinItInserted = UniquedIntrinsicNames.insert({{Id, FT}, Count});
+ if (FT == Proto) {
+ // It was a declaration for our prototype. This entry was allocated in the
+ // beginning. Update the count to match the existing declaration.
+ UinItInserted.first->second = Count;
+ break;
+ }
+
+ ++Count;
+ }
+
+ NiidItInserted.first->second = Count + 1;
+
+ return NewName;
+}
+
+// dropAllReferences() - This function causes all the subelements to "let go"
+// of all references that they are maintaining. This allows one to 'delete' a
+// whole module at a time, even though there may be circular references... first
+// all references are dropped, and all use counts go to zero. Then everything
+// is deleted for real. Note that no operations are valid on an object that
+// has "dropped all references", except operator delete.
+//
+void Module::dropAllReferences() {
+ for (Function &F : *this)
+ F.dropAllReferences();
+
+ for (GlobalVariable &GV : globals())
+ GV.dropAllReferences();
+
+ for (GlobalAlias &GA : aliases())
+ GA.dropAllReferences();
+
+ for (GlobalIFunc &GIF : ifuncs())
+ GIF.dropAllReferences();
+}
+
+unsigned Module::getNumberRegisterParameters() const {
+ auto *Val =
+ cast_or_null<ConstantAsMetadata>(getModuleFlag("NumRegisterParameters"));
+ if (!Val)
+ return 0;
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
+
+unsigned Module::getDwarfVersion() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("Dwarf Version"));
+ if (!Val)
+ return 0;
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
+
+bool Module::isDwarf64() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("DWARF64"));
+ return Val && cast<ConstantInt>(Val->getValue())->isOne();
+}
+
+unsigned Module::getCodeViewFlag() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("CodeView"));
+ if (!Val)
+ return 0;
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
+
+unsigned Module::getInstructionCount() const {
+ unsigned NumInstrs = 0;
+ for (const Function &F : FunctionList)
+ NumInstrs += F.getInstructionCount();
+ return NumInstrs;
+}
+
+Comdat *Module::getOrInsertComdat(StringRef Name) {
+ auto &Entry = *ComdatSymTab.insert(std::make_pair(Name, Comdat())).first;
+ Entry.second.Name = &Entry;
+ return &Entry.second;
+}
+
+PICLevel::Level Module::getPICLevel() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("PIC Level"));
+
+ if (!Val)
+ return PICLevel::NotPIC;
+
+ return static_cast<PICLevel::Level>(
+ cast<ConstantInt>(Val->getValue())->getZExtValue());
+}
+
+void Module::setPICLevel(PICLevel::Level PL) {
+ addModuleFlag(ModFlagBehavior::Max, "PIC Level", PL);
+}
+
+PIELevel::Level Module::getPIELevel() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("PIE Level"));
+
+ if (!Val)
+ return PIELevel::Default;
+
+ return static_cast<PIELevel::Level>(
+ cast<ConstantInt>(Val->getValue())->getZExtValue());
+}
+
+void Module::setPIELevel(PIELevel::Level PL) {
+ addModuleFlag(ModFlagBehavior::Max, "PIE Level", PL);
+}
+
+Optional<CodeModel::Model> Module::getCodeModel() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("Code Model"));
+
+ if (!Val)
+ return None;
+
+ return static_cast<CodeModel::Model>(
+ cast<ConstantInt>(Val->getValue())->getZExtValue());
+}
+
+void Module::setCodeModel(CodeModel::Model CL) {
+ // Linking object files with different code models is undefined behavior
+ // because the compiler would have to generate additional code (to span
+ // longer jumps) if a larger code model is used with a smaller one.
+ // Therefore we will treat attempts to mix code models as an error.
+ addModuleFlag(ModFlagBehavior::Error, "Code Model", CL);
+}
+
+void Module::setProfileSummary(Metadata *M, ProfileSummary::Kind Kind) {
+ if (Kind == ProfileSummary::PSK_CSInstr)
+ setModuleFlag(ModFlagBehavior::Error, "CSProfileSummary", M);
+ else
+ setModuleFlag(ModFlagBehavior::Error, "ProfileSummary", M);
+}
+
+Metadata *Module::getProfileSummary(bool IsCS) const {
+ return (IsCS ? getModuleFlag("CSProfileSummary")
+ : getModuleFlag("ProfileSummary"));
+}
+
+bool Module::getSemanticInterposition() const {
+ Metadata *MF = getModuleFlag("SemanticInterposition");
+
+ auto *Val = cast_or_null<ConstantAsMetadata>(MF);
+ if (!Val)
+ return false;
+
+ return cast<ConstantInt>(Val->getValue())->getZExtValue();
+}
+
+void Module::setSemanticInterposition(bool SI) {
+ addModuleFlag(ModFlagBehavior::Error, "SemanticInterposition", SI);
+}
+
+void Module::setOwnedMemoryBuffer(std::unique_ptr<MemoryBuffer> MB) {
+ OwnedMemoryBuffer = std::move(MB);
+}
+
+bool Module::getRtLibUseGOT() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("RtLibUseGOT"));
+ return Val && (cast<ConstantInt>(Val->getValue())->getZExtValue() > 0);
+}
+
+void Module::setRtLibUseGOT() {
+ addModuleFlag(ModFlagBehavior::Max, "RtLibUseGOT", 1);
+}
+
+UWTableKind Module::getUwtable() const {
+ if (auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("uwtable")))
+ return UWTableKind(cast<ConstantInt>(Val->getValue())->getZExtValue());
+ return UWTableKind::None;
+}
+
+void Module::setUwtable(UWTableKind Kind) {
+ addModuleFlag(ModFlagBehavior::Max, "uwtable", uint32_t(Kind));
+}
+
+FramePointerKind Module::getFramePointer() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("frame-pointer"));
+ return static_cast<FramePointerKind>(
+ Val ? cast<ConstantInt>(Val->getValue())->getZExtValue() : 0);
+}
+
+void Module::setFramePointer(FramePointerKind Kind) {
+ addModuleFlag(ModFlagBehavior::Max, "frame-pointer", static_cast<int>(Kind));
+}
+
+StringRef Module::getStackProtectorGuard() const {
+ Metadata *MD = getModuleFlag("stack-protector-guard");
+ if (auto *MDS = dyn_cast_or_null<MDString>(MD))
+ return MDS->getString();
+ return {};
+}
+
+void Module::setStackProtectorGuard(StringRef Kind) {
+ MDString *ID = MDString::get(getContext(), Kind);
+ addModuleFlag(ModFlagBehavior::Error, "stack-protector-guard", ID);
+}
+
+StringRef Module::getStackProtectorGuardReg() const {
+ Metadata *MD = getModuleFlag("stack-protector-guard-reg");
+ if (auto *MDS = dyn_cast_or_null<MDString>(MD))
+ return MDS->getString();
+ return {};
+}
+
+void Module::setStackProtectorGuardReg(StringRef Reg) {
+ MDString *ID = MDString::get(getContext(), Reg);
+ addModuleFlag(ModFlagBehavior::Error, "stack-protector-guard-reg", ID);
+}
+
+StringRef Module::getStackProtectorGuardSymbol() const {
+ Metadata *MD = getModuleFlag("stack-protector-guard-symbol");
+ if (auto *MDS = dyn_cast_or_null<MDString>(MD))
+ return MDS->getString();
+ return {};
+}
+
+void Module::setStackProtectorGuardSymbol(StringRef Symbol) {
+ MDString *ID = MDString::get(getContext(), Symbol);
+ addModuleFlag(ModFlagBehavior::Error, "stack-protector-guard-symbol", ID);
+}
+
+int Module::getStackProtectorGuardOffset() const {
+ Metadata *MD = getModuleFlag("stack-protector-guard-offset");
+ if (auto *CI = mdconst::dyn_extract_or_null<ConstantInt>(MD))
+ return CI->getSExtValue();
+ return INT_MAX;
+}
+
+void Module::setStackProtectorGuardOffset(int Offset) {
+ addModuleFlag(ModFlagBehavior::Error, "stack-protector-guard-offset", Offset);
+}
+
+unsigned Module::getOverrideStackAlignment() const {
+ Metadata *MD = getModuleFlag("override-stack-alignment");
+ if (auto *CI = mdconst::dyn_extract_or_null<ConstantInt>(MD))
+ return CI->getZExtValue();
+ return 0;
+}
+
+void Module::setOverrideStackAlignment(unsigned Align) {
+ addModuleFlag(ModFlagBehavior::Error, "override-stack-alignment", Align);
+}
+
+static void addSDKVersionMD(const VersionTuple &V, Module &M, StringRef Name) {
+ SmallVector<unsigned, 3> Entries;
+ Entries.push_back(V.getMajor());
+ if (auto Minor = V.getMinor()) {
+ Entries.push_back(*Minor);
+ if (auto Subminor = V.getSubminor())
+ Entries.push_back(*Subminor);
+ // Ignore the 'build' component as it can't be represented in the object
+ // file.
+ }
+ M.addModuleFlag(Module::ModFlagBehavior::Warning, Name,
+ ConstantDataArray::get(M.getContext(), Entries));
+}
+
+void Module::setSDKVersion(const VersionTuple &V) {
+ addSDKVersionMD(V, *this, "SDK Version");
+}
+
+static VersionTuple getSDKVersionMD(Metadata *MD) {
+ auto *CM = dyn_cast_or_null<ConstantAsMetadata>(MD);
+ if (!CM)
+ return {};
+ auto *Arr = dyn_cast_or_null<ConstantDataArray>(CM->getValue());
+ if (!Arr)
+ return {};
+ auto getVersionComponent = [&](unsigned Index) -> Optional<unsigned> {
+ if (Index >= Arr->getNumElements())
+ return None;
+ return (unsigned)Arr->getElementAsInteger(Index);
+ };
+ auto Major = getVersionComponent(0);
+ if (!Major)
+ return {};
+ VersionTuple Result = VersionTuple(*Major);
+ if (auto Minor = getVersionComponent(1)) {
+ Result = VersionTuple(*Major, *Minor);
+ if (auto Subminor = getVersionComponent(2)) {
+ Result = VersionTuple(*Major, *Minor, *Subminor);
+ }
+ }
+ return Result;
+}
+
+VersionTuple Module::getSDKVersion() const {
+ return getSDKVersionMD(getModuleFlag("SDK Version"));
+}
+
+GlobalVariable *llvm::collectUsedGlobalVariables(
+ const Module &M, SmallVectorImpl<GlobalValue *> &Vec, bool CompilerUsed) {
+ const char *Name = CompilerUsed ? "llvm.compiler.used" : "llvm.used";
+ GlobalVariable *GV = M.getGlobalVariable(Name);
+ if (!GV || !GV->hasInitializer())
+ return GV;
+
+ const ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
+ for (Value *Op : Init->operands()) {
+ GlobalValue *G = cast<GlobalValue>(Op->stripPointerCasts());
+ Vec.push_back(G);
+ }
+ return GV;
+}
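+// For reference, with opaque pointers such an array looks like
+// (illustrative):
+//   @llvm.used = appending global [1 x ptr] [ptr @foo], section "llvm.metadata"
+// and each operand is stripped of pointer casts before being collected.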
+
+void Module::setPartialSampleProfileRatio(const ModuleSummaryIndex &Index) {
+ if (auto *SummaryMD = getProfileSummary(/*IsCS*/ false)) {
+ std::unique_ptr<ProfileSummary> ProfileSummary(
+ ProfileSummary::getFromMD(SummaryMD));
+ if (ProfileSummary) {
+ if (ProfileSummary->getKind() != ProfileSummary::PSK_Sample ||
+ !ProfileSummary->isPartialProfile())
+ return;
+ uint64_t BlockCount = Index.getBlockCount();
+ uint32_t NumCounts = ProfileSummary->getNumCounts();
+ if (!NumCounts)
+ return;
+ double Ratio = (double)BlockCount / NumCounts;
+ ProfileSummary->setPartialProfileRatio(Ratio);
+ setProfileSummary(ProfileSummary->getMD(getContext()),
+ ProfileSummary::PSK_Sample);
+ }
+ }
+}
+
+StringRef Module::getDarwinTargetVariantTriple() const {
+ if (const auto *MD = getModuleFlag("darwin.target_variant.triple"))
+ return cast<MDString>(MD)->getString();
+ return "";
+}
+
+void Module::setDarwinTargetVariantTriple(StringRef T) {
+ addModuleFlag(ModFlagBehavior::Override, "darwin.target_variant.triple",
+ MDString::get(getContext(), T));
+}
+
+VersionTuple Module::getDarwinTargetVariantSDKVersion() const {
+ return getSDKVersionMD(getModuleFlag("darwin.target_variant.SDK Version"));
+}
+
+void Module::setDarwinTargetVariantSDKVersion(VersionTuple Version) {
+ addSDKVersionMD(Version, *this, "darwin.target_variant.SDK Version");
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp b/contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp
new file mode 100644
index 000000000000..0ca40a675fe4
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/ModuleSummaryIndex.cpp
@@ -0,0 +1,670 @@
+//===-- ModuleSummaryIndex.cpp - Module Summary Index ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the module index and summary classes for the
+// IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "module-summary-index"
+
+STATISTIC(ReadOnlyLiveGVars,
+ "Number of live global variables marked read only");
+STATISTIC(WriteOnlyLiveGVars,
+ "Number of live global variables marked write only");
+
+static cl::opt<bool> PropagateAttrs("propagate-attrs", cl::init(true),
+ cl::Hidden,
+ cl::desc("Propagate attributes in index"));
+
+static cl::opt<bool> ImportConstantsWithRefs(
+ "import-constants-with-refs", cl::init(true), cl::Hidden,
+ cl::desc("Import constant global variables with references"));
+
+constexpr uint32_t FunctionSummary::ParamAccess::RangeWidth;
+
+FunctionSummary FunctionSummary::ExternalNode =
+ FunctionSummary::makeDummyFunctionSummary({});
+
+GlobalValue::VisibilityTypes ValueInfo::getELFVisibility() const {
+ bool HasProtected = false;
+ for (const auto &S : make_pointee_range(getSummaryList())) {
+ if (S.getVisibility() == GlobalValue::HiddenVisibility)
+ return GlobalValue::HiddenVisibility;
+ if (S.getVisibility() == GlobalValue::ProtectedVisibility)
+ HasProtected = true;
+ }
+ return HasProtected ? GlobalValue::ProtectedVisibility
+ : GlobalValue::DefaultVisibility;
+}
+
+bool ValueInfo::isDSOLocal(bool WithDSOLocalPropagation) const {
+  // With DSOLocal propagation done, the flag in every summary is the same.
+  // Checking the first one is enough.
+ return WithDSOLocalPropagation
+ ? getSummaryList().size() && getSummaryList()[0]->isDSOLocal()
+ : getSummaryList().size() &&
+ llvm::all_of(
+ getSummaryList(),
+ [](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ return Summary->isDSOLocal();
+ });
+}
+
+bool ValueInfo::canAutoHide() const {
+ // Can only auto hide if all copies are eligible to auto hide.
+ return getSummaryList().size() &&
+ llvm::all_of(getSummaryList(),
+ [](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ return Summary->canAutoHide();
+ });
+}
+
+// Gets the number of readonly and writeonly refs in RefEdgeList
+std::pair<unsigned, unsigned> FunctionSummary::specialRefCounts() const {
+  // Here we take advantage of having all readonly and writeonly references
+  // located at the end of the RefEdgeList.
+ auto Refs = refs();
+ unsigned RORefCnt = 0, WORefCnt = 0;
+ int I;
+ for (I = Refs.size() - 1; I >= 0 && Refs[I].isWriteOnly(); --I)
+ WORefCnt++;
+ for (; I >= 0 && Refs[I].isReadOnly(); --I)
+ RORefCnt++;
+ return {RORefCnt, WORefCnt};
+}
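+// For example (illustrative), if refs() is [r, ro1, ro2, wo] with one plain
+// reference, two readonly and one writeonly, specialRefCounts() returns
+// {2 /*RORefCnt*/, 1 /*WORefCnt*/}.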
+
+constexpr uint64_t ModuleSummaryIndex::BitcodeSummaryVersion;
+
+uint64_t ModuleSummaryIndex::getFlags() const {
+ uint64_t Flags = 0;
+ if (withGlobalValueDeadStripping())
+ Flags |= 0x1;
+ if (skipModuleByDistributedBackend())
+ Flags |= 0x2;
+ if (hasSyntheticEntryCounts())
+ Flags |= 0x4;
+ if (enableSplitLTOUnit())
+ Flags |= 0x8;
+ if (partiallySplitLTOUnits())
+ Flags |= 0x10;
+ if (withAttributePropagation())
+ Flags |= 0x20;
+ if (withDSOLocalPropagation())
+ Flags |= 0x40;
+ return Flags;
+}
+
+void ModuleSummaryIndex::setFlags(uint64_t Flags) {
+ assert(Flags <= 0x7f && "Unexpected bits in flag");
+ // 1 bit: WithGlobalValueDeadStripping flag.
+ // Set on combined index only.
+ if (Flags & 0x1)
+ setWithGlobalValueDeadStripping();
+ // 1 bit: SkipModuleByDistributedBackend flag.
+ // Set on combined index only.
+ if (Flags & 0x2)
+ setSkipModuleByDistributedBackend();
+ // 1 bit: HasSyntheticEntryCounts flag.
+ // Set on combined index only.
+ if (Flags & 0x4)
+ setHasSyntheticEntryCounts();
+  // 1 bit: EnableSplitLTOUnit flag.
+ // Set on per module indexes. It is up to the client to validate
+ // the consistency of this flag across modules being linked.
+ if (Flags & 0x8)
+ setEnableSplitLTOUnit();
+ // 1 bit: PartiallySplitLTOUnits flag.
+ // Set on combined index only.
+ if (Flags & 0x10)
+ setPartiallySplitLTOUnits();
+ // 1 bit: WithAttributePropagation flag.
+ // Set on combined index only.
+ if (Flags & 0x20)
+ setWithAttributePropagation();
+ // 1 bit: WithDSOLocalPropagation flag.
+ // Set on combined index only.
+ if (Flags & 0x40)
+ setWithDSOLocalPropagation();
+}
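+// For example (illustrative), Flags == 0x21 encodes both
+// WithGlobalValueDeadStripping (0x1) and WithAttributePropagation (0x20).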
+
+// Collect for the given module the list of functions it defines
+// (GUID -> Summary).
+void ModuleSummaryIndex::collectDefinedFunctionsForModule(
+ StringRef ModulePath, GVSummaryMapTy &GVSummaryMap) const {
+ for (auto &GlobalList : *this) {
+ auto GUID = GlobalList.first;
+ for (auto &GlobSummary : GlobalList.second.SummaryList) {
+ auto *Summary = dyn_cast_or_null<FunctionSummary>(GlobSummary.get());
+ if (!Summary)
+        // Ignore global variables; focus on functions.
+ continue;
+ // Ignore summaries from other modules.
+ if (Summary->modulePath() != ModulePath)
+ continue;
+ GVSummaryMap[GUID] = Summary;
+ }
+ }
+}
+
+GlobalValueSummary *
+ModuleSummaryIndex::getGlobalValueSummary(uint64_t ValueGUID,
+ bool PerModuleIndex) const {
+ auto VI = getValueInfo(ValueGUID);
+ assert(VI && "GlobalValue not found in index");
+ assert((!PerModuleIndex || VI.getSummaryList().size() == 1) &&
+ "Expected a single entry per global value in per-module index");
+ auto &Summary = VI.getSummaryList()[0];
+ return Summary.get();
+}
+
+bool ModuleSummaryIndex::isGUIDLive(GlobalValue::GUID GUID) const {
+ auto VI = getValueInfo(GUID);
+ if (!VI)
+ return true;
+ const auto &SummaryList = VI.getSummaryList();
+ if (SummaryList.empty())
+ return true;
+ for (auto &I : SummaryList)
+ if (isGlobalValueLive(I.get()))
+ return true;
+ return false;
+}
+
+static void
+propagateAttributesToRefs(GlobalValueSummary *S,
+ DenseSet<ValueInfo> &MarkedNonReadWriteOnly) {
+  // If a reference is not readonly or writeonly then the referenced summary
+  // is not read/writeonly either. Note that:
+  // - All references from GlobalVarSummary are conservatively considered as
+  //   not readonly or writeonly. Tracking them properly requires more complex
+  //   analysis than we have now.
+ //
+ // - AliasSummary objects have no refs at all so this function is a no-op
+ // for them.
+ for (auto &VI : S->refs()) {
+ assert(VI.getAccessSpecifier() == 0 || isa<FunctionSummary>(S));
+ if (!VI.getAccessSpecifier()) {
+ if (!MarkedNonReadWriteOnly.insert(VI).second)
+ continue;
+ } else if (MarkedNonReadWriteOnly.contains(VI))
+ continue;
+ for (auto &Ref : VI.getSummaryList())
+      // If a reference to an alias is not read/writeonly then the aliasee
+      // is not read/writeonly either.
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(Ref->getBaseObject())) {
+ if (!VI.isReadOnly())
+ GVS->setReadOnly(false);
+ if (!VI.isWriteOnly())
+ GVS->setWriteOnly(false);
+ }
+ }
+}
+
+// Do the access attribute and DSOLocal propagation in combined index.
+// The goal of attribute propagation is internalization of readonly (RO)
+// or writeonly (WO) variables. To determine which variables are RO or WO
+// and which are not, we take the following steps:
+// - During analysis we speculatively assign readonly and writeonly
+//   attributes to all variables which can be internalized. When computing
+//   a function summary we also assign a readonly or writeonly attribute to
+//   a reference if the function doesn't modify the referenced variable
+//   (readonly) or doesn't read it (writeonly).
+//
+// - After computing dead symbols in combined index we do the attribute
+// and DSOLocal propagation. During this step we:
+// a. clear RO and WO attributes from variables which are preserved or
+// can't be imported
+// b. clear RO and WO attributes from variables referenced by any global
+// variable initializer
+// c. clear RO attribute from variable referenced by a function when
+// reference is not readonly
+// d. clear WO attribute from variable referenced by a function when
+// reference is not writeonly
+// e. clear IsDSOLocal flag in every summary if any of them is false.
+//
+// Because of (c, d) we don't internalize variables read by function A
+// and modified by function B.
+//
+// Internalization itself happens in the backend after import is finished
+// See internalizeGVsAfterImport.
+void ModuleSummaryIndex::propagateAttributes(
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+ if (!PropagateAttrs)
+ return;
+ DenseSet<ValueInfo> MarkedNonReadWriteOnly;
+ for (auto &P : *this) {
+ bool IsDSOLocal = true;
+ for (auto &S : P.second.SummaryList) {
+ if (!isGlobalValueLive(S.get())) {
+ // computeDeadSymbolsAndUpdateIndirectCalls should have marked all
+ // copies live. Note that it is possible that there is a GUID collision
+        // between internal symbols with the same name in different files
+        // that share a base name but lack enough distinguishing path.
+        // Because computeDeadSymbolsAndUpdateIndirectCalls conservatively
+        // marks all copies live, we can assert here that all copies are
+        // dead if any copy is dead.
+ assert(llvm::none_of(
+ P.second.SummaryList,
+ [&](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ return isGlobalValueLive(Summary.get());
+ }));
+ // We don't examine references from dead objects
+ break;
+ }
+
+      // A global variable can't be marked read/writeonly if it is not eligible
+ // to import since we need to ensure that all external references get
+ // a local (imported) copy. It also can't be marked read/writeonly if
+ // it or any alias (since alias points to the same memory) are preserved
+ // or notEligibleToImport, since either of those means there could be
+ // writes (or reads in case of writeonly) that are not visible (because
+ // preserved means it could have external to DSO writes or reads, and
+ // notEligibleToImport means it could have writes or reads via inline
+ // assembly leading it to be in the @llvm.*used).
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(S->getBaseObject()))
+ // Here we intentionally pass S.get() not GVS, because S could be
+ // an alias. We don't analyze references here, because we have to
+ // know exactly if GV is readonly to do so.
+ if (!canImportGlobalVar(S.get(), /* AnalyzeRefs */ false) ||
+ GUIDPreservedSymbols.count(P.first)) {
+ GVS->setReadOnly(false);
+ GVS->setWriteOnly(false);
+ }
+ propagateAttributesToRefs(S.get(), MarkedNonReadWriteOnly);
+
+ // If the flag from any summary is false, the GV is not DSOLocal.
+ IsDSOLocal &= S->isDSOLocal();
+ }
+ if (!IsDSOLocal)
+ // Mark the flag in all summaries false so that we can do quick check
+ // without going through the whole list.
+ for (const std::unique_ptr<GlobalValueSummary> &Summary :
+ P.second.SummaryList)
+ Summary->setDSOLocal(false);
+ }
+ setWithAttributePropagation();
+ setWithDSOLocalPropagation();
+ if (llvm::AreStatisticsEnabled())
+ for (auto &P : *this)
+ if (P.second.SummaryList.size())
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(
+ P.second.SummaryList[0]->getBaseObject()))
+ if (isGlobalValueLive(GVS)) {
+ if (GVS->maybeReadOnly())
+ ReadOnlyLiveGVars++;
+ if (GVS->maybeWriteOnly())
+ WriteOnlyLiveGVars++;
+ }
+}
+
+bool ModuleSummaryIndex::canImportGlobalVar(GlobalValueSummary *S,
+ bool AnalyzeRefs) const {
+ auto HasRefsPreventingImport = [this](const GlobalVarSummary *GVS) {
+    // We don't analyze GV references during attribute propagation, so
+    // a GV with a non-trivial initializer can be marked either read- or
+    // write-only.
+    // Importing the definition of a readonly GV with a non-trivial
+    // initializer allows us to do some extra optimizations (like converting
+    // indirect calls to direct calls).
+    // The definition of a writeonly GV with a non-trivial initializer should
+    // also be imported. Not doing so will result in:
+    // a) GV internalization in the source module (because it's writeonly),
+    // b) importing of the GV declaration to the destination module as a
+    //    result of promotion, and
+    // c) a link error (external declaration with internal definition).
+    // However, instead of promoting the objects referenced by a writeonly
+    // GV's initializer, we convert that initializer to 'zeroinitializer'.
+ return !(ImportConstantsWithRefs && GVS->isConstant()) &&
+ !isReadOnly(GVS) && !isWriteOnly(GVS) && GVS->refs().size();
+ };
+ auto *GVS = cast<GlobalVarSummary>(S->getBaseObject());
+
+  // A global variable with a non-trivial initializer can be imported
+ // if it's readonly. This gives us extra opportunities for constant
+ // folding and converting indirect calls to direct calls. We don't
+ // analyze GV references during attribute propagation, because we
+ // don't know yet if it is readonly or not.
+ return !GlobalValue::isInterposableLinkage(S->linkage()) &&
+ !S->notEligibleToImport() &&
+ (!AnalyzeRefs || !HasRefsPreventingImport(GVS));
+}
+
+// TODO: write a graphviz dumper for SCCs (see ModuleSummaryIndex::exportToDot)
+// then delete this function and update its tests
+LLVM_DUMP_METHOD
+void ModuleSummaryIndex::dumpSCCs(raw_ostream &O) {
+ for (scc_iterator<ModuleSummaryIndex *> I =
+ scc_begin<ModuleSummaryIndex *>(this);
+ !I.isAtEnd(); ++I) {
+ O << "SCC (" << utostr(I->size()) << " node" << (I->size() == 1 ? "" : "s")
+ << ") {\n";
+ for (const ValueInfo &V : *I) {
+ FunctionSummary *F = nullptr;
+ if (V.getSummaryList().size())
+ F = cast<FunctionSummary>(V.getSummaryList().front().get());
+ O << " " << (F == nullptr ? "External" : "") << " " << utostr(V.getGUID())
+ << (I.hasCycle() ? " (has cycle)" : "") << "\n";
+ }
+ O << "}\n";
+ }
+}
+
+namespace {
+struct Attributes {
+ void add(const Twine &Name, const Twine &Value,
+ const Twine &Comment = Twine());
+ void addComment(const Twine &Comment);
+ std::string getAsString() const;
+
+ std::vector<std::string> Attrs;
+ std::string Comments;
+};
+
+struct Edge {
+ uint64_t SrcMod;
+ int Hotness;
+ GlobalValue::GUID Src;
+ GlobalValue::GUID Dst;
+};
+}
+
+void Attributes::add(const Twine &Name, const Twine &Value,
+ const Twine &Comment) {
+ std::string A = Name.str();
+ A += "=\"";
+ A += Value.str();
+ A += "\"";
+ Attrs.push_back(A);
+ addComment(Comment);
+}
+
+void Attributes::addComment(const Twine &Comment) {
+ if (!Comment.isTriviallyEmpty()) {
+ if (Comments.empty())
+ Comments = " // ";
+ else
+ Comments += ", ";
+ Comments += Comment.str();
+ }
+}
+
+std::string Attributes::getAsString() const {
+ if (Attrs.empty())
+ return "";
+
+ std::string Ret = "[";
+ for (auto &A : Attrs)
+ Ret += A + ",";
+ Ret.pop_back();
+ Ret += "];";
+ Ret += Comments;
+ return Ret;
+}
+
+static std::string linkageToString(GlobalValue::LinkageTypes LT) {
+ switch (LT) {
+ case GlobalValue::ExternalLinkage:
+ return "extern";
+ case GlobalValue::AvailableExternallyLinkage:
+ return "av_ext";
+ case GlobalValue::LinkOnceAnyLinkage:
+ return "linkonce";
+ case GlobalValue::LinkOnceODRLinkage:
+ return "linkonce_odr";
+ case GlobalValue::WeakAnyLinkage:
+ return "weak";
+ case GlobalValue::WeakODRLinkage:
+ return "weak_odr";
+ case GlobalValue::AppendingLinkage:
+ return "appending";
+ case GlobalValue::InternalLinkage:
+ return "internal";
+ case GlobalValue::PrivateLinkage:
+ return "private";
+ case GlobalValue::ExternalWeakLinkage:
+ return "extern_weak";
+ case GlobalValue::CommonLinkage:
+ return "common";
+ }
+
+ return "<unknown>";
+}
+
+static std::string fflagsToString(FunctionSummary::FFlags F) {
+ auto FlagValue = [](unsigned V) { return V ? '1' : '0'; };
+ char FlagRep[] = {FlagValue(F.ReadNone),
+ FlagValue(F.ReadOnly),
+ FlagValue(F.NoRecurse),
+ FlagValue(F.ReturnDoesNotAlias),
+ FlagValue(F.NoInline),
+ FlagValue(F.AlwaysInline),
+ FlagValue(F.NoUnwind),
+ FlagValue(F.MayThrow),
+ FlagValue(F.HasUnknownCall),
+ FlagValue(F.MustBeUnreachable),
+ 0};
+
+ return FlagRep;
+}
+
+// Get string representation of function instruction count and flags.
+static std::string getSummaryAttributes(GlobalValueSummary* GVS) {
+ auto *FS = dyn_cast_or_null<FunctionSummary>(GVS);
+ if (!FS)
+ return "";
+
+ return std::string("inst: ") + std::to_string(FS->instCount()) +
+ ", ffl: " + fflagsToString(FS->fflags());
+}
+
+static std::string getNodeVisualName(GlobalValue::GUID Id) {
+ return std::string("@") + std::to_string(Id);
+}
+
+static std::string getNodeVisualName(const ValueInfo &VI) {
+ return VI.name().empty() ? getNodeVisualName(VI.getGUID()) : VI.name().str();
+}
+
+static std::string getNodeLabel(const ValueInfo &VI, GlobalValueSummary *GVS) {
+ if (isa<AliasSummary>(GVS))
+ return getNodeVisualName(VI);
+
+ std::string Attrs = getSummaryAttributes(GVS);
+ std::string Label =
+ getNodeVisualName(VI) + "|" + linkageToString(GVS->linkage());
+ if (!Attrs.empty())
+ Label += std::string(" (") + Attrs + ")";
+ Label += "}";
+
+ return Label;
+}
+
+// Write the definition of an external node, which doesn't have any
+// specific module associated with it. Typically this is a function
+// or variable defined in a native object file or library.
+static void defineExternalNode(raw_ostream &OS, const char *Pfx,
+ const ValueInfo &VI, GlobalValue::GUID Id) {
+ auto StrId = std::to_string(Id);
+ OS << " " << StrId << " [label=\"";
+
+ if (VI) {
+ OS << getNodeVisualName(VI);
+ } else {
+ OS << getNodeVisualName(Id);
+ }
+ OS << "\"]; // defined externally\n";
+}
+
+static bool hasReadOnlyFlag(const GlobalValueSummary *S) {
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(S))
+ return GVS->maybeReadOnly();
+ return false;
+}
+
+static bool hasWriteOnlyFlag(const GlobalValueSummary *S) {
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(S))
+ return GVS->maybeWriteOnly();
+ return false;
+}
+
+static bool hasConstantFlag(const GlobalValueSummary *S) {
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(S))
+ return GVS->isConstant();
+ return false;
+}
+
+void ModuleSummaryIndex::exportToDot(
+ raw_ostream &OS,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) const {
+ std::vector<Edge> CrossModuleEdges;
+ DenseMap<GlobalValue::GUID, std::vector<uint64_t>> NodeMap;
+ using GVSOrderedMapTy = std::map<GlobalValue::GUID, GlobalValueSummary *>;
+ std::map<StringRef, GVSOrderedMapTy> ModuleToDefinedGVS;
+ collectDefinedGVSummariesPerModule(ModuleToDefinedGVS);
+
+  // Get the node identifier in the form MXXX_<GUID>. The MXXX prefix is
+  // required, because we may have multiple linkonce function summaries.
+ auto NodeId = [](uint64_t ModId, GlobalValue::GUID Id) {
+ return ModId == (uint64_t)-1 ? std::to_string(Id)
+ : std::string("M") + std::to_string(ModId) +
+ "_" + std::to_string(Id);
+ };
+
+ auto DrawEdge = [&](const char *Pfx, uint64_t SrcMod, GlobalValue::GUID SrcId,
+ uint64_t DstMod, GlobalValue::GUID DstId,
+ int TypeOrHotness) {
+ // 0 - alias
+ // 1 - reference
+ // 2 - constant reference
+ // 3 - writeonly reference
+ // Other value: (hotness - 4).
+ TypeOrHotness += 4;
+ static const char *EdgeAttrs[] = {
+ " [style=dotted]; // alias",
+ " [style=dashed]; // ref",
+ " [style=dashed,color=forestgreen]; // const-ref",
+ " [style=dashed,color=violetred]; // writeOnly-ref",
+ " // call (hotness : Unknown)",
+ " [color=blue]; // call (hotness : Cold)",
+ " // call (hotness : None)",
+ " [color=brown]; // call (hotness : Hot)",
+ " [style=bold,color=red]; // call (hotness : Critical)"};
+
+ assert(static_cast<size_t>(TypeOrHotness) <
+ sizeof(EdgeAttrs) / sizeof(EdgeAttrs[0]));
+ OS << Pfx << NodeId(SrcMod, SrcId) << " -> " << NodeId(DstMod, DstId)
+ << EdgeAttrs[TypeOrHotness] << "\n";
+ };
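+  // An emitted edge line produced by DrawEdge looks like (illustrative):
+  //   M0_123 -> M0_456 [style=dashed]; // ref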
+
+ OS << "digraph Summary {\n";
+ for (auto &ModIt : ModuleToDefinedGVS) {
+ auto ModId = getModuleId(ModIt.first);
+ OS << " // Module: " << ModIt.first << "\n";
+ OS << " subgraph cluster_" << std::to_string(ModId) << " {\n";
+ OS << " style = filled;\n";
+ OS << " color = lightgrey;\n";
+ OS << " label = \"" << sys::path::filename(ModIt.first) << "\";\n";
+ OS << " node [style=filled,fillcolor=lightblue];\n";
+
+ auto &GVSMap = ModIt.second;
+ auto Draw = [&](GlobalValue::GUID IdFrom, GlobalValue::GUID IdTo, int Hotness) {
+ if (!GVSMap.count(IdTo)) {
+ CrossModuleEdges.push_back({ModId, Hotness, IdFrom, IdTo});
+ return;
+ }
+ DrawEdge(" ", ModId, IdFrom, ModId, IdTo, Hotness);
+ };
+
+ for (auto &SummaryIt : GVSMap) {
+ NodeMap[SummaryIt.first].push_back(ModId);
+ auto Flags = SummaryIt.second->flags();
+ Attributes A;
+ if (isa<FunctionSummary>(SummaryIt.second)) {
+ A.add("shape", "record", "function");
+ } else if (isa<AliasSummary>(SummaryIt.second)) {
+ A.add("style", "dotted,filled", "alias");
+ A.add("shape", "box");
+ } else {
+ A.add("shape", "Mrecord", "variable");
+ if (Flags.Live && hasReadOnlyFlag(SummaryIt.second))
+ A.addComment("immutable");
+ if (Flags.Live && hasWriteOnlyFlag(SummaryIt.second))
+ A.addComment("writeOnly");
+ if (Flags.Live && hasConstantFlag(SummaryIt.second))
+ A.addComment("constant");
+ }
+ if (Flags.Visibility)
+ A.addComment("visibility");
+ if (Flags.DSOLocal)
+ A.addComment("dsoLocal");
+ if (Flags.CanAutoHide)
+ A.addComment("canAutoHide");
+ if (GUIDPreservedSymbols.count(SummaryIt.first))
+ A.addComment("preserved");
+
+ auto VI = getValueInfo(SummaryIt.first);
+ A.add("label", getNodeLabel(VI, SummaryIt.second));
+ if (!Flags.Live)
+ A.add("fillcolor", "red", "dead");
+ else if (Flags.NotEligibleToImport)
+ A.add("fillcolor", "yellow", "not eligible to import");
+
+ OS << " " << NodeId(ModId, SummaryIt.first) << " " << A.getAsString()
+ << "\n";
+ }
+ OS << " // Edges:\n";
+
+ for (auto &SummaryIt : GVSMap) {
+ auto *GVS = SummaryIt.second;
+ for (auto &R : GVS->refs())
+ Draw(SummaryIt.first, R.getGUID(),
+ R.isWriteOnly() ? -1 : (R.isReadOnly() ? -2 : -3));
+
+ if (auto *AS = dyn_cast_or_null<AliasSummary>(SummaryIt.second)) {
+ Draw(SummaryIt.first, AS->getAliaseeGUID(), -4);
+ continue;
+ }
+
+ if (auto *FS = dyn_cast_or_null<FunctionSummary>(SummaryIt.second))
+ for (auto &CGEdge : FS->calls())
+ Draw(SummaryIt.first, CGEdge.first.getGUID(),
+ static_cast<int>(CGEdge.second.Hotness));
+ }
+ OS << " }\n";
+ }
+
+ OS << " // Cross-module edges:\n";
+ for (auto &E : CrossModuleEdges) {
+ auto &ModList = NodeMap[E.Dst];
+ if (ModList.empty()) {
+ defineExternalNode(OS, " ", getValueInfo(E.Dst), E.Dst);
+ // Add fake module to the list to draw an edge to an external node
+ // in the loop below.
+ ModList.push_back(-1);
+ }
+ for (auto DstMod : ModList)
+      // The edge representing a call or ref is drawn to every module where
+      // the target symbol is defined. When the target is a linkonce symbol
+      // there can be multiple edges representing a single call or ref, both
+      // intra-module and cross-module. As we've already drawn all
+      // intra-module edges above, we skip them here.
+ if (DstMod != E.SrcMod)
+ DrawEdge(" ", E.SrcMod, E.Src, DstMod, E.Dst, E.Hotness);
+ }
+
+ OS << "}";
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/Operator.cpp b/contrib/llvm-project/llvm/lib/IR/Operator.cpp
new file mode 100644
index 000000000000..c2a4a7c29915
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Operator.cpp
@@ -0,0 +1,250 @@
+//===-- Operator.cpp - Implement the LLVM operators -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the non-inline methods for the LLVM Operator classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instructions.h"
+
+#include "ConstantsContext.h"
+
+namespace llvm {
+bool Operator::hasPoisonGeneratingFlags() const {
+ switch (getOpcode()) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::Shl: {
+ auto *OBO = cast<OverflowingBinaryOperator>(this);
+ return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
+ }
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::AShr:
+ case Instruction::LShr:
+ return cast<PossiblyExactOperator>(this)->isExact();
+ case Instruction::GetElementPtr: {
+ auto *GEP = cast<GEPOperator>(this);
+ // Note: inrange exists on constexpr only
+ return GEP->isInBounds() || GEP->getInRangeIndex() != None;
+ }
+ default:
+ if (const auto *FP = dyn_cast<FPMathOperator>(this))
+ return FP->hasNoNaNs() || FP->hasNoInfs();
+ return false;
+ }
+}
+
+Type *GEPOperator::getSourceElementType() const {
+ if (auto *I = dyn_cast<GetElementPtrInst>(this))
+ return I->getSourceElementType();
+ return cast<GetElementPtrConstantExpr>(this)->getSourceElementType();
+}
+
+Type *GEPOperator::getResultElementType() const {
+ if (auto *I = dyn_cast<GetElementPtrInst>(this))
+ return I->getResultElementType();
+ return cast<GetElementPtrConstantExpr>(this)->getResultElementType();
+}
+
+Align GEPOperator::getMaxPreservedAlignment(const DataLayout &DL) const {
+  /// Compute the worst possible offset for every level of the GEP and
+  /// accumulate the minimum alignment into Result.
+
+ Align Result = Align(llvm::Value::MaximumAlignment);
+ for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
+ GTI != GTE; ++GTI) {
+ int64_t Offset = 1;
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ Offset = SL->getElementOffset(OpC->getZExtValue());
+ } else {
+      assert(GTI.isSequential() && "should be sequential");
+      /// If the index isn't known, we use 1, since that is the index that
+      /// gives the worst-case alignment for the offset.
+ int64_t ElemCount = 1;
+ if (OpC)
+ ElemCount = OpC->getZExtValue();
+ Offset = DL.getTypeAllocSize(GTI.getIndexedType()) * ElemCount;
+ }
+ Result = Align(MinAlign(Offset, Result.value()));
+ }
+ return Result;
+}
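+// Worked example for getMaxPreservedAlignment (illustrative; assumes i32 is
+// 4 bytes): for a GEP selecting field 1 of {i32, i32} (byte offset 4), the
+// loop computes MinAlign(4, MaximumAlignment) == 4, so at most 4-byte
+// alignment of the base pointer is known to survive the GEP.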
+
+bool GEPOperator::accumulateConstantOffset(
+ const DataLayout &DL, APInt &Offset,
+ function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
+ assert(Offset.getBitWidth() ==
+ DL.getIndexSizeInBits(getPointerAddressSpace()) &&
+ "The offset bit width does not match DL specification.");
+ SmallVector<const Value *> Index(llvm::drop_begin(operand_values()));
+ return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
+ DL, Offset, ExternalAnalysis);
+}
+
+bool GEPOperator::accumulateConstantOffset(
+ Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
+ APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
+ bool UsedExternalAnalysis = false;
+ auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
+ Index = Index.sextOrTrunc(Offset.getBitWidth());
+ APInt IndexedSize = APInt(Offset.getBitWidth(), Size);
+ // For array or vector indices, scale the index by the size of the type.
+ if (!UsedExternalAnalysis) {
+ Offset += Index * IndexedSize;
+ } else {
+ // External Analysis can return a result higher/lower than the value
+ // represents. We need to detect overflow/underflow.
+ bool Overflow = false;
+ APInt OffsetPlus = Index.smul_ov(IndexedSize, Overflow);
+ if (Overflow)
+ return false;
+ Offset = Offset.sadd_ov(OffsetPlus, Overflow);
+ if (Overflow)
+ return false;
+ }
+ return true;
+ };
+ auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
+ SourceType, Index.begin());
+ auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
+ for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
+ // Scalable vectors are multiplied by a runtime constant.
+ bool ScalableType = false;
+ if (isa<ScalableVectorType>(GTI.getIndexedType()))
+ ScalableType = true;
+
+ Value *V = GTI.getOperand();
+ StructType *STy = GTI.getStructTypeOrNull();
+ // Handle ConstantInt if possible.
+ if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
+ if (ConstOffset->isZero())
+ continue;
+      // If the type is scalable and the constant is not zero
+      // (vscale * n * 0 = 0), bail out.
+ if (ScalableType)
+ return false;
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (STy) {
+ unsigned ElementIdx = ConstOffset->getZExtValue();
+ const StructLayout *SL = DL.getStructLayout(STy);
+ // Element offset is in bytes.
+ if (!AccumulateOffset(
+ APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx)),
+ 1))
+ return false;
+ continue;
+ }
+ if (!AccumulateOffset(ConstOffset->getValue(),
+ DL.getTypeAllocSize(GTI.getIndexedType())))
+ return false;
+ continue;
+ }
+
+ // The operand is not constant, check if an external analysis was provided.
+    // External analysis is not applicable to a struct type.
+ if (!ExternalAnalysis || STy || ScalableType)
+ return false;
+ APInt AnalysisIndex;
+ if (!ExternalAnalysis(*V, AnalysisIndex))
+ return false;
+ UsedExternalAnalysis = true;
+ if (!AccumulateOffset(AnalysisIndex,
+ DL.getTypeAllocSize(GTI.getIndexedType())))
+ return false;
+ }
+ return true;
+}
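+// Worked example for the accumulation above (illustrative; assumes a 64-bit
+// index width and that {i32, i32} occupies 8 bytes):
+//   getelementptr {i32, i32}, ptr %p, i64 1, i32 1
+// accumulates 1 * 8 bytes for the leading index plus a field offset of
+// 4 bytes, so Offset ends up as 12.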
+
+bool GEPOperator::collectOffset(
+ const DataLayout &DL, unsigned BitWidth,
+ MapVector<Value *, APInt> &VariableOffsets,
+ APInt &ConstantOffset) const {
+ assert(BitWidth == DL.getIndexSizeInBits(getPointerAddressSpace()) &&
+ "The offset bit width does not match DL specification.");
+
+ auto CollectConstantOffset = [&](APInt Index, uint64_t Size) {
+ Index = Index.sextOrTrunc(BitWidth);
+ APInt IndexedSize = APInt(BitWidth, Size);
+ ConstantOffset += Index * IndexedSize;
+ };
+
+ for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
+ GTI != GTE; ++GTI) {
+ // Scalable vectors are multiplied by a runtime constant.
+ bool ScalableType = isa<ScalableVectorType>(GTI.getIndexedType());
+
+ Value *V = GTI.getOperand();
+ StructType *STy = GTI.getStructTypeOrNull();
+ // Handle ConstantInt if possible.
+ if (auto ConstOffset = dyn_cast<ConstantInt>(V)) {
+ if (ConstOffset->isZero())
+ continue;
+      // If the type is scalable and the constant is not zero
+      // (vscale * n * 0 = 0), bail out.
+ // TODO: If the runtime value is accessible at any point before DWARF
+ // emission, then we could potentially keep a forward reference to it
+ // in the debug value to be filled in later.
+ if (ScalableType)
+ return false;
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (STy) {
+ unsigned ElementIdx = ConstOffset->getZExtValue();
+ const StructLayout *SL = DL.getStructLayout(STy);
+ // Element offset is in bytes.
+ CollectConstantOffset(APInt(BitWidth, SL->getElementOffset(ElementIdx)),
+ 1);
+ continue;
+ }
+ CollectConstantOffset(ConstOffset->getValue(),
+ DL.getTypeAllocSize(GTI.getIndexedType()));
+ continue;
+ }
+
+ if (STy || ScalableType)
+ return false;
+ APInt IndexedSize =
+ APInt(BitWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
+ // Insert an initial offset of 0 for V iff none exists already, then
+ // increment the offset by IndexedSize.
+ if (!IndexedSize.isZero()) {
+ VariableOffsets.insert({V, APInt(BitWidth, 0)});
+ VariableOffsets[V] += IndexedSize;
+ }
+ }
+ return true;
+}
+
+void FastMathFlags::print(raw_ostream &O) const {
+ if (all())
+ O << " fast";
+ else {
+ if (allowReassoc())
+ O << " reassoc";
+ if (noNaNs())
+ O << " nnan";
+ if (noInfs())
+ O << " ninf";
+ if (noSignedZeros())
+ O << " nsz";
+ if (allowReciprocal())
+ O << " arcp";
+ if (allowContract())
+ O << " contract";
+ if (approxFunc())
+ O << " afn";
+ }
+}
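+// For example (illustrative), FastMathFlags with only nnan and nsz set
+// print as " nnan nsz", while a value with all flags set prints as the
+// single token " fast".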
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/IR/OptBisect.cpp b/contrib/llvm-project/llvm/lib/IR/OptBisect.cpp
new file mode 100644
index 000000000000..c9054dba344a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/OptBisect.cpp
@@ -0,0 +1,58 @@
+//===- OptBisect.cpp - LLVM Bisect support --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This file implements support for bisecting optimizations based on a
+/// command line option.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/OptBisect.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace llvm;
+
+static cl::opt<int> OptBisectLimit("opt-bisect-limit", cl::Hidden,
+ cl::init(OptBisect::Disabled), cl::Optional,
+ cl::cb<void, int>([](int Limit) {
+ llvm::getOptBisector().setLimit(Limit);
+ }),
+ cl::desc("Maximum optimization to perform"));
+
+static void printPassMessage(StringRef Name, int PassNum, StringRef TargetDesc,
+ bool Running) {
+ StringRef Status = Running ? "" : "NOT ";
+ errs() << "BISECT: " << Status << "running pass "
+ << "(" << PassNum << ") " << Name << " on " << TargetDesc << "\n";
+}
+
+bool OptBisect::shouldRunPass(const Pass *P, StringRef IRDescription) {
+ assert(isEnabled());
+
+ return checkPass(P->getPassName(), IRDescription);
+}
+
+bool OptBisect::checkPass(const StringRef PassName,
+ const StringRef TargetDesc) {
+ assert(isEnabled());
+
+ int CurBisectNum = ++LastBisectNum;
+ bool ShouldRun = (BisectLimit == -1 || CurBisectNum <= BisectLimit);
+ printPassMessage(PassName, CurBisectNum, TargetDesc, ShouldRun);
+ return ShouldRun;
+}
+
+const int OptBisect::Disabled;
+
+OptBisect &llvm::getOptBisector() {
+ static OptBisect OptBisector;
+ return OptBisector;
+}
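
A hedged sketch of how client code might consult the bisector added above; the pass and target descriptions here are made up for illustration:

    #include "llvm/IR/OptBisect.h"
    using namespace llvm;

    bool shouldRunExamplePass() {
      OptBisect &Bisector = getOptBisector();
      if (!Bisector.isEnabled())
        return true; // no -opt-bisect-limit given; always run
      // Each query consumes one global pass number; numbers above the
      // limit print "BISECT: NOT running ..." and return false.
      return Bisector.checkPass("ExamplePass", "function (foo)");
    }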
diff --git a/contrib/llvm-project/llvm/lib/IR/Pass.cpp b/contrib/llvm-project/llvm/lib/IR/Pass.cpp
new file mode 100644
index 000000000000..fe0bfd81a81e
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Pass.cpp
@@ -0,0 +1,291 @@
+//===- Pass.cpp - LLVM Pass Infrastructure Implementation -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM Pass infrastructure. It is primarily
+// responsible for ensuring that passes are executed and batched together
+// optimally.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Pass.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassNameParser.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/OptBisect.h"
+#include "llvm/PassInfo.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+#ifdef EXPENSIVE_CHECKS
+#include "llvm/IR/StructuralHash.h"
+#endif
+
+using namespace llvm;
+
+#define DEBUG_TYPE "ir"
+
+//===----------------------------------------------------------------------===//
+// Pass Implementation
+//
+
+// Force out-of-line virtual method.
+Pass::~Pass() {
+ delete Resolver;
+}
+
+// Force out-of-line virtual method.
+ModulePass::~ModulePass() = default;
+
+Pass *ModulePass::createPrinterPass(raw_ostream &OS,
+ const std::string &Banner) const {
+ return createPrintModulePass(OS, Banner);
+}
+
+PassManagerType ModulePass::getPotentialPassManagerType() const {
+ return PMT_ModulePassManager;
+}
+
+static std::string getDescription(const Module &M) {
+ return "module (" + M.getName().str() + ")";
+}
+
+bool ModulePass::skipModule(Module &M) const {
+ OptPassGate &Gate = M.getContext().getOptPassGate();
+ return Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(M));
+}
+
+bool Pass::mustPreserveAnalysisID(char &AID) const {
+ return Resolver->getAnalysisIfAvailable(&AID) != nullptr;
+}
+
+// dumpPassStructure - Implement the -debug-pass=Structure option
+void Pass::dumpPassStructure(unsigned Offset) {
+ dbgs().indent(Offset*2) << getPassName() << "\n";
+}
+
+/// getPassName - Return a nice clean name for a pass. This is usually
+/// implemented in terms of the name that is registered by one of the
+/// Registration templates, but can be overridden directly.
+StringRef Pass::getPassName() const {
+ AnalysisID AID = getPassID();
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(AID);
+ if (PI)
+ return PI->getPassName();
+ return "Unnamed pass: implement Pass::getPassName()";
+}
+
+void Pass::preparePassManager(PMStack &) {
+ // By default, don't do anything.
+}
+
+PassManagerType Pass::getPotentialPassManagerType() const {
+ // Default implementation.
+ return PMT_Unknown;
+}
+
+void Pass::getAnalysisUsage(AnalysisUsage &) const {
+ // By default, no analysis results are used, all are invalidated.
+}
+
+void Pass::releaseMemory() {
+ // By default, don't do anything.
+}
+
+void Pass::verifyAnalysis() const {
+ // By default, don't do anything.
+}
+
+void *Pass::getAdjustedAnalysisPointer(AnalysisID AID) {
+ return this;
+}
+
+ImmutablePass *Pass::getAsImmutablePass() {
+ return nullptr;
+}
+
+PMDataManager *Pass::getAsPMDataManager() {
+ return nullptr;
+}
+
+void Pass::setResolver(AnalysisResolver *AR) {
+ assert(!Resolver && "Resolver is already set");
+ Resolver = AR;
+}
+
+// print - Print out the internal state of the pass. This is called by Analyze
+// to print out the contents of an analysis. Otherwise it is not necessary to
+// implement this method.
+void Pass::print(raw_ostream &OS, const Module *) const {
+ OS << "Pass::print not implemented for pass: '" << getPassName() << "'!\n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+// dump - call print(cerr);
+LLVM_DUMP_METHOD void Pass::dump() const {
+ print(dbgs(), nullptr);
+}
+#endif
+
+#ifdef EXPENSIVE_CHECKS
+uint64_t Pass::structuralHash(Module &M) const { return StructuralHash(M); }
+
+uint64_t Pass::structuralHash(Function &F) const { return StructuralHash(F); }
+#endif
+
+//===----------------------------------------------------------------------===//
+// ImmutablePass Implementation
+//
+// Force out-of-line virtual method.
+ImmutablePass::~ImmutablePass() = default;
+
+void ImmutablePass::initializePass() {
+ // By default, don't do anything.
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionPass Implementation
+//
+
+Pass *FunctionPass::createPrinterPass(raw_ostream &OS,
+ const std::string &Banner) const {
+ return createPrintFunctionPass(OS, Banner);
+}
+
+PassManagerType FunctionPass::getPotentialPassManagerType() const {
+ return PMT_FunctionPassManager;
+}
+
+static std::string getDescription(const Function &F) {
+ return "function (" + F.getName().str() + ")";
+}
+
+bool FunctionPass::skipFunction(const Function &F) const {
+ OptPassGate &Gate = F.getContext().getOptPassGate();
+ if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(F)))
+ return true;
+
+ if (F.hasOptNone()) {
+ LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' on function "
+ << F.getName() << "\n");
+ return true;
+ }
+ return false;
+}
+
+const PassInfo *Pass::lookupPassInfo(const void *TI) {
+ return PassRegistry::getPassRegistry()->getPassInfo(TI);
+}
+
+const PassInfo *Pass::lookupPassInfo(StringRef Arg) {
+ return PassRegistry::getPassRegistry()->getPassInfo(Arg);
+}
+
+Pass *Pass::createPass(AnalysisID ID) {
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(ID);
+ if (!PI)
+ return nullptr;
+ return PI->createPass();
+}
+
+//===----------------------------------------------------------------------===//
+// Analysis Group Implementation Code
+//===----------------------------------------------------------------------===//
+
+// RegisterAGBase implementation
+
+RegisterAGBase::RegisterAGBase(StringRef Name, const void *InterfaceID,
+ const void *PassID, bool isDefault)
+ : PassInfo(Name, InterfaceID) {
+ PassRegistry::getPassRegistry()->registerAnalysisGroup(InterfaceID, PassID,
+ *this, isDefault);
+}
+
+//===----------------------------------------------------------------------===//
+// PassRegistrationListener implementation
+//
+
+// enumeratePasses - Iterate over the registered passes, calling the
+// passEnumerate callback on each PassInfo object.
+void PassRegistrationListener::enumeratePasses() {
+ PassRegistry::getPassRegistry()->enumerateWith(this);
+}
+
+PassNameParser::PassNameParser(cl::Option &O)
+ : cl::parser<const PassInfo *>(O) {
+ PassRegistry::getPassRegistry()->addRegistrationListener(this);
+}
+
+// This only gets called during static destruction, in which case the
+// PassRegistry will have already been destroyed by llvm_shutdown(). So
+// attempting to remove the registration listener is an error.
+PassNameParser::~PassNameParser() = default;
+
+//===----------------------------------------------------------------------===//
+// AnalysisUsage Class Implementation
+//
+
+namespace {
+
+struct GetCFGOnlyPasses : public PassRegistrationListener {
+ using VectorType = AnalysisUsage::VectorType;
+
+ VectorType &CFGOnlyList;
+
+ GetCFGOnlyPasses(VectorType &L) : CFGOnlyList(L) {}
+
+ void passEnumerate(const PassInfo *P) override {
+ if (P->isCFGOnlyPass())
+ CFGOnlyList.push_back(P->getTypeInfo());
+ }
+};
+
+} // end anonymous namespace
+
+// setPreservesCFG - This function should be called by the pass, iff it does
+// not:
+//
+// 1. Add or remove basic blocks from the function
+// 2. Modify terminator instructions in any way.
+//
+// This function annotates the AnalysisUsage info object to say that analyses
+// that only depend on the CFG are preserved by this pass.
+void AnalysisUsage::setPreservesCFG() {
+ // Since this transformation doesn't modify the CFG, it preserves all analyses
+ // that only depend on the CFG (like dominators, loop info, etc...)
+ GetCFGOnlyPasses(Preserved).enumeratePasses();
+}
+
+AnalysisUsage &AnalysisUsage::addPreserved(StringRef Arg) {
+ const PassInfo *PI = Pass::lookupPassInfo(Arg);
+ // If the pass exists, preserve it. Otherwise silently do nothing.
+ if (PI)
+ pushUnique(Preserved, PI->getTypeInfo());
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredID(const void *ID) {
+ pushUnique(Required, ID);
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredID(char &ID) {
+ pushUnique(Required, &ID);
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredTransitiveID(char &ID) {
+ pushUnique(Required, &ID);
+ pushUnique(RequiredTransitive, &ID);
+ return *this;
+}
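
To illustrate the hooks implemented above, here is a minimal, entirely hypothetical legacy pass: skipFunction() wires it into the opt-pass gate, and getAnalysisUsage() exercises the AnalysisUsage API from this file.

    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    namespace {
    struct ExampleCountBlocks : FunctionPass {
      static char ID;
      ExampleCountBlocks() : FunctionPass(ID) {}

      bool runOnFunction(Function &F) override {
        if (skipFunction(F)) // honors optnone and the opt-pass gate
          return false;
        errs() << F.getName() << " has " << F.size() << " blocks\n";
        return false; // the IR is never modified
      }

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        AU.setPreservesAll(); // pure analysis: everything stays valid
      }
    };
    } // end anonymous namespace
    char ExampleCountBlocks::ID = 0;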
diff --git a/contrib/llvm-project/llvm/lib/IR/PassInstrumentation.cpp b/contrib/llvm-project/llvm/lib/IR/PassInstrumentation.cpp
new file mode 100644
index 000000000000..d85cefbbe6f7
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/PassInstrumentation.cpp
@@ -0,0 +1,41 @@
+//===- PassInstrumentation.cpp - Pass Instrumentation interface -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the implementation of PassInstrumentation class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/PassInstrumentation.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+void PassInstrumentationCallbacks::addClassToPassName(StringRef ClassName,
+ StringRef PassName) {
+ if (ClassToPassName[ClassName].empty())
+ ClassToPassName[ClassName] = PassName.str();
+}
+
+StringRef
+PassInstrumentationCallbacks::getPassNameForClassName(StringRef ClassName) {
+ return ClassToPassName[ClassName];
+}
+
+AnalysisKey PassInstrumentationAnalysis::Key;
+
+bool isSpecialPass(StringRef PassID, const std::vector<StringRef> &Specials) {
+ size_t Pos = PassID.find('<');
+ StringRef Prefix = PassID;
+ if (Pos != StringRef::npos)
+ Prefix = PassID.substr(0, Pos);
+ return any_of(Specials, [Prefix](StringRef S) { return Prefix.endswith(S); });
+}
+
+} // namespace llvm
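
A short sketch of isSpecialPass() matching behavior: any '<...>' parameter suffix is stripped before the comparison, so parameterized adaptors and managers are still recognized by their class-name suffix.

    #include "llvm/IR/PassInstrumentation.h"
    using namespace llvm;

    bool A = isSpecialPass("ModuleToFunctionPassAdaptor<llvm::SomePass>",
                           {"PassManager", "PassAdaptor"}); // true
    bool B = isSpecialPass("InstCombinePass",
                           {"PassManager", "PassAdaptor"}); // false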
diff --git a/contrib/llvm-project/llvm/lib/IR/PassManager.cpp b/contrib/llvm-project/llvm/lib/IR/PassManager.cpp
new file mode 100644
index 000000000000..3025c3853d5f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/PassManager.cpp
@@ -0,0 +1,154 @@
+//===- PassManager.cpp - Infrastructure for managing & running IR passes --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/PassManagerImpl.h"
+
+using namespace llvm;
+
+namespace llvm {
+// Explicit template instantiations and specialization definitions for core
+// template typedefs.
+template class AllAnalysesOn<Module>;
+template class AllAnalysesOn<Function>;
+template class PassManager<Module>;
+template class PassManager<Function>;
+template class AnalysisManager<Module>;
+template class AnalysisManager<Function>;
+template class InnerAnalysisManagerProxy<FunctionAnalysisManager, Module>;
+template class OuterAnalysisManagerProxy<ModuleAnalysisManager, Function>;
+
+template <>
+bool FunctionAnalysisManagerModuleProxy::Result::invalidate(
+ Module &M, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &Inv) {
+ // If literally everything is preserved, we're done.
+ if (PA.areAllPreserved())
+ return false; // This is still a valid proxy.
+
+ // If this proxy isn't marked as preserved, then even if the result remains
+ // valid, the key itself may no longer be valid, so we clear everything.
+ //
+ // Note that in order to preserve this proxy, a module pass must ensure that
+ // the FAM has been completely updated to handle the deletion of functions.
+ // Specifically, any FAM-cached results for those functions need to have been
+ // forcibly cleared. When preserved, this proxy will only invalidate results
+ // cached on functions *still in the module* at the end of the module pass.
+ auto PAC = PA.getChecker<FunctionAnalysisManagerModuleProxy>();
+ if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
+ InnerAM->clear();
+ return true;
+ }
+
+ // Directly check if the relevant set is preserved.
+ bool AreFunctionAnalysesPreserved =
+ PA.allAnalysesInSetPreserved<AllAnalysesOn<Function>>();
+
+ // Now walk all the functions to see if any inner analysis invalidation is
+ // necessary.
+ for (Function &F : M) {
+ Optional<PreservedAnalyses> FunctionPA;
+
+ // Check to see whether the preserved set needs to be pruned based on
+ // module-level analysis invalidation that triggers deferred invalidation
+ // registered with the outer analysis manager proxy for this function.
+ if (auto *OuterProxy =
+ InnerAM->getCachedResult<ModuleAnalysisManagerFunctionProxy>(F))
+ for (const auto &OuterInvalidationPair :
+ OuterProxy->getOuterInvalidations()) {
+ AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
+ const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
+ if (Inv.invalidate(OuterAnalysisID, M, PA)) {
+ if (!FunctionPA)
+ FunctionPA = PA;
+ for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
+ FunctionPA->abandon(InnerAnalysisID);
+ }
+ }
+
+ // Check if we needed a custom PA set, and if so we'll need to run the
+ // inner invalidation.
+ if (FunctionPA) {
+ InnerAM->invalidate(F, *FunctionPA);
+ continue;
+ }
+
+ // Otherwise we only need to do invalidation if the original PA set didn't
+ // preserve all function analyses.
+ if (!AreFunctionAnalysesPreserved)
+ InnerAM->invalidate(F, PA);
+ }
+
+ // Return false to indicate that this result is still a valid proxy.
+ return false;
+}
+} // namespace llvm
+
+void ModuleToFunctionPassAdaptor::printPipeline(
+ raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
+ OS << "function";
+ if (EagerlyInvalidate)
+ OS << "<eager-inv>";
+ OS << "(";
+ Pass->printPipeline(OS, MapClassName2PassName);
+ OS << ")";
+}
+
+PreservedAnalyses ModuleToFunctionPassAdaptor::run(Module &M,
+ ModuleAnalysisManager &AM) {
+ FunctionAnalysisManager &FAM =
+ AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+
+ // Request PassInstrumentation from the analysis manager; we will use it to
+ // run the instrumenting callbacks for the passes later.
+ PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(M);
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (Function &F : M) {
+ if (F.isDeclaration())
+ continue;
+
+ // Check the PassInstrumentation's BeforePass callbacks before running the
+ // pass, skip its execution completely if asked to (callback returns
+ // false).
+ if (!PI.runBeforePass<Function>(*Pass, F))
+ continue;
+
+ PreservedAnalyses PassPA;
+ {
+ TimeTraceScope TimeScope(Pass->name(), F.getName());
+ PassPA = Pass->run(F, FAM);
+ }
+
+ PI.runAfterPass(*Pass, F, PassPA);
+
+ // We know that the function pass couldn't have invalidated any other
+ // function's analyses (that's the contract of a function pass), so
+ // directly handle the function analysis manager's invalidation here.
+ FAM.invalidate(F, EagerlyInvalidate ? PreservedAnalyses::none() : PassPA);
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+ }
+
+ // The FunctionAnalysisManagerModuleProxy is preserved because (we assume)
+ // the function passes we ran didn't add or remove any functions.
+ //
+ // We also preserve all analyses on Functions, because we did all the
+ // invalidation we needed to do above.
+ PA.preserveSet<AllAnalysesOn<Function>>();
+ PA.preserve<FunctionAnalysisManagerModuleProxy>();
+ return PA;
+}
+
+AnalysisSetKey CFGAnalyses::SetKey;
+
+AnalysisSetKey PreservedAnalyses::AllAnalysesKey;
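
A minimal sketch of driving the adaptor above from a module pipeline; it assumes the analysis managers were cross-registered through a PassBuilder, as in the standard new-PM setup:

    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Transforms/Scalar/SimplifyCFG.h"
    using namespace llvm;

    void runSimplifyCFG(Module &M, ModuleAnalysisManager &MAM) {
      ModulePassManager MPM;
      MPM.addPass(createModuleToFunctionPassAdaptor(SimplifyCFGPass()));
      // Visits every function with a body, per the run() method above.
      MPM.run(M, MAM);
    }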
diff --git a/contrib/llvm-project/llvm/lib/IR/PassRegistry.cpp b/contrib/llvm-project/llvm/lib/IR/PassRegistry.cpp
new file mode 100644
index 000000000000..6c22fcd34769
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/PassRegistry.cpp
@@ -0,0 +1,120 @@
+//===- PassRegistry.cpp - Pass Registration Implementation ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PassRegistry, with which passes are registered on
+// initialization, and supports the PassManager in dependency resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/PassRegistry.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Pass.h"
+#include "llvm/PassInfo.h"
+#include <cassert>
+#include <memory>
+#include <utility>
+
+using namespace llvm;
+
+PassRegistry *PassRegistry::getPassRegistry() {
+ static PassRegistry PassRegistryObj;
+ return &PassRegistryObj;
+}
+
+//===----------------------------------------------------------------------===//
+// Accessors
+//
+
+PassRegistry::~PassRegistry() = default;
+
+const PassInfo *PassRegistry::getPassInfo(const void *TI) const {
+ sys::SmartScopedReader<true> Guard(Lock);
+ return PassInfoMap.lookup(TI);
+}
+
+const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const {
+ sys::SmartScopedReader<true> Guard(Lock);
+ return PassInfoStringMap.lookup(Arg);
+}
+
+//===----------------------------------------------------------------------===//
+// Pass Registration mechanism
+//
+
+void PassRegistry::registerPass(const PassInfo &PI, bool ShouldFree) {
+ sys::SmartScopedWriter<true> Guard(Lock);
+ bool Inserted =
+ PassInfoMap.insert(std::make_pair(PI.getTypeInfo(), &PI)).second;
+ assert(Inserted && "Pass registered multiple times!");
+ (void)Inserted;
+ PassInfoStringMap[PI.getPassArgument()] = &PI;
+
+ // Notify any listeners.
+ for (auto *Listener : Listeners)
+ Listener->passRegistered(&PI);
+
+ if (ShouldFree)
+ ToFree.push_back(std::unique_ptr<const PassInfo>(&PI));
+}
+
+void PassRegistry::enumerateWith(PassRegistrationListener *L) {
+ sys::SmartScopedReader<true> Guard(Lock);
+ for (auto PassInfoPair : PassInfoMap)
+ L->passEnumerate(PassInfoPair.second);
+}
+
+/// Analysis Group Mechanisms.
+void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
+ const void *PassID,
+ PassInfo &Registeree, bool isDefault,
+ bool ShouldFree) {
+ PassInfo *InterfaceInfo = const_cast<PassInfo *>(getPassInfo(InterfaceID));
+ if (!InterfaceInfo) {
+ // First reference to Interface, register it now.
+ registerPass(Registeree);
+ InterfaceInfo = &Registeree;
+ }
+ assert(Registeree.isAnalysisGroup() &&
+ "Trying to join an analysis group that is a normal pass!");
+
+ if (PassID) {
+ PassInfo *ImplementationInfo = const_cast<PassInfo *>(getPassInfo(PassID));
+ assert(ImplementationInfo &&
+ "Must register pass before adding to AnalysisGroup!");
+
+ sys::SmartScopedWriter<true> Guard(Lock);
+
+ // Make sure we keep track of the fact that the implementation implements
+ // the interface.
+ ImplementationInfo->addInterfaceImplemented(InterfaceInfo);
+
+ if (isDefault) {
+ assert(InterfaceInfo->getNormalCtor() == nullptr &&
+ "Default implementation for analysis group already specified!");
+ assert(
+ ImplementationInfo->getNormalCtor() &&
+ "Cannot specify pass as default if it does not have a default ctor");
+ InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
+ }
+ }
+
+ if (ShouldFree)
+ ToFree.push_back(std::unique_ptr<const PassInfo>(&Registeree));
+}
+
+void PassRegistry::addRegistrationListener(PassRegistrationListener *L) {
+ sys::SmartScopedWriter<true> Guard(Lock);
+ Listeners.push_back(L);
+}
+
+void PassRegistry::removeRegistrationListener(PassRegistrationListener *L) {
+ sys::SmartScopedWriter<true> Guard(Lock);
+
+ auto I = llvm::find(Listeners, L);
+ Listeners.erase(I);
+}
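
For context, a sketch of the static registration that populates this registry: constructing a RegisterPass object at file scope invokes registerPass() during static initialization, after which the pass is visible to lookupPassInfo(). The pass type is the hypothetical ExampleCountBlocks from the earlier sketch.

    #include "llvm/Pass.h"
    using namespace llvm;

    static RegisterPass<ExampleCountBlocks>
        X("example-count-blocks", "Example block counter",
          false /*Only looks at CFG*/, false /*Analysis Pass*/);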
diff --git a/contrib/llvm-project/llvm/lib/IR/PassTimingInfo.cpp b/contrib/llvm-project/llvm/lib/IR/PassTimingInfo.cpp
new file mode 100644
index 000000000000..a03fafec9fac
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/PassTimingInfo.cpp
@@ -0,0 +1,293 @@
+//===- PassTimingInfo.cpp - LLVM Pass Timing Implementation ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM Pass Timing infrastructure for both
+// new and legacy pass managers.
+//
+// PassTimingInfo Class - This class is used to calculate information about the
+// amount of time each pass takes to execute. This only happens when
+// -time-passes is enabled on the command line.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/PassTimingInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/PassInstrumentation.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/TypeName.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "time-passes"
+
+namespace llvm {
+
+bool TimePassesIsEnabled = false;
+bool TimePassesPerRun = false;
+
+static cl::opt<bool, true> EnableTiming(
+ "time-passes", cl::location(TimePassesIsEnabled), cl::Hidden,
+ cl::desc("Time each pass, printing elapsed time for each on exit"));
+
+static cl::opt<bool, true> EnableTimingPerRun(
+ "time-passes-per-run", cl::location(TimePassesPerRun), cl::Hidden,
+ cl::desc("Time each pass run, printing elapsed time for each run on exit"),
+ cl::callback([](const bool &) { TimePassesIsEnabled = true; }));
+
+namespace {
+namespace legacy {
+
+//===----------------------------------------------------------------------===//
+// Legacy pass manager's PassTimingInfo implementation
+
+/// Provides an interface for collecting pass timing information.
+///
+/// It was intended to be generic, but we have since decided to split the
+/// interfaces completely; this is now exclusively for legacy-pass-manager use.
+class PassTimingInfo {
+public:
+ using PassInstanceID = void *;
+
+private:
+ StringMap<unsigned> PassIDCountMap; ///< Map that counts instances of passes
+ DenseMap<PassInstanceID, std::unique_ptr<Timer>> TimingData; ///< timers for pass instances
+ TimerGroup TG;
+
+public:
+ /// Default constructor for yet-inactive timeinfo.
+ /// Use \p init() to activate it.
+ PassTimingInfo();
+
+ /// Print out timing information and release timers.
+ ~PassTimingInfo();
+
+ /// Initializes the static \p TheTimeInfo member to a non-null value when
+ /// -time-passes is enabled. Leaves it null otherwise.
+ ///
+ /// This method may be called multiple times.
+ static void init();
+
+ /// Prints out timing information and then resets the timers.
+ /// By default it uses the stream created by CreateInfoOutputFile().
+ void print(raw_ostream *OutStream = nullptr);
+
+ /// Returns the timer for the specified pass if it exists.
+ Timer *getPassTimer(Pass *, PassInstanceID);
+
+ static PassTimingInfo *TheTimeInfo;
+
+private:
+ Timer *newPassTimer(StringRef PassID, StringRef PassDesc);
+};
+
+static ManagedStatic<sys::SmartMutex<true>> TimingInfoMutex;
+
+PassTimingInfo::PassTimingInfo()
+ : TG("pass", "... Pass execution timing report ...") {}
+
+PassTimingInfo::~PassTimingInfo() {
+ // Deleting the timers accumulates their info into the TG member.
+ // Then TG member is (implicitly) deleted, actually printing the report.
+ TimingData.clear();
+}
+
+void PassTimingInfo::init() {
+ if (!TimePassesIsEnabled || TheTimeInfo)
+ return;
+
+ // Constructed the first time this is called, iff -time-passes is enabled.
+ // This guarantees that the object will be constructed after static globals,
+ // thus it will be destroyed before them.
+ static ManagedStatic<PassTimingInfo> TTI;
+ TheTimeInfo = &*TTI;
+}
+
+/// Prints out timing information and then resets the timers.
+void PassTimingInfo::print(raw_ostream *OutStream) {
+ TG.print(OutStream ? *OutStream : *CreateInfoOutputFile(), true);
+}
+
+Timer *PassTimingInfo::newPassTimer(StringRef PassID, StringRef PassDesc) {
+ unsigned &num = PassIDCountMap[PassID];
+ num++;
+ // Append a pass-instance number to the description for all but the first one.
+ std::string PassDescNumbered =
+ num <= 1 ? PassDesc.str() : formatv("{0} #{1}", PassDesc, num).str();
+ return new Timer(PassID, PassDescNumbered, TG);
+}
+
+Timer *PassTimingInfo::getPassTimer(Pass *P, PassInstanceID Pass) {
+ if (P->getAsPMDataManager())
+ return nullptr;
+
+ init();
+ sys::SmartScopedLock<true> Lock(*TimingInfoMutex);
+ std::unique_ptr<Timer> &T = TimingData[Pass];
+
+ if (!T) {
+ StringRef PassName = P->getPassName();
+ StringRef PassArgument;
+ if (const PassInfo *PI = Pass::lookupPassInfo(P->getPassID()))
+ PassArgument = PI->getPassArgument();
+ T.reset(newPassTimer(PassArgument.empty() ? PassName : PassArgument, PassName));
+ }
+ return T.get();
+}
+
+PassTimingInfo *PassTimingInfo::TheTimeInfo;
+} // namespace legacy
+} // namespace
+
+Timer *getPassTimer(Pass *P) {
+ legacy::PassTimingInfo::init();
+ if (legacy::PassTimingInfo::TheTimeInfo)
+ return legacy::PassTimingInfo::TheTimeInfo->getPassTimer(P, P);
+ return nullptr;
+}
+
+/// If timing is enabled, report the times collected up to now and then reset
+/// them.
+void reportAndResetTimings(raw_ostream *OutStream) {
+ if (legacy::PassTimingInfo::TheTimeInfo)
+ legacy::PassTimingInfo::TheTimeInfo->print(OutStream);
+}
+
+//===----------------------------------------------------------------------===//
+// Pass timing handling for the New Pass Manager
+//===----------------------------------------------------------------------===//
+
+/// Returns the timer for the specified pass invocation of \p PassID.
+/// In per-run mode, each call creates a new timer.
+Timer &TimePassesHandler::getPassTimer(StringRef PassID) {
+ if (!PerRun) {
+ TimerVector &Timers = TimingData[PassID];
+ if (Timers.size() == 0)
+ Timers.emplace_back(new Timer(PassID, PassID, TG));
+ return *Timers.front();
+ }
+
+ // Take a vector of Timers created for this \p PassID and append
+ // one more timer to it.
+ TimerVector &Timers = TimingData[PassID];
+ unsigned Count = Timers.size() + 1;
+
+ std::string FullDesc = formatv("{0} #{1}", PassID, Count).str();
+
+ Timer *T = new Timer(PassID, FullDesc, TG);
+ Timers.emplace_back(T);
+ assert(Count == Timers.size() && "Timers vector not adjusted correctly.");
+
+ return *T;
+}
+
+TimePassesHandler::TimePassesHandler(bool Enabled, bool PerRun)
+ : TG("pass", "... Pass execution timing report ..."), Enabled(Enabled),
+ PerRun(PerRun) {}
+
+TimePassesHandler::TimePassesHandler()
+ : TimePassesHandler(TimePassesIsEnabled, TimePassesPerRun) {}
+
+void TimePassesHandler::setOutStream(raw_ostream &Out) {
+ OutStream = &Out;
+}
+
+void TimePassesHandler::print() {
+ if (!Enabled)
+ return;
+ TG.print(OutStream ? *OutStream : *CreateInfoOutputFile(), true);
+}
+
+LLVM_DUMP_METHOD void TimePassesHandler::dump() const {
+ dbgs() << "Dumping timers for " << getTypeName<TimePassesHandler>()
+ << ":\n\tRunning:\n";
+ for (auto &I : TimingData) {
+ StringRef PassID = I.getKey();
+ const TimerVector& MyTimers = I.getValue();
+ for (unsigned idx = 0; idx < MyTimers.size(); idx++) {
+ const Timer* MyTimer = MyTimers[idx].get();
+ if (MyTimer && MyTimer->isRunning())
+ dbgs() << "\tTimer " << MyTimer << " for pass " << PassID << "(" << idx << ")\n";
+ }
+ }
+ dbgs() << "\tTriggered:\n";
+ for (auto &I : TimingData) {
+ StringRef PassID = I.getKey();
+ const TimerVector& MyTimers = I.getValue();
+ for (unsigned idx = 0; idx < MyTimers.size(); idx++) {
+ const Timer* MyTimer = MyTimers[idx].get();
+ if (MyTimer && MyTimer->hasTriggered() && !MyTimer->isRunning())
+ dbgs() << "\tTimer " << MyTimer << " for pass " << PassID << "(" << idx << ")\n";
+ }
+ }
+}
+
+void TimePassesHandler::startTimer(StringRef PassID) {
+ Timer &MyTimer = getPassTimer(PassID);
+ TimerStack.push_back(&MyTimer);
+ if (!MyTimer.isRunning())
+ MyTimer.startTimer();
+}
+
+void TimePassesHandler::stopTimer(StringRef PassID) {
+ assert(!TimerStack.empty() && "empty stack in stopTimer");
+ Timer *MyTimer = TimerStack.pop_back_val();
+ assert(MyTimer && "timer should be present");
+ if (MyTimer->isRunning())
+ MyTimer->stopTimer();
+}
+
+void TimePassesHandler::runBeforePass(StringRef PassID) {
+ if (isSpecialPass(PassID,
+ {"PassManager", "PassAdaptor", "AnalysisManagerProxy"}))
+ return;
+
+ startTimer(PassID);
+
+ LLVM_DEBUG(dbgs() << "after runBeforePass(" << PassID << ")\n");
+ LLVM_DEBUG(dump());
+}
+
+void TimePassesHandler::runAfterPass(StringRef PassID) {
+ if (isSpecialPass(PassID,
+ {"PassManager", "PassAdaptor", "AnalysisManagerProxy"}))
+ return;
+
+ stopTimer(PassID);
+
+ LLVM_DEBUG(dbgs() << "after runAfterPass(" << PassID << ")\n");
+ LLVM_DEBUG(dump());
+}
+
+void TimePassesHandler::registerCallbacks(PassInstrumentationCallbacks &PIC) {
+ if (!Enabled)
+ return;
+
+ PIC.registerBeforeNonSkippedPassCallback(
+ [this](StringRef P, Any) { this->runBeforePass(P); });
+ PIC.registerAfterPassCallback(
+ [this](StringRef P, Any, const PreservedAnalyses &) {
+ this->runAfterPass(P);
+ });
+ PIC.registerAfterPassInvalidatedCallback(
+ [this](StringRef P, const PreservedAnalyses &) {
+ this->runAfterPass(P);
+ });
+ PIC.registerBeforeAnalysisCallback(
+ [this](StringRef P, Any) { this->runBeforePass(P); });
+ PIC.registerAfterAnalysisCallback(
+ [this](StringRef P, Any) { this->runAfterPass(P); });
+}
+
+} // namespace llvm
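
A minimal sketch of wiring TimePassesHandler into new-PM instrumentation by hand, roughly what -time-passes arranges through StandardInstrumentations:

    #include "llvm/IR/PassInstrumentation.h"
    #include "llvm/IR/PassTimingInfo.h"
    using namespace llvm;

    void timeMyPipeline() {
      PassInstrumentationCallbacks PIC;
      TimePassesHandler Timing(/*Enabled=*/true, /*PerRun=*/false);
      Timing.registerCallbacks(PIC);
      // ... hand &PIC to the PassBuilder constructor, build and run a
      // pipeline ...
      Timing.print(); // "... Pass execution timing report ..."
    }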
diff --git a/contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp b/contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp
new file mode 100644
index 000000000000..83b8c93e766f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/PrintPasses.cpp
@@ -0,0 +1,88 @@
+//===- PrintPasses.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/PrintPasses.h"
+#include "llvm/Support/CommandLine.h"
+#include <unordered_set>
+
+using namespace llvm;
+
+// Print IR out before/after specified passes.
+static cl::list<std::string>
+ PrintBefore("print-before",
+ llvm::cl::desc("Print IR before specified passes"),
+ cl::CommaSeparated, cl::Hidden);
+
+static cl::list<std::string>
+ PrintAfter("print-after", llvm::cl::desc("Print IR after specified passes"),
+ cl::CommaSeparated, cl::Hidden);
+
+static cl::opt<bool> PrintBeforeAll("print-before-all",
+ llvm::cl::desc("Print IR before each pass"),
+ cl::init(false), cl::Hidden);
+static cl::opt<bool> PrintAfterAll("print-after-all",
+ llvm::cl::desc("Print IR after each pass"),
+ cl::init(false), cl::Hidden);
+
+static cl::opt<bool>
+ PrintModuleScope("print-module-scope",
+ cl::desc("When printing IR for print-[before|after]{-all} "
+ "always print a module IR"),
+ cl::init(false), cl::Hidden);
+
+static cl::list<std::string>
+ PrintFuncsList("filter-print-funcs", cl::value_desc("function names"),
+ cl::desc("Only print IR for functions whose name "
+ "match this for all print-[before|after][-all] "
+ "options"),
+ cl::CommaSeparated, cl::Hidden);
+
+/// This is a helper to determine whether to print IR before or
+/// after a pass.
+
+bool llvm::shouldPrintBeforeSomePass() {
+ return PrintBeforeAll || !PrintBefore.empty();
+}
+
+bool llvm::shouldPrintAfterSomePass() {
+ return PrintAfterAll || !PrintAfter.empty();
+}
+
+static bool shouldPrintBeforeOrAfterPass(StringRef PassID,
+ ArrayRef<std::string> PassesToPrint) {
+ return llvm::is_contained(PassesToPrint, PassID);
+}
+
+bool llvm::shouldPrintBeforeAll() { return PrintBeforeAll; }
+
+bool llvm::shouldPrintAfterAll() { return PrintAfterAll; }
+
+bool llvm::shouldPrintBeforePass(StringRef PassID) {
+ return PrintBeforeAll || shouldPrintBeforeOrAfterPass(PassID, PrintBefore);
+}
+
+bool llvm::shouldPrintAfterPass(StringRef PassID) {
+ return PrintAfterAll || shouldPrintBeforeOrAfterPass(PassID, PrintAfter);
+}
+
+std::vector<std::string> llvm::printBeforePasses() {
+ return std::vector<std::string>(PrintBefore);
+}
+
+std::vector<std::string> llvm::printAfterPasses() {
+ return std::vector<std::string>(PrintAfter);
+}
+
+bool llvm::forcePrintModuleIR() { return PrintModuleScope; }
+
+bool llvm::isFunctionInPrintList(StringRef FunctionName) {
+ static std::unordered_set<std::string> PrintFuncNames(PrintFuncsList.begin(),
+ PrintFuncsList.end());
+ return PrintFuncNames.empty() ||
+ PrintFuncNames.count(std::string(FunctionName));
+}
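
A small sketch of how an IR-printing layer might consult these helpers; PassID is whatever identifier the instrumentation hands in:

    #include "llvm/IR/Module.h"
    #include "llvm/IR/PrintPasses.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void maybePrint(Module &M, StringRef PassID) {
      // True with -print-before=<PassID> or -print-before-all.
      if (shouldPrintBeforePass(PassID))
        M.print(errs(), /*AAW=*/nullptr);
    }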
diff --git a/contrib/llvm-project/llvm/lib/IR/ProfileSummary.cpp b/contrib/llvm-project/llvm/lib/IR/ProfileSummary.cpp
new file mode 100644
index 000000000000..9f7335ecbe44
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/ProfileSummary.cpp
@@ -0,0 +1,267 @@
+//=-- ProfileSummary.cpp - Profile summary support ---------------------------=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for converting profile summary data from/to
+// metadata.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/ProfileSummary.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Format.h"
+
+using namespace llvm;
+
+// Return an MDTuple with two elements. The first element is a string Key and
+// the second is a uint64_t Value.
+static Metadata *getKeyValMD(LLVMContext &Context, const char *Key,
+ uint64_t Val) {
+ Type *Int64Ty = Type::getInt64Ty(Context);
+ Metadata *Ops[2] = {MDString::get(Context, Key),
+ ConstantAsMetadata::get(ConstantInt::get(Int64Ty, Val))};
+ return MDTuple::get(Context, Ops);
+}
+
+static Metadata *getKeyFPValMD(LLVMContext &Context, const char *Key,
+ double Val) {
+ Type *DoubleTy = Type::getDoubleTy(Context);
+ Metadata *Ops[2] = {MDString::get(Context, Key),
+ ConstantAsMetadata::get(ConstantFP::get(DoubleTy, Val))};
+ return MDTuple::get(Context, Ops);
+}
+
+// Return an MDTuple with two elements. The first element is a string Key and
+// the second is a string Value.
+static Metadata *getKeyValMD(LLVMContext &Context, const char *Key,
+ const char *Val) {
+ Metadata *Ops[2] = {MDString::get(Context, Key), MDString::get(Context, Val)};
+ return MDTuple::get(Context, Ops);
+}
+
+// This returns an MDTuple representing the detailed summary. The tuple has two
+// elements: a string "DetailedSummary" and an MDTuple representing the value
+// of the detailed summary. Each element of this tuple is again an MDTuple whose
+// elements are the (Cutoff, MinCount, NumCounts) triplet of the
+// DetailedSummaryEntry.
+Metadata *ProfileSummary::getDetailedSummaryMD(LLVMContext &Context) {
+ std::vector<Metadata *> Entries;
+ Type *Int32Ty = Type::getInt32Ty(Context);
+ Type *Int64Ty = Type::getInt64Ty(Context);
+ for (auto &Entry : DetailedSummary) {
+ Metadata *EntryMD[3] = {
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Entry.Cutoff)),
+ ConstantAsMetadata::get(ConstantInt::get(Int64Ty, Entry.MinCount)),
+ ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Entry.NumCounts))};
+ Entries.push_back(MDTuple::get(Context, EntryMD));
+ }
+ Metadata *Ops[2] = {MDString::get(Context, "DetailedSummary"),
+ MDTuple::get(Context, Entries)};
+ return MDTuple::get(Context, Ops);
+}
+
+// This returns an MDTuple representing this ProfileSummary object. The first
+// entry of this tuple is another MDTuple of two elements: a string
+// "ProfileFormat" and a string representing the format ("InstrProf" or
+// "SampleProfile"). The rest of the elements of the outer MDTuple are specific
+// to the kind of profile summary as returned by getFormatSpecificMD.
+// IsPartialProfile is an optional field and \p AddPartialField will decide
+// whether to add a field for it.
+// PartialProfileRatio is an optional field and \p AddPartialProfileRatioField
+// will decide whether to add a field for it.
+Metadata *ProfileSummary::getMD(LLVMContext &Context, bool AddPartialField,
+ bool AddPartialProfileRatioField) {
+ const char *KindStr[3] = {"InstrProf", "CSInstrProf", "SampleProfile"};
+ SmallVector<Metadata *, 16> Components;
+ Components.push_back(getKeyValMD(Context, "ProfileFormat", KindStr[PSK]));
+ Components.push_back(getKeyValMD(Context, "TotalCount", getTotalCount()));
+ Components.push_back(getKeyValMD(Context, "MaxCount", getMaxCount()));
+ Components.push_back(
+ getKeyValMD(Context, "MaxInternalCount", getMaxInternalCount()));
+ Components.push_back(
+ getKeyValMD(Context, "MaxFunctionCount", getMaxFunctionCount()));
+ Components.push_back(getKeyValMD(Context, "NumCounts", getNumCounts()));
+ Components.push_back(getKeyValMD(Context, "NumFunctions", getNumFunctions()));
+ if (AddPartialField)
+ Components.push_back(
+ getKeyValMD(Context, "IsPartialProfile", isPartialProfile()));
+ if (AddPartialProfileRatioField)
+ Components.push_back(getKeyFPValMD(Context, "PartialProfileRatio",
+ getPartialProfileRatio()));
+ Components.push_back(getDetailedSummaryMD(Context));
+ return MDTuple::get(Context, Components);
+}
+
+// Get the value metadata for the input MD/Key.
+static ConstantAsMetadata *getValMD(MDTuple *MD, const char *Key) {
+ if (!MD)
+ return nullptr;
+ if (MD->getNumOperands() != 2)
+ return nullptr;
+ MDString *KeyMD = dyn_cast<MDString>(MD->getOperand(0));
+ ConstantAsMetadata *ValMD = dyn_cast<ConstantAsMetadata>(MD->getOperand(1));
+ if (!KeyMD || !ValMD)
+ return nullptr;
+ if (!KeyMD->getString().equals(Key))
+ return nullptr;
+ return ValMD;
+}
+
+// Parse an MDTuple representing (Key, Val) pair.
+static bool getVal(MDTuple *MD, const char *Key, uint64_t &Val) {
+ if (auto *ValMD = getValMD(MD, Key)) {
+ Val = cast<ConstantInt>(ValMD->getValue())->getZExtValue();
+ return true;
+ }
+ return false;
+}
+
+static bool getVal(MDTuple *MD, const char *Key, double &Val) {
+ if (auto *ValMD = getValMD(MD, Key)) {
+ Val = cast<ConstantFP>(ValMD->getValue())->getValueAPF().convertToDouble();
+ return true;
+ }
+ return false;
+}
+
+// Check if an MDTuple represents a (Key, Val) pair.
+static bool isKeyValuePair(MDTuple *MD, const char *Key, const char *Val) {
+ if (!MD || MD->getNumOperands() != 2)
+ return false;
+ MDString *KeyMD = dyn_cast<MDString>(MD->getOperand(0));
+ MDString *ValMD = dyn_cast<MDString>(MD->getOperand(1));
+ if (!KeyMD || !ValMD)
+ return false;
+ if (!KeyMD->getString().equals(Key) || !ValMD->getString().equals(Val))
+ return false;
+ return true;
+}
+
+// Parse an MDTuple representing detailed summary.
+static bool getSummaryFromMD(MDTuple *MD, SummaryEntryVector &Summary) {
+ if (!MD || MD->getNumOperands() != 2)
+ return false;
+ MDString *KeyMD = dyn_cast<MDString>(MD->getOperand(0));
+ if (!KeyMD || !KeyMD->getString().equals("DetailedSummary"))
+ return false;
+ MDTuple *EntriesMD = dyn_cast<MDTuple>(MD->getOperand(1));
+ if (!EntriesMD)
+ return false;
+ for (auto &&MDOp : EntriesMD->operands()) {
+ MDTuple *EntryMD = dyn_cast<MDTuple>(MDOp);
+ if (!EntryMD || EntryMD->getNumOperands() != 3)
+ return false;
+ ConstantAsMetadata *Op0 =
+ dyn_cast<ConstantAsMetadata>(EntryMD->getOperand(0));
+ ConstantAsMetadata *Op1 =
+ dyn_cast<ConstantAsMetadata>(EntryMD->getOperand(1));
+ ConstantAsMetadata *Op2 =
+ dyn_cast<ConstantAsMetadata>(EntryMD->getOperand(2));
+
+ if (!Op0 || !Op1 || !Op2)
+ return false;
+ Summary.emplace_back(cast<ConstantInt>(Op0->getValue())->getZExtValue(),
+ cast<ConstantInt>(Op1->getValue())->getZExtValue(),
+ cast<ConstantInt>(Op2->getValue())->getZExtValue());
+ }
+ return true;
+}
+
+// Get the value of an optional field. Increment 'Idx' if it was present.
+// Return true if we can move on to the next field.
+template <typename ValueType>
+static bool getOptionalVal(MDTuple *Tuple, unsigned &Idx, const char *Key,
+ ValueType &Value) {
+ if (getVal(dyn_cast<MDTuple>(Tuple->getOperand(Idx)), Key, Value)) {
+ Idx++;
+ // Make sure that when the key is present, we won't step over the bound of
+ // the Tuple operand array. Since the (non-optional) DetailedSummary always
+ // comes last, the next entry in the tuple operand array must exist.
+ return Idx < Tuple->getNumOperands();
+ }
+ // It was absent, keep going.
+ return true;
+}
+
+ProfileSummary *ProfileSummary::getFromMD(Metadata *MD) {
+ MDTuple *Tuple = dyn_cast_or_null<MDTuple>(MD);
+ if (!Tuple || Tuple->getNumOperands() < 8 || Tuple->getNumOperands() > 10)
+ return nullptr;
+
+ unsigned I = 0;
+ auto &FormatMD = Tuple->getOperand(I++);
+ ProfileSummary::Kind SummaryKind;
+ if (isKeyValuePair(dyn_cast_or_null<MDTuple>(FormatMD), "ProfileFormat",
+ "SampleProfile"))
+ SummaryKind = PSK_Sample;
+ else if (isKeyValuePair(dyn_cast_or_null<MDTuple>(FormatMD), "ProfileFormat",
+ "InstrProf"))
+ SummaryKind = PSK_Instr;
+ else if (isKeyValuePair(dyn_cast_or_null<MDTuple>(FormatMD), "ProfileFormat",
+ "CSInstrProf"))
+ SummaryKind = PSK_CSInstr;
+ else
+ return nullptr;
+
+ uint64_t NumCounts, TotalCount, NumFunctions, MaxFunctionCount, MaxCount,
+ MaxInternalCount;
+ if (!getVal(dyn_cast<MDTuple>(Tuple->getOperand(I++)), "TotalCount",
+ TotalCount))
+ return nullptr;
+ if (!getVal(dyn_cast<MDTuple>(Tuple->getOperand(I++)), "MaxCount", MaxCount))
+ return nullptr;
+ if (!getVal(dyn_cast<MDTuple>(Tuple->getOperand(I++)), "MaxInternalCount",
+ MaxInternalCount))
+ return nullptr;
+ if (!getVal(dyn_cast<MDTuple>(Tuple->getOperand(I++)), "MaxFunctionCount",
+ MaxFunctionCount))
+ return nullptr;
+ if (!getVal(dyn_cast<MDTuple>(Tuple->getOperand(I++)), "NumCounts",
+ NumCounts))
+ return nullptr;
+ if (!getVal(dyn_cast<MDTuple>(Tuple->getOperand(I++)), "NumFunctions",
+ NumFunctions))
+ return nullptr;
+
+ // Optional fields. Need to initialize because the fields are optional.
+ uint64_t IsPartialProfile = 0;
+ if (!getOptionalVal(Tuple, I, "IsPartialProfile", IsPartialProfile))
+ return nullptr;
+ double PartialProfileRatio = 0;
+ if (!getOptionalVal(Tuple, I, "PartialProfileRatio", PartialProfileRatio))
+ return nullptr;
+
+ SummaryEntryVector Summary;
+ if (!getSummaryFromMD(dyn_cast<MDTuple>(Tuple->getOperand(I++)), Summary))
+ return nullptr;
+ return new ProfileSummary(SummaryKind, std::move(Summary), TotalCount,
+ MaxCount, MaxInternalCount, MaxFunctionCount,
+ NumCounts, NumFunctions, IsPartialProfile,
+ PartialProfileRatio);
+}
+
+void ProfileSummary::printSummary(raw_ostream &OS) const {
+ OS << "Total functions: " << NumFunctions << "\n";
+ OS << "Maximum function count: " << MaxFunctionCount << "\n";
+ OS << "Maximum block count: " << MaxCount << "\n";
+ OS << "Total number of blocks: " << NumCounts << "\n";
+ OS << "Total count: " << TotalCount << "\n";
+}
+
+void ProfileSummary::printDetailedSummary(raw_ostream &OS) const {
+ OS << "Detailed summary:\n";
+ for (const auto &Entry : DetailedSummary) {
+ OS << Entry.NumCounts << " blocks with count >= " << Entry.MinCount
+ << " account for "
+ << format("%0.6g", (float)Entry.Cutoff / Scale * 100)
+ << " percentage of the total counts.\n";
+ }
+}
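
A minimal sketch of round-tripping a summary through module metadata with the helpers above; note that getFromMD() returns a heap-allocated object the caller owns:

    #include "llvm/IR/Module.h"
    #include "llvm/IR/ProfileSummary.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void roundTrip(Module &M, ProfileSummary &PS) {
      M.setProfileSummary(PS.getMD(M.getContext()),
                          ProfileSummary::PSK_Instr);
      if (ProfileSummary *Recovered = ProfileSummary::getFromMD(
              M.getProfileSummary(/*IsCS=*/false))) {
        Recovered->printSummary(errs());
        delete Recovered;
      }
    }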
diff --git a/contrib/llvm-project/llvm/lib/IR/PseudoProbe.cpp b/contrib/llvm-project/llvm/lib/IR/PseudoProbe.cpp
new file mode 100644
index 000000000000..5cad887b295d
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/PseudoProbe.cpp
@@ -0,0 +1,101 @@
+//===- PseudoProbe.cpp - Pseudo Probe Helpers -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the helpers to manipulate pseudo probe IR intrinsic
+// calls.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/PseudoProbe.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+
+using namespace llvm;
+
+namespace llvm {
+
+Optional<PseudoProbe> extractProbeFromDiscriminator(const Instruction &Inst) {
+ assert(isa<CallBase>(&Inst) && !isa<IntrinsicInst>(&Inst) &&
+ "Only call instructions should have pseudo probes encoded as their "
+ "Dwarf discriminators");
+ if (const DebugLoc &DLoc = Inst.getDebugLoc()) {
+ const DILocation *DIL = DLoc;
+ auto Discriminator = DIL->getDiscriminator();
+ if (DILocation::isPseudoProbeDiscriminator(Discriminator)) {
+ PseudoProbe Probe;
+ Probe.Id =
+ PseudoProbeDwarfDiscriminator::extractProbeIndex(Discriminator);
+ Probe.Type =
+ PseudoProbeDwarfDiscriminator::extractProbeType(Discriminator);
+ Probe.Attr =
+ PseudoProbeDwarfDiscriminator::extractProbeAttributes(Discriminator);
+ Probe.Factor =
+ PseudoProbeDwarfDiscriminator::extractProbeFactor(Discriminator) /
+ (float)PseudoProbeDwarfDiscriminator::FullDistributionFactor;
+ return Probe;
+ }
+ }
+ return None;
+}
+
+Optional<PseudoProbe> extractProbe(const Instruction &Inst) {
+ if (const auto *II = dyn_cast<PseudoProbeInst>(&Inst)) {
+ PseudoProbe Probe;
+ Probe.Id = II->getIndex()->getZExtValue();
+ Probe.Type = (uint32_t)PseudoProbeType::Block;
+ Probe.Attr = II->getAttributes()->getZExtValue();
+ Probe.Factor = II->getFactor()->getZExtValue() /
+ (float)PseudoProbeFullDistributionFactor;
+ return Probe;
+ }
+
+ if (isa<CallBase>(&Inst) && !isa<IntrinsicInst>(&Inst))
+ return extractProbeFromDiscriminator(Inst);
+
+ return None;
+}
+
+void setProbeDistributionFactor(Instruction &Inst, float Factor) {
+ assert(Factor >= 0 && Factor <= 1 &&
+ "Distribution factor must be in [0, 1.0]");
+ if (auto *II = dyn_cast<PseudoProbeInst>(&Inst)) {
+ IRBuilder<> Builder(&Inst);
+ uint64_t IntFactor = PseudoProbeFullDistributionFactor;
+ if (Factor < 1)
+ IntFactor *= Factor;
+ auto OrigFactor = II->getFactor()->getZExtValue();
+ if (IntFactor != OrigFactor)
+ II->replaceUsesOfWith(II->getFactor(), Builder.getInt64(IntFactor));
+ } else if (isa<CallBase>(&Inst) && !isa<IntrinsicInst>(&Inst)) {
+ if (const DebugLoc &DLoc = Inst.getDebugLoc()) {
+ const DILocation *DIL = DLoc;
+ auto Discriminator = DIL->getDiscriminator();
+ if (DILocation::isPseudoProbeDiscriminator(Discriminator)) {
+ auto Index =
+ PseudoProbeDwarfDiscriminator::extractProbeIndex(Discriminator);
+ auto Type =
+ PseudoProbeDwarfDiscriminator::extractProbeType(Discriminator);
+ auto Attr = PseudoProbeDwarfDiscriminator::extractProbeAttributes(
+ Discriminator);
+ // Round small factors to 0 to avoid over-counting.
+ uint32_t IntFactor =
+ PseudoProbeDwarfDiscriminator::FullDistributionFactor;
+ if (Factor < 1)
+ IntFactor *= Factor;
+ uint32_t V = PseudoProbeDwarfDiscriminator::packProbeData(
+ Index, Type, Attr, IntFactor);
+ DIL = DIL->cloneWithDiscriminator(V);
+ Inst.setDebugLoc(DIL);
+ }
+ }
+ }
+}
+
+} // namespace llvm
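
A minimal sketch of reading a probe off an instruction and rescaling its distribution factor, e.g. when an optimization duplicates the probed instruction; Inst is assumed to carry a pseudo probe:

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/PseudoProbe.h"
    using namespace llvm;

    void halveProbeFactor(Instruction &Inst) {
      if (Optional<PseudoProbe> Probe = extractProbe(Inst))
        // Split the counts evenly between the original and the clone.
        setProbeDistributionFactor(Inst, Probe->Factor * 0.5f);
    }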
diff --git a/contrib/llvm-project/llvm/lib/IR/ReplaceConstant.cpp b/contrib/llvm-project/llvm/lib/IR/ReplaceConstant.cpp
new file mode 100644
index 000000000000..069da26e63b1
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/ReplaceConstant.cpp
@@ -0,0 +1,136 @@
+//===- ReplaceConstant.cpp - Replace LLVM constant expression--------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a utility function for replacing LLVM constant
+// expressions by instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/ReplaceConstant.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/ValueMap.h"
+
+namespace llvm {
+
+void convertConstantExprsToInstructions(Instruction *I, ConstantExpr *CE,
+ SmallPtrSetImpl<Instruction *> *Insts) {
+ // Collect all reachable paths to CE from constant expression operands of I.
+ std::map<Use *, std::vector<std::vector<ConstantExpr *>>> CEPaths;
+ collectConstantExprPaths(I, CE, CEPaths);
+
+ // Convert all constant expressions to instructions which are collected at
+ // CEPaths.
+ convertConstantExprsToInstructions(I, CEPaths, Insts);
+}
+
+void convertConstantExprsToInstructions(
+ Instruction *I,
+ std::map<Use *, std::vector<std::vector<ConstantExpr *>>> &CEPaths,
+ SmallPtrSetImpl<Instruction *> *Insts) {
+ ValueMap<ConstantExpr *, Instruction *> Visited;
+
+ for (Use &U : I->operands()) {
+ // The operand U is either not a constant expression operand or the
+ // constant expression paths do not belong to U, ignore U.
+ if (!CEPaths.count(&U))
+ continue;
+
+ // If the instruction I is a PHI instruction, then fix the instruction
+ // insertion point to the entry of the incoming basic block for operand U.
+ auto *BI = I;
+ if (auto *Phi = dyn_cast<PHINode>(I)) {
+ BasicBlock *BB = Phi->getIncomingBlock(U);
+ BI = &(*(BB->getFirstInsertionPt()));
+ }
+
+ // Go through all the paths associated with operand U, and convert all the
+ // constant expressions along all the paths to corresponding instructions.
+ auto *II = I;
+ auto &Paths = CEPaths[&U];
+ for (auto &Path : Paths) {
+ for (auto *CE : Path) {
+ // Instruction which is equivalent to CE.
+ Instruction *NI = nullptr;
+
+ if (!Visited.count(CE)) {
+ // CE is encountered first time, convert it into a corresponding
+ // instruction NI, and appropriately insert NI before the parent
+ // instruction.
+ NI = CE->getAsInstruction(BI);
+
+ // Mark CE as visited by mapping CE to NI.
+ Visited[CE] = NI;
+
+ // If required collect NI.
+ if (Insts)
+ Insts->insert(NI);
+ } else {
+ // We have already encountered CE; the corresponding instruction already
+ // exists, so use it to replace CE.
+ NI = Visited[CE];
+ }
+
+ assert(NI && "Expected an instruction corresponding to constant "
+ "expression.");
+
+ // Replace all uses of constant expression CE by the corresponding
+ // instruction NI within the current parent instruction.
+ II->replaceUsesOfWith(CE, NI);
+ BI = II = NI;
+ }
+ }
+ }
+
+ // Remove all converted constant expressions which are dead by now.
+ for (auto Item : Visited)
+ Item.first->removeDeadConstantUsers();
+}
+
+void collectConstantExprPaths(
+ Instruction *I, ConstantExpr *CE,
+ std::map<Use *, std::vector<std::vector<ConstantExpr *>>> &CEPaths) {
+ for (Use &U : I->operands()) {
+ // If the operand U is not a constant expression operand, then ignore it.
+ auto *CE2 = dyn_cast<ConstantExpr>(U.get());
+ if (!CE2)
+ continue;
+
+ // Holds all reachable paths from CE2 to CE.
+ std::vector<std::vector<ConstantExpr *>> Paths;
+
+ // Collect all reachable paths from CE2 to CE.
+ std::vector<ConstantExpr *> Path{CE2};
+ std::vector<std::vector<ConstantExpr *>> Stack{Path};
+ while (!Stack.empty()) {
+ std::vector<ConstantExpr *> TPath = Stack.back();
+ Stack.pop_back();
+ auto *CE3 = TPath.back();
+
+ if (CE3 == CE) {
+ Paths.push_back(TPath);
+ continue;
+ }
+
+ for (auto &UU : CE3->operands()) {
+ if (auto *CE4 = dyn_cast<ConstantExpr>(UU.get())) {
+ std::vector<ConstantExpr *> NPath(TPath.begin(), TPath.end());
+ NPath.push_back(CE4);
+ Stack.push_back(NPath);
+ }
+ }
+ }
+
+ // Associate all the collected paths with U, and save it.
+ if (!Paths.empty())
+ CEPaths[&U] = Paths;
+ }
+}
+
+} // namespace llvm
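
A minimal sketch of lowering one constant-expression operand of an instruction with the utilities above; I is assumed to reach CE somewhere in its operand expression trees:

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/IR/ReplaceConstant.h"
    using namespace llvm;

    void lowerCE(Instruction *I, ConstantExpr *CE) {
      SmallPtrSet<Instruction *, 8> NewInsts;
      convertConstantExprsToInstructions(I, CE, &NewInsts);
      // NewInsts now holds the instructions created along every path to CE.
    }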
diff --git a/contrib/llvm-project/llvm/lib/IR/SSAContext.cpp b/contrib/llvm-project/llvm/lib/IR/SSAContext.cpp
new file mode 100644
index 000000000000..5b865692dd7f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/SSAContext.cpp
@@ -0,0 +1,48 @@
+//===- SSAContext.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a specialization of the GenericSSAContext<X>
+/// template class for LLVM IR.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/SSAContext.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/ModuleSlotTracker.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+BasicBlock *SSAContext::getEntryBlock(Function &F) {
+ return &F.getEntryBlock();
+}
+
+void SSAContext::setFunction(Function &Fn) { F = &Fn; }
+
+Printable SSAContext::print(Value *V) const {
+ return Printable([V](raw_ostream &Out) { V->print(Out); });
+}
+
+Printable SSAContext::print(Instruction *Inst) const {
+ return print(cast<Value>(Inst));
+}
+
+Printable SSAContext::print(BasicBlock *BB) const {
+ if (BB->hasName())
+ return Printable([BB](raw_ostream &Out) { Out << BB->getName(); });
+
+ return Printable([BB](raw_ostream &Out) {
+ ModuleSlotTracker MST{BB->getParent()->getParent(), false};
+ MST.incorporateFunction(*BB->getParent());
+ Out << MST.getLocalSlot(BB);
+ });
+}
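
A minimal sketch of the printing fallback above: an unnamed block is printed via its numeric slot, matching the AsmWriter's labels.

    #include "llvm/IR/Function.h"
    #include "llvm/IR/SSAContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void printEntry(Function &F) {
      SSAContext Ctx;
      Ctx.setFunction(F);
      errs() << Ctx.print(&F.getEntryBlock()) << "\n";
    }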
diff --git a/contrib/llvm-project/llvm/lib/IR/SafepointIRVerifier.cpp b/contrib/llvm-project/llvm/lib/IR/SafepointIRVerifier.cpp
new file mode 100644
index 000000000000..5d3fa28f7d0a
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/SafepointIRVerifier.cpp
@@ -0,0 +1,911 @@
+//===-- SafepointIRVerifier.cpp - Verify gc.statepoint invariants ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Run a basic correctness check on the IR to ensure that Safepoints - if
+// they've been inserted - were inserted correctly. In particular, look for use
+// of non-relocated values after a safepoint. Its primary use is to check the
+// correctness of safepoint insertion immediately after insertion, but it can
+// also be used to verify that later transforms have not found a way to break
+// safepoint semantics.
+//
+// In its current form, this verifier checks a property which is sufficient,
+// but not necessary for correctness. There are some cases where an unrelocated
+// pointer can be used after the safepoint. Consider this example:
+//
+// a = ...
+// b = ...
+// (a',b') = safepoint(a,b)
+// c = cmp eq a b
+// br c, ..., ....
+//
+// Because it is valid to reorder 'c' above the safepoint, this is legal. In
+// practice, this is a somewhat uncommon transform, but CodeGenPrepare does
+// create idioms like this. The verifier knows about these cases and avoids
+// reporting false positives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/SafepointIRVerifier.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/Value.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "safepoint-ir-verifier"
+
+using namespace llvm;
+
+/// This option is used for writing test cases. Instead of crashing the program
+/// when verification fails, report a message to the console (for FileCheck
+/// usage) and continue execution as if nothing happened.
+static cl::opt<bool> PrintOnly("safepoint-ir-verifier-print-only",
+ cl::init(false));
+
+namespace {
+
+/// This CFGDeadness analysis finds dead blocks and edges. The algorithm starts
+/// with the set of blocks unreachable from entry, then propagates deadness
+/// through foldable conditional branches without modifying the CFG. GVN does
+/// the same, but it changes the CFG by splitting critical edges. In most cases
+/// passes rely on SimplifyCFG to clean up dead blocks, but in some cases, such
+/// as verification or loop passes, that is not possible.
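+///
+/// For illustration (hypothetical IR, not from the original sources): given
+/// br i1 true, label %taken, label %nottaken
+/// the edge to %nottaken (operand 1, the false successor) is recorded as
+/// dead, and %nottaken itself becomes dead once it has no remaining live
+/// incoming edges.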
+class CFGDeadness {
+ const DominatorTree *DT = nullptr;
+ SetVector<const BasicBlock *> DeadBlocks;
+ SetVector<const Use *> DeadEdges; // Contains all dead edges from live blocks.
+
+public:
+ /// Return the edge that corresponds to the predecessor.
+ static const Use& getEdge(const_pred_iterator &PredIt) {
+ auto &PU = PredIt.getUse();
+ return PU.getUser()->getOperandUse(PU.getOperandNo());
+ }
+
+ /// Return true if there is at least one live edge that corresponds to the
+ /// basic block InBB listed in the phi node.
+ bool hasLiveIncomingEdge(const PHINode *PN, const BasicBlock *InBB) const {
+ assert(!isDeadBlock(InBB) && "block must be live");
+ const BasicBlock* BB = PN->getParent();
+ bool Listed = false;
+ for (const_pred_iterator PredIt(BB), End(BB, true); PredIt != End; ++PredIt) {
+ if (InBB == *PredIt) {
+ if (!isDeadEdge(&getEdge(PredIt)))
+ return true;
+ Listed = true;
+ }
+ }
+ (void)Listed;
+ assert(Listed && "basic block is not found among incoming blocks");
+ return false;
+ }
+
+ bool isDeadBlock(const BasicBlock *BB) const {
+ return DeadBlocks.count(BB);
+ }
+
+ bool isDeadEdge(const Use *U) const {
+ assert(cast<Instruction>(U->getUser())->isTerminator() &&
+ "edge must be operand of terminator");
+ assert(cast_or_null<BasicBlock>(U->get()) &&
+ "edge must refer to basic block");
+ assert(!isDeadBlock(cast<Instruction>(U->getUser())->getParent()) &&
+ "isDeadEdge() must be applied to edge from live block");
+ return DeadEdges.count(U);
+ }
+
+ bool hasLiveIncomingEdges(const BasicBlock *BB) const {
+ // Check if all incoming edges are dead.
+ for (const_pred_iterator PredIt(BB), End(BB, true); PredIt != End; ++PredIt) {
+ auto &PU = PredIt.getUse();
+ const Use &U = PU.getUser()->getOperandUse(PU.getOperandNo());
+ if (!isDeadBlock(*PredIt) && !isDeadEdge(&U))
+ return true; // Found a live edge.
+ }
+ return false;
+ }
+
+ void processFunction(const Function &F, const DominatorTree &DT) {
+ this->DT = &DT;
+
+ // Start with all blocks unreachable from entry.
+ for (const BasicBlock &BB : F)
+ if (!DT.isReachableFromEntry(&BB))
+ DeadBlocks.insert(&BB);
+
+ // Top-down walk of the CFG in reverse post-order.
+ ReversePostOrderTraversal<const Function *> RPOT(&F);
+ for (const BasicBlock *BB : RPOT) {
+ const Instruction *TI = BB->getTerminator();
+ assert(TI && "blocks must be well formed");
+
+ // For conditional branches, we can perform simple conditional propagation on
+ // the condition value itself.
+ const BranchInst *BI = dyn_cast<BranchInst>(TI);
+ if (!BI || !BI->isConditional() || !isa<Constant>(BI->getCondition()))
+ continue;
+
+ // If a branch has two identical successors, we cannot declare either dead.
+ if (BI->getSuccessor(0) == BI->getSuccessor(1))
+ continue;
+
+ ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
+ if (!Cond)
+ continue;
+
+ addDeadEdge(BI->getOperandUse(Cond->getZExtValue() ? 1 : 2));
+ }
+ }
+
+protected:
+ void addDeadBlock(const BasicBlock *BB) {
+ SmallVector<const BasicBlock *, 4> NewDead;
+ SmallSetVector<const BasicBlock *, 4> DF;
+
+ NewDead.push_back(BB);
+ while (!NewDead.empty()) {
+ const BasicBlock *D = NewDead.pop_back_val();
+ if (isDeadBlock(D))
+ continue;
+
+ // All blocks dominated by D are dead.
+ SmallVector<BasicBlock *, 8> Dom;
+ DT->getDescendants(const_cast<BasicBlock*>(D), Dom);
+ // We do not need to mark all in and out edges dead,
+ // because BB itself is marked dead and that is enough
+ // to proceed.
+ DeadBlocks.insert(Dom.begin(), Dom.end());
+
+ // Figure out the dominance-frontier(D).
+ for (BasicBlock *B : Dom)
+ for (BasicBlock *S : successors(B))
+ if (!isDeadBlock(S) && !hasLiveIncomingEdges(S))
+ NewDead.push_back(S);
+ }
+ }
+
+ void addDeadEdge(const Use &DeadEdge) {
+ if (!DeadEdges.insert(&DeadEdge))
+ return;
+
+ BasicBlock *BB = cast_or_null<BasicBlock>(DeadEdge.get());
+ if (hasLiveIncomingEdges(BB))
+ return;
+
+ addDeadBlock(BB);
+ }
+};
+} // namespace
+
+static void Verify(const Function &F, const DominatorTree &DT,
+ const CFGDeadness &CD);
+
+namespace llvm {
+PreservedAnalyses SafepointIRVerifierPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ const auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
+ CFGDeadness CD;
+ CD.processFunction(F, DT);
+ Verify(F, DT, CD);
+ return PreservedAnalyses::all();
+}
+} // namespace llvm
+
+namespace {
+
+struct SafepointIRVerifier : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ SafepointIRVerifier() : FunctionPass(ID) {
+ initializeSafepointIRVerifierPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ CFGDeadness CD;
+ CD.processFunction(F, DT);
+ Verify(F, DT, CD);
+ return false; // no modifications
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequiredID(DominatorTreeWrapperPass::ID);
+ AU.setPreservesAll();
+ }
+
+ StringRef getPassName() const override { return "safepoint verifier"; }
+};
+} // namespace
+
+void llvm::verifySafepointIR(Function &F) {
+ SafepointIRVerifier pass;
+ pass.runOnFunction(F);
+}
+
+char SafepointIRVerifier::ID = 0;
+
+FunctionPass *llvm::createSafepointIRVerifierPass() {
+ return new SafepointIRVerifier();
+}
+
+INITIALIZE_PASS_BEGIN(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, false)
+
+static bool isGCPointerType(Type *T) {
+ if (auto *PT = dyn_cast<PointerType>(T))
+ // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
+ // GC managed heap. We know that a pointer into this heap needs to be
+ // updated and that no other pointer does.
+ return (1 == PT->getAddressSpace());
+ return false;
+}
+
+static bool containsGCPtrType(Type *Ty) {
+ if (isGCPointerType(Ty))
+ return true;
+ if (VectorType *VT = dyn_cast<VectorType>(Ty))
+ return isGCPointerType(VT->getScalarType());
+ if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
+ return containsGCPtrType(AT->getElementType());
+ if (StructType *ST = dyn_cast<StructType>(Ty))
+ return llvm::any_of(ST->elements(), containsGCPtrType);
+ return false;
+}
+
+// Debugging aid -- prints a [Begin, End) range of values.
+template<typename IteratorTy>
+static void PrintValueSet(raw_ostream &OS, IteratorTy Begin, IteratorTy End) {
+ OS << "[ ";
+ while (Begin != End) {
+ OS << **Begin << " ";
+ ++Begin;
+ }
+ OS << "]";
+}
+
+/// The verifier algorithm is phrased in terms of availability. The set of
+/// values "available" at a given point in the control flow graph is the set of
+/// correctly relocated values at that point, and is a subset of the set of
+/// definitions dominating that point.
+
+using AvailableValueSet = DenseSet<const Value *>;
+
+/// State we compute and track per basic block.
+struct BasicBlockState {
+ // Set of values available coming in, before the phi nodes
+ AvailableValueSet AvailableIn;
+
+ // Set of values available going out
+ AvailableValueSet AvailableOut;
+
+ // AvailableOut minus AvailableIn.
+ // All elements are Instructions
+ AvailableValueSet Contribution;
+
+ // True if this block contains a safepoint and thus AvailableIn does not
+ // contribute to AvailableOut.
+ bool Cleared = false;
+};
+
+/// A given derived pointer can have multiple base pointers through phi/selects.
+/// This type indicates when the base pointer is exclusively constant
+/// (ExclusivelySomeConstant), and if that constant is proven to be exclusively
+/// null, we record that as ExclusivelyNull. In all other cases, the BaseType is
+/// NonConstant.
+enum BaseType {
+ NonConstant = 1, // Base pointer is not exclusively constant.
+ ExclusivelyNull,
+ ExclusivelySomeConstant // Base pointers for a given derived pointer are from
+ // a set of constants, but they are not exclusively
+ // null.
+};
+
+/// Return the baseType for Val, which states whether Val is exclusively
+/// derived from constant/null, or not exclusively derived from constant.
+/// Val is exclusively derived from a constant base when all operands of phis
+/// and selects are derived from a constant base.
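+///
+/// For illustration (hypothetical cases, not from the original sources):
+/// a gep on a null base, e.g. gep(i8, i8 addrspace(1)* null, i64 8),
+/// yields ExclusivelyNull; a phi over two distinct non-null constant
+/// pointers yields ExclusivelySomeConstant; anything reaching a load,
+/// call or argument yields NonConstant.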
+static enum BaseType getBaseType(const Value *Val) {
+
+ SmallVector<const Value *, 32> Worklist;
+ DenseSet<const Value *> Visited;
+ bool isExclusivelyDerivedFromNull = true;
+ Worklist.push_back(Val);
+ // Strip through all the bitcasts and geps to get the base pointer. Also
+ // check for the exclusive value when there can be multiple base pointers
+ // (through phis or selects).
+ while (!Worklist.empty()) {
+ const Value *V = Worklist.pop_back_val();
+ if (!Visited.insert(V).second)
+ continue;
+
+ if (const auto *CI = dyn_cast<CastInst>(V)) {
+ Worklist.push_back(CI->stripPointerCasts());
+ continue;
+ }
+ if (const auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
+ Worklist.push_back(GEP->getPointerOperand());
+ continue;
+ }
+ // Push all the incoming values of phi node into the worklist for
+ // processing.
+ if (const auto *PN = dyn_cast<PHINode>(V)) {
+ append_range(Worklist, PN->incoming_values());
+ continue;
+ }
+ if (const auto *SI = dyn_cast<SelectInst>(V)) {
+ // Push in the true and false values
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+ if (const auto *GCRelocate = dyn_cast<GCRelocateInst>(V)) {
+ // GCRelocates do not change null-ness or constant-ness of the value.
+ // So we can continue with the derived pointer this instruction relocates.
+ Worklist.push_back(GCRelocate->getDerivedPtr());
+ continue;
+ }
+ if (const auto *FI = dyn_cast<FreezeInst>(V)) {
+ // Freeze does not change null-ness or constant-ness of the value.
+ Worklist.push_back(FI->getOperand(0));
+ continue;
+ }
+ if (isa<Constant>(V)) {
+ // We found at least one base pointer which is non-null, so this derived
+ // pointer is not exclusively derived from null.
+ if (V != Constant::getNullValue(V->getType()))
+ isExclusivelyDerivedFromNull = false;
+ // Continue processing the remaining values to make sure it's exclusively
+ // constant.
+ continue;
+ }
+ // At this point, we know that the base pointer is not exclusively
+ // constant.
+ return BaseType::NonConstant;
+ }
+ // Now, we know that the base pointer is exclusively constant, but we need to
+ // differentiate between exclusive null constant and non-null constant.
+ return isExclusivelyDerivedFromNull ? BaseType::ExclusivelyNull
+ : BaseType::ExclusivelySomeConstant;
+}
+
+static bool isNotExclusivelyConstantDerived(const Value *V) {
+ return getBaseType(V) == BaseType::NonConstant;
+}
+
+namespace {
+class InstructionVerifier;
+
+/// Builds BasicBlockState for each BB of the function.
+/// It can traverse the function for verification and provides all required
+/// information.
+///
+/// A GC pointer may be in one of three states: relocated, unrelocated and
+/// poisoned.
+/// A relocated pointer may be used without any restrictions.
+/// An unrelocated pointer cannot be dereferenced, passed as an argument to
+/// any call or returned. An unrelocated pointer may be safely compared
+/// against another unrelocated pointer or against a pointer exclusively
+/// derived from null.
+/// Poisoned pointers are produced when we somehow derive a pointer from both
+/// relocated and unrelocated pointers (e.g. phi, select). These pointers may
+/// be safely used in a very limited number of situations. Currently the only
+/// valid use is comparison against a constant exclusively derived from null.
+/// All limitations arise due to their undefined state: these pointers should
+/// be treated as relocated and unrelocated simultaneously.
+/// Rules of deriving:
+/// R + U = P - that's where the poisoned pointers come from
+/// P + X = P
+/// U + U = U
+/// R + R = R
+/// X + C = X
+/// where "+" is any operation that somehow derives a pointer, U is
+/// unrelocated, R is relocated, P is poisoned, C is constant, and X is U, R,
+/// P, C or nothing (in case "+" is a unary operation).
+/// Deriving a pointer is by itself always safe.
+/// NOTE: when we are making decision on the status of instruction's result:
+/// a) for phi we need to check status of each input *at the end of
+/// corresponding predecessor BB*.
+/// b) for other instructions we need to check status of each input *at the
+/// current point*.
+///
+/// FIXME: This works fairly well except one case
+/// bb1:
+/// p = *some GC-ptr def*
+/// p1 = gep p, offset
+/// / |
+/// / |
+/// bb2: |
+/// safepoint |
+/// \ |
+/// \ |
+/// bb3:
+/// p2 = phi [p, bb2] [p1, bb1]
+/// p3 = phi [p, bb2] [p, bb1]
+/// here p and p1 are unrelocated
+/// p2 and p3 are poisoned (though they shouldn't be)
+///
+/// This leads to some weird results:
+/// cmp eq p, p2 - illegal instruction (false-positive)
+/// cmp eq p1, p2 - illegal instruction (false-positive)
+/// cmp eq p, p3 - illegal instruction (false-positive)
+/// cmp eq p, p1 - ok
+/// To fix this we need to introduce the concept of generations and be able to
+/// check whether two values belong to the same generation. This way p2 would
+/// be considered unrelocated and no false alarm would be raised.
+class GCPtrTracker {
+ const Function &F;
+ const CFGDeadness &CD;
+ SpecificBumpPtrAllocator<BasicBlockState> BSAllocator;
+ DenseMap<const BasicBlock *, BasicBlockState *> BlockMap;
+ // This set contains defs of unrelocated pointers that are proved to be legal
+ // and don't need verification.
+ DenseSet<const Instruction *> ValidUnrelocatedDefs;
+ // This set contains poisoned defs. They can be safely ignored during
+ // verification too.
+ DenseSet<const Value *> PoisonedDefs;
+
+public:
+ GCPtrTracker(const Function &F, const DominatorTree &DT,
+ const CFGDeadness &CD);
+
+ bool hasLiveIncomingEdge(const PHINode *PN, const BasicBlock *InBB) const {
+ return CD.hasLiveIncomingEdge(PN, InBB);
+ }
+
+ BasicBlockState *getBasicBlockState(const BasicBlock *BB);
+ const BasicBlockState *getBasicBlockState(const BasicBlock *BB) const;
+
+ bool isValuePoisoned(const Value *V) const { return PoisonedDefs.count(V); }
+
+ /// Traverse each BB of the function and call
+ /// InstructionVerifier::verifyInstruction for each possibly invalid
+ /// instruction.
+ /// It destructively modifies GCPtrTracker, so it's passed via rvalue
+ /// reference in order to prohibit further use of GCPtrTracker, as it will
+ /// be left in an inconsistent state.
+ static void verifyFunction(GCPtrTracker &&Tracker,
+ InstructionVerifier &Verifier);
+
+ /// Returns true for reachable and live blocks.
+ bool isMapped(const BasicBlock *BB) const {
+ return BlockMap.find(BB) != BlockMap.end();
+ }
+
+private:
+ /// Returns true if the instruction may be safely skipped during verification.
+ bool instructionMayBeSkipped(const Instruction *I) const;
+
+ /// Iterates over all BBs from BlockMap and recalculates AvailableIn/Out for
+ /// each of them until it converges.
+ void recalculateBBsStates();
+
+ /// Removes from Contribution all defs that legally produce unrelocated
+ /// pointers and saves them to ValidUnrelocatedDefs.
+ /// Though Contribution should belong to BBS, it is passed separately with a
+ /// different const-modifier in order to emphasize (and guarantee) that only
+ /// Contribution will be changed.
+ /// Returns true if Contribution was changed, otherwise false.
+ bool removeValidUnrelocatedDefs(const BasicBlock *BB,
+ const BasicBlockState *BBS,
+ AvailableValueSet &Contribution);
+
+ /// Gather all the definitions dominating the start of BB into Result. This is
+ /// simply the defs introduced by every dominating basic block and the
+ /// function arguments.
+ void gatherDominatingDefs(const BasicBlock *BB, AvailableValueSet &Result,
+ const DominatorTree &DT);
+
+ /// Compute the AvailableOut set for BB, based on the BasicBlockState BBS,
+ /// which is the BasicBlockState for BB.
+ /// ContributionChanged is set when the verifier runs for the first time
+ /// (in this case Contribution was changed from 'empty' to its initial state)
+ /// or when Contribution of this BB was changed since last computation.
+ static void transferBlock(const BasicBlock *BB, BasicBlockState &BBS,
+ bool ContributionChanged);
+
+ /// Model the effect of an instruction on the set of available values.
+ static void transferInstruction(const Instruction &I, bool &Cleared,
+ AvailableValueSet &Available);
+};
+
+/// It is a visitor for GCPtrTracker::verifyFunction. It decides if the
+/// instruction (which uses a heap reference) is legal or not, given our
+/// safepoint semantics.
+class InstructionVerifier {
+ bool AnyInvalidUses = false;
+
+public:
+ void verifyInstruction(const GCPtrTracker *Tracker, const Instruction &I,
+ const AvailableValueSet &AvailableSet);
+
+ bool hasAnyInvalidUses() const { return AnyInvalidUses; }
+
+private:
+ void reportInvalidUse(const Value &V, const Instruction &I);
+};
+} // end anonymous namespace
+
+GCPtrTracker::GCPtrTracker(const Function &F, const DominatorTree &DT,
+ const CFGDeadness &CD) : F(F), CD(CD) {
+ // Calculate Contribution of each live BB.
+ // Allocate BB states for live blocks.
+ for (const BasicBlock &BB : F)
+ if (!CD.isDeadBlock(&BB)) {
+ BasicBlockState *BBS = new (BSAllocator.Allocate()) BasicBlockState;
+ for (const auto &I : BB)
+ transferInstruction(I, BBS->Cleared, BBS->Contribution);
+ BlockMap[&BB] = BBS;
+ }
+
+ // Initialize AvailableIn/Out sets of each BB using only information about
+ // dominating BBs.
+ for (auto &BBI : BlockMap) {
+ gatherDominatingDefs(BBI.first, BBI.second->AvailableIn, DT);
+ transferBlock(BBI.first, *BBI.second, true);
+ }
+
+ // Simulate the flow of defs through the CFG and recalculate AvailableIn/Out
+ // sets of each BB until it converges. If any def is proved to be an
+ // unrelocated pointer, it will be removed from all BBSs.
+ recalculateBBsStates();
+}
+
+BasicBlockState *GCPtrTracker::getBasicBlockState(const BasicBlock *BB) {
+ return BlockMap.lookup(BB);
+}
+
+const BasicBlockState *GCPtrTracker::getBasicBlockState(
+ const BasicBlock *BB) const {
+ return const_cast<GCPtrTracker *>(this)->getBasicBlockState(BB);
+}
+
+bool GCPtrTracker::instructionMayBeSkipped(const Instruction *I) const {
+ // Poisoned defs are skipped since they are always safe by themselves by
+ // definition (for details see the comment on this class).
+ return ValidUnrelocatedDefs.count(I) || PoisonedDefs.count(I);
+}
+
+void GCPtrTracker::verifyFunction(GCPtrTracker &&Tracker,
+ InstructionVerifier &Verifier) {
+ // We need RPO here to a) always report the first error and b) report errors
+ // in the same order from run to run.
+ ReversePostOrderTraversal<const Function *> RPOT(&Tracker.F);
+ for (const BasicBlock *BB : RPOT) {
+ BasicBlockState *BBS = Tracker.getBasicBlockState(BB);
+ if (!BBS)
+ continue;
+
+ // We destructively modify AvailableIn as we traverse the block instruction
+ // by instruction.
+ AvailableValueSet &AvailableSet = BBS->AvailableIn;
+ for (const Instruction &I : *BB) {
+ if (Tracker.instructionMayBeSkipped(&I))
+ continue; // This instruction shouldn't be added to AvailableSet.
+
+ Verifier.verifyInstruction(&Tracker, I, AvailableSet);
+
+ // Model the effect of the current instruction on AvailableSet to keep the
+ // set relevant at each point in the BB.
+ bool Cleared = false;
+ transferInstruction(I, Cleared, AvailableSet);
+ (void)Cleared;
+ }
+ }
+}
+
+void GCPtrTracker::recalculateBBsStates() {
+ SetVector<const BasicBlock *> Worklist;
+ // TODO: This order is suboptimal; it's better to replace it with a priority
+ // queue where the priority is the RPO number of the BB.
+ for (auto &BBI : BlockMap)
+ Worklist.insert(BBI.first);
+
+ // This loop iterates the AvailableIn/Out sets until it converges.
+ // The AvailableIn and AvailableOut sets decrease as we iterate.
+ while (!Worklist.empty()) {
+ const BasicBlock *BB = Worklist.pop_back_val();
+ BasicBlockState *BBS = getBasicBlockState(BB);
+ if (!BBS)
+ continue; // Ignore dead successors.
+
+ size_t OldInCount = BBS->AvailableIn.size();
+ for (const_pred_iterator PredIt(BB), End(BB, true); PredIt != End; ++PredIt) {
+ const BasicBlock *PBB = *PredIt;
+ BasicBlockState *PBBS = getBasicBlockState(PBB);
+ if (PBBS && !CD.isDeadEdge(&CFGDeadness::getEdge(PredIt)))
+ set_intersect(BBS->AvailableIn, PBBS->AvailableOut);
+ }
+
+ assert(OldInCount >= BBS->AvailableIn.size() && "invariant!");
+
+ bool InputsChanged = OldInCount != BBS->AvailableIn.size();
+ bool ContributionChanged =
+ removeValidUnrelocatedDefs(BB, BBS, BBS->Contribution);
+ if (!InputsChanged && !ContributionChanged)
+ continue;
+
+ size_t OldOutCount = BBS->AvailableOut.size();
+ transferBlock(BB, *BBS, ContributionChanged);
+ if (OldOutCount != BBS->AvailableOut.size()) {
+ assert(OldOutCount > BBS->AvailableOut.size() && "invariant!");
+ Worklist.insert(succ_begin(BB), succ_end(BB));
+ }
+ }
+}
+
+bool GCPtrTracker::removeValidUnrelocatedDefs(const BasicBlock *BB,
+ const BasicBlockState *BBS,
+ AvailableValueSet &Contribution) {
+ assert(&BBS->Contribution == &Contribution &&
+ "Passed Contribution should be from the passed BasicBlockState!");
+ AvailableValueSet AvailableSet = BBS->AvailableIn;
+ bool ContributionChanged = false;
+ // For an explanation of why instructions are processed this way, see
+ // "Rules of deriving" in the comment on this class.
+ for (const Instruction &I : *BB) {
+ bool ValidUnrelocatedPointerDef = false;
+ bool PoisonedPointerDef = false;
+ // TODO: `select` instructions should be handled here too.
+ if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ if (containsGCPtrType(PN->getType())) {
+ // If both are true, the output is poisoned.
+ bool HasRelocatedInputs = false;
+ bool HasUnrelocatedInputs = false;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ const BasicBlock *InBB = PN->getIncomingBlock(i);
+ if (!isMapped(InBB) ||
+ !CD.hasLiveIncomingEdge(PN, InBB))
+ continue; // Skip dead block or dead edge.
+
+ const Value *InValue = PN->getIncomingValue(i);
+
+ if (isNotExclusivelyConstantDerived(InValue)) {
+ if (isValuePoisoned(InValue)) {
+ // If any of the inputs is poisoned, the output is always poisoned too.
+ HasRelocatedInputs = true;
+ HasUnrelocatedInputs = true;
+ break;
+ }
+ if (BlockMap[InBB]->AvailableOut.count(InValue))
+ HasRelocatedInputs = true;
+ else
+ HasUnrelocatedInputs = true;
+ }
+ }
+ if (HasUnrelocatedInputs) {
+ if (HasRelocatedInputs)
+ PoisonedPointerDef = true;
+ else
+ ValidUnrelocatedPointerDef = true;
+ }
+ }
+ } else if ((isa<GetElementPtrInst>(I) || isa<BitCastInst>(I)) &&
+ containsGCPtrType(I.getType())) {
+ // A GEP/bitcast of an unrelocated pointer is legal by itself, but this def
+ // shouldn't appear in any AvailableSet.
+ for (const Value *V : I.operands())
+ if (containsGCPtrType(V->getType()) &&
+ isNotExclusivelyConstantDerived(V) && !AvailableSet.count(V)) {
+ if (isValuePoisoned(V))
+ PoisonedPointerDef = true;
+ else
+ ValidUnrelocatedPointerDef = true;
+ break;
+ }
+ }
+ assert(!(ValidUnrelocatedPointerDef && PoisonedPointerDef) &&
+ "Value cannot be both unrelocated and poisoned!");
+ if (ValidUnrelocatedPointerDef) {
+ // Remove def of unrelocated pointer from Contribution of this BB and
+ // trigger update of all its successors.
+ Contribution.erase(&I);
+ PoisonedDefs.erase(&I);
+ ValidUnrelocatedDefs.insert(&I);
+ LLVM_DEBUG(dbgs() << "Removing unrelocated " << I
+ << " from Contribution of " << BB->getName() << "\n");
+ ContributionChanged = true;
+ } else if (PoisonedPointerDef) {
+ // Mark pointer as poisoned, remove its def from Contribution and trigger
+ // update of all successors.
+ Contribution.erase(&I);
+ PoisonedDefs.insert(&I);
+ LLVM_DEBUG(dbgs() << "Removing poisoned " << I << " from Contribution of "
+ << BB->getName() << "\n");
+ ContributionChanged = true;
+ } else {
+ bool Cleared = false;
+ transferInstruction(I, Cleared, AvailableSet);
+ (void)Cleared;
+ }
+ }
+ return ContributionChanged;
+}
+
+void GCPtrTracker::gatherDominatingDefs(const BasicBlock *BB,
+ AvailableValueSet &Result,
+ const DominatorTree &DT) {
+ DomTreeNode *DTN = DT[const_cast<BasicBlock *>(BB)];
+
+ assert(DTN && "Unreachable blocks are ignored");
+ while (DTN->getIDom()) {
+ DTN = DTN->getIDom();
+ auto BBS = getBasicBlockState(DTN->getBlock());
+ assert(BBS && "immediate dominator cannot be dead for a live block");
+ const auto &Defs = BBS->Contribution;
+ Result.insert(Defs.begin(), Defs.end());
+ // If this block is 'Cleared', then nothing LiveIn to this block can be
+ // available after this block completes. Note: This turns out to be
+ // really important for reducing memory consumption of the initial available
+ // sets and thus peak memory usage by this verifier.
+ if (BBS->Cleared)
+ return;
+ }
+
+ for (const Argument &A : BB->getParent()->args())
+ if (containsGCPtrType(A.getType()))
+ Result.insert(&A);
+}
+
+void GCPtrTracker::transferBlock(const BasicBlock *BB, BasicBlockState &BBS,
+ bool ContributionChanged) {
+ const AvailableValueSet &AvailableIn = BBS.AvailableIn;
+ AvailableValueSet &AvailableOut = BBS.AvailableOut;
+
+ if (BBS.Cleared) {
+ // AvailableOut will change only when Contribution changed.
+ if (ContributionChanged)
+ AvailableOut = BBS.Contribution;
+ } else {
+ // Otherwise, recompute AvailableOut as the union of our Contribution and
+ // the (possibly shrunken) AvailableIn set.
+ AvailableValueSet Temp = BBS.Contribution;
+ set_union(Temp, AvailableIn);
+ AvailableOut = std::move(Temp);
+ }
+
+ LLVM_DEBUG(dbgs() << "Transferred block " << BB->getName() << " from ";
+ PrintValueSet(dbgs(), AvailableIn.begin(), AvailableIn.end());
+ dbgs() << " to ";
+ PrintValueSet(dbgs(), AvailableOut.begin(), AvailableOut.end());
+ dbgs() << "\n";);
+}
+
+void GCPtrTracker::transferInstruction(const Instruction &I, bool &Cleared,
+ AvailableValueSet &Available) {
+ if (isa<GCStatepointInst>(I)) {
+ Cleared = true;
+ Available.clear();
+ } else if (containsGCPtrType(I.getType()))
+ Available.insert(&I);
+}
+
+void InstructionVerifier::verifyInstruction(
+ const GCPtrTracker *Tracker, const Instruction &I,
+ const AvailableValueSet &AvailableSet) {
+ if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ if (containsGCPtrType(PN->getType()))
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ const BasicBlock *InBB = PN->getIncomingBlock(i);
+ const BasicBlockState *InBBS = Tracker->getBasicBlockState(InBB);
+ if (!InBBS ||
+ !Tracker->hasLiveIncomingEdge(PN, InBB))
+ continue; // Skip dead block or dead edge.
+
+ const Value *InValue = PN->getIncomingValue(i);
+
+ if (isNotExclusivelyConstantDerived(InValue) &&
+ !InBBS->AvailableOut.count(InValue))
+ reportInvalidUse(*InValue, *PN);
+ }
+ } else if (isa<CmpInst>(I) &&
+ containsGCPtrType(I.getOperand(0)->getType())) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ enum BaseType baseTyLHS = getBaseType(LHS),
+ baseTyRHS = getBaseType(RHS);
+
+ // Returns true if LHS and RHS are unrelocated pointers and they are
+ // valid unrelocated uses.
+ auto hasValidUnrelocatedUse = [&AvailableSet, Tracker, baseTyLHS, baseTyRHS,
+ &LHS, &RHS] () {
+ // A cmp instruction has valid unrelocated pointer operands only if
+ // both operands are unrelocated pointers.
+ // In the comparison between two pointers, if one is an unrelocated
+ // use, the other *should be* an unrelocated use, for this
+ // instruction to contain valid unrelocated uses. This unrelocated
+ // use can be a null constant as well, or another unrelocated
+ // pointer.
+ if (AvailableSet.count(LHS) || AvailableSet.count(RHS))
+ return false;
+ // Constant pointers (that are not exclusively null) may have
+ // meaning in different VMs, so we cannot reorder the compare
+ // against constant pointers before the safepoint. In other words,
+ // comparison of an unrelocated use against a non-null constant
+ // may be invalid.
+ if ((baseTyLHS == BaseType::ExclusivelySomeConstant &&
+ baseTyRHS == BaseType::NonConstant) ||
+ (baseTyLHS == BaseType::NonConstant &&
+ baseTyRHS == BaseType::ExclusivelySomeConstant))
+ return false;
+
+ // If one of the pointers is poisoned and the other is not exclusively
+ // derived from null, it is an invalid expression: it produces a poisoned
+ // result, and unless we want to track all defs (not only gc pointers)
+ // the only option is to prohibit such instructions.
+ if ((Tracker->isValuePoisoned(LHS) && baseTyRHS != ExclusivelyNull) ||
+ (Tracker->isValuePoisoned(RHS) && baseTyLHS != ExclusivelyNull))
+ return false;
+
+ // All other cases are valid cases enumerated below:
+ // 1. Comparison between an exclusively derived null pointer and a
+ // constant base pointer.
+ // 2. Comparison between an exclusively derived null pointer and a
+ // non-constant unrelocated base pointer.
+ // 3. Comparison between 2 unrelocated pointers.
+ // 4. Comparison between a pointer exclusively derived from null and a
+ // non-constant poisoned pointer.
+ return true;
+ };
+ if (!hasValidUnrelocatedUse()) {
+ // Print out all non-constant derived pointers that are unrelocated
+ // uses, which are invalid.
+ if (baseTyLHS == BaseType::NonConstant && !AvailableSet.count(LHS))
+ reportInvalidUse(*LHS, I);
+ if (baseTyRHS == BaseType::NonConstant && !AvailableSet.count(RHS))
+ reportInvalidUse(*RHS, I);
+ }
+ } else {
+ for (const Value *V : I.operands())
+ if (containsGCPtrType(V->getType()) &&
+ isNotExclusivelyConstantDerived(V) && !AvailableSet.count(V))
+ reportInvalidUse(*V, I);
+ }
+}
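+
+// For illustration (hypothetical IR, not from the original sources), with the
+// cmp rules above and addrspace(1) as the GC-managed heap:
+// %c1 = icmp eq i8 addrspace(1)* %unrelocated, null ; valid
+// %c2 = icmp eq i8 addrspace(1)* %unrelocated, %unrelocated2 ; valid
+// %c3 = icmp eq i8 addrspace(1)* %unrelocated, %relocated ; reported
+// %relocated is in the AvailableSet, so the third compare fails
+// hasValidUnrelocatedUse() and its unrelocated operand is reported.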
+
+void InstructionVerifier::reportInvalidUse(const Value &V,
+ const Instruction &I) {
+ errs() << "Illegal use of unrelocated value found!\n";
+ errs() << "Def: " << V << "\n";
+ errs() << "Use: " << I << "\n";
+ if (!PrintOnly)
+ abort();
+ AnyInvalidUses = true;
+}
+
+static void Verify(const Function &F, const DominatorTree &DT,
+ const CFGDeadness &CD) {
+ LLVM_DEBUG(dbgs() << "Verifying gc pointers in function: " << F.getName()
+ << "\n");
+ if (PrintOnly)
+ dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n";
+
+ GCPtrTracker Tracker(F, DT, CD);
+
+ // We now have all the information we need to decide if the use of a heap
+ // reference is legal or not, given our safepoint semantics.
+
+ InstructionVerifier Verifier;
+ GCPtrTracker::verifyFunction(std::move(Tracker), Verifier);
+
+ if (PrintOnly && !Verifier.hasAnyInvalidUses()) {
+ dbgs() << "No illegal uses found by SafepointIRVerifier in: " << F.getName()
+ << "\n";
+ }
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/Statepoint.cpp b/contrib/llvm-project/llvm/lib/IR/Statepoint.cpp
new file mode 100644
index 000000000000..508e3cb71ed2
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Statepoint.cpp
@@ -0,0 +1,40 @@
+//===-- IR/Statepoint.cpp -- gc.statepoint utilities --- -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some utility functions to help recognize gc.statepoint
+// intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Statepoint.h"
+
+using namespace llvm;
+
+bool llvm::isStatepointDirectiveAttr(Attribute Attr) {
+ return Attr.hasAttribute("statepoint-id") ||
+ Attr.hasAttribute("statepoint-num-patch-bytes");
+}
+
+StatepointDirectives
+llvm::parseStatepointDirectivesFromAttrs(AttributeList AS) {
+ StatepointDirectives Result;
+
+ Attribute AttrID = AS.getFnAttr("statepoint-id");
+ uint64_t StatepointID;
+ if (AttrID.isStringAttribute())
+ if (!AttrID.getValueAsString().getAsInteger(10, StatepointID))
+ Result.StatepointID = StatepointID;
+
+ uint32_t NumPatchBytes;
+ Attribute AttrNumPatchBytes = AS.getFnAttr("statepoint-num-patch-bytes");
+ if (AttrNumPatchBytes.isStringAttribute())
+ if (!AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes))
+ Result.NumPatchBytes = NumPatchBytes;
+
+ return Result;
+}
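+
+// Illustrative sketch (hypothetical attribute values): for a call site
+// carrying "statepoint-id"="7" and "statepoint-num-patch-bytes"="4",
+// parseStatepointDirectivesFromAttrs returns a StatepointDirectives with
+// StatepointID == 7 and NumPatchBytes == 4; attributes that are absent or
+// non-numeric leave the corresponding Optional unset.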
diff --git a/contrib/llvm-project/llvm/lib/IR/StructuralHash.cpp b/contrib/llvm-project/llvm/lib/IR/StructuralHash.cpp
new file mode 100644
index 000000000000..5a6e07451326
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/StructuralHash.cpp
@@ -0,0 +1,84 @@
+//===-- StructuralHash.cpp - IR Hash for expensive checks -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifdef EXPENSIVE_CHECKS
+
+#include "llvm/IR/StructuralHash.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+
+using namespace llvm;
+
+namespace {
+namespace details {
+
+// Basic hashing mechanism to detect structural changes to the IR, used to
+// verify that a pass's reported change status is consistent with the actual
+// change. Loosely copied from
+// llvm/lib/Transforms/Utils/FunctionComparator.cpp
+
+class StructuralHash {
+ uint64_t Hash = 0x6acaa36bef8325c5ULL;
+
+ void update(uint64_t V) { Hash = hashing::detail::hash_16_bytes(Hash, V); }
+
+public:
+ StructuralHash() = default;
+
+ void update(const Function &F) {
+ if (F.empty())
+ return;
+
+ update(F.isVarArg());
+ update(F.arg_size());
+
+ SmallVector<const BasicBlock *, 8> BBs;
+ SmallPtrSet<const BasicBlock *, 16> VisitedBBs;
+
+ BBs.push_back(&F.getEntryBlock());
+ VisitedBBs.insert(BBs[0]);
+ while (!BBs.empty()) {
+ const BasicBlock *BB = BBs.pop_back_val();
+ update(45798); // Block header
+ for (auto &Inst : *BB)
+ update(Inst.getOpcode());
+
+ const Instruction *Term = BB->getTerminator();
+ for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
+ if (!VisitedBBs.insert(Term->getSuccessor(i)).second)
+ continue;
+ BBs.push_back(Term->getSuccessor(i));
+ }
+ }
+ }
+
+ void update(const Module &M) {
+ for (const Function &F : M)
+ update(F);
+ }
+
+ uint64_t getHash() const { return Hash; }
+};
+
+} // namespace details
+
+} // namespace
+
+uint64_t llvm::StructuralHash(const Function &F) {
+ details::StructuralHash H;
+ H.update(F);
+ return H.getHash();
+}
+
+uint64_t llvm::StructuralHash(const Module &M) {
+ details::StructuralHash H;
+ H.update(M);
+ return H.getHash();
+}
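+
+// Illustrative usage sketch (hypothetical; assumes an EXPENSIVE_CHECKS
+// build): snapshot the hash before running a pass and compare afterwards to
+// cross-check the pass's reported change status:
+// uint64_t Before = StructuralHash(F);
+// bool Changed = runSomePass(F); // hypothetical pass invocation
+// assert((Changed || StructuralHash(F) == Before) &&
+// "pass reported no change but the IR hash differs");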
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/IR/SymbolTableListTraitsImpl.h b/contrib/llvm-project/llvm/lib/IR/SymbolTableListTraitsImpl.h
new file mode 100644
index 000000000000..4283744bd058
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/SymbolTableListTraitsImpl.h
@@ -0,0 +1,124 @@
+//===-- llvm/SymbolTableListTraitsImpl.h - Implementation ------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the stickier parts of the SymbolTableListTraits class,
+// and is explicitly instantiated where needed to avoid defining all this code
+// in a widely used header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_IR_SYMBOLTABLELISTTRAITSIMPL_H
+#define LLVM_LIB_IR_SYMBOLTABLELISTTRAITSIMPL_H
+
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/ValueSymbolTable.h"
+
+namespace llvm {
+
+/// Notify basic blocks when an instruction is inserted.
+template <typename ParentClass>
+inline void invalidateParentIListOrdering(ParentClass *Parent) {}
+template <> void invalidateParentIListOrdering(BasicBlock *BB);
+
+/// setSymTabObject - This is called when (e.g.) the parent of a basic block
+/// changes. This requires us to remove all the instruction symtab entries from
+/// the current function and reinsert them into the new function.
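+///
+/// For example, moving a BasicBlock from one Function to another re-parents
+/// all of its Instructions, so their names must migrate from the old
+/// Function's value symbol table to the new one.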
+template <typename ValueSubClass>
+template <typename TPtr>
+void SymbolTableListTraits<ValueSubClass>::setSymTabObject(TPtr *Dest,
+ TPtr Src) {
+ // Get the old symtab and value list before doing the assignment.
+ ValueSymbolTable *OldST = getSymTab(getListOwner());
+
+ // Do it.
+ *Dest = Src;
+
+ // Get the new SymTab object.
+ ValueSymbolTable *NewST = getSymTab(getListOwner());
+
+ // If there is nothing to do, quick exit.
+ if (OldST == NewST) return;
+
+ // Move all the elements from the old symtab to the new one.
+ ListTy &ItemList = getList(getListOwner());
+ if (ItemList.empty()) return;
+
+ if (OldST) {
+ // Remove all entries from the previous symtab.
+ for (auto I = ItemList.begin(); I != ItemList.end(); ++I)
+ if (I->hasName())
+ OldST->removeValueName(I->getValueName());
+ }
+
+ if (NewST) {
+ // Add all of the items to the new symtab.
+ for (auto I = ItemList.begin(); I != ItemList.end(); ++I)
+ if (I->hasName())
+ NewST->reinsertValue(&*I);
+ }
+
+}
+template <typename ValueSubClass>
+void SymbolTableListTraits<ValueSubClass>::addNodeToList(ValueSubClass *V) {
+ assert(!V->getParent() && "Value already in a container!!");
+ ItemParentClass *Owner = getListOwner();
+ V->setParent(Owner);
+ invalidateParentIListOrdering(Owner);
+ if (V->hasName())
+ if (ValueSymbolTable *ST = getSymTab(Owner))
+ ST->reinsertValue(V);
+}
+
+template <typename ValueSubClass>
+void SymbolTableListTraits<ValueSubClass>::removeNodeFromList(
+ ValueSubClass *V) {
+ V->setParent(nullptr);
+ if (V->hasName())
+ if (ValueSymbolTable *ST = getSymTab(getListOwner()))
+ ST->removeValueName(V->getValueName());
+}
+
+template <typename ValueSubClass>
+void SymbolTableListTraits<ValueSubClass>::transferNodesFromList(
+ SymbolTableListTraits &L2, iterator first, iterator last) {
+ // Transferring nodes, even within the same BB, invalidates the ordering. The
+ // list that we removed the nodes from still has a valid ordering.
+ ItemParentClass *NewIP = getListOwner();
+ invalidateParentIListOrdering(NewIP);
+
+ // Nothing else needs to be done if we're reordering nodes within the same
+ // list.
+ ItemParentClass *OldIP = L2.getListOwner();
+ if (NewIP == OldIP)
+ return;
+
+ // We only have to update symbol table entries if we are transferring the
+ // instructions to a different symtab object...
+ ValueSymbolTable *NewST = getSymTab(NewIP);
+ ValueSymbolTable *OldST = getSymTab(OldIP);
+ if (NewST != OldST) {
+ for (; first != last; ++first) {
+ ValueSubClass &V = *first;
+ bool HasName = V.hasName();
+ if (OldST && HasName)
+ OldST->removeValueName(V.getValueName());
+ V.setParent(NewIP);
+ if (NewST && HasName)
+ NewST->reinsertValue(&V);
+ }
+ } else {
+ // Just transferring between blocks in the same function, simply update the
+ // parent fields in the instructions...
+ for (; first != last; ++first)
+ first->setParent(NewIP);
+ }
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm-project/llvm/lib/IR/Type.cpp b/contrib/llvm-project/llvm/lib/IR/Type.cpp
new file mode 100644
index 000000000000..85b658c8a52f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Type.cpp
@@ -0,0 +1,786 @@
+//===- Type.cpp - Implement the Type class --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Type class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Type.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/TypeSize.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <utility>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Type Class Implementation
+//===----------------------------------------------------------------------===//
+
+Type *Type::getPrimitiveType(LLVMContext &C, TypeID IDNumber) {
+ switch (IDNumber) {
+ case VoidTyID : return getVoidTy(C);
+ case HalfTyID : return getHalfTy(C);
+ case BFloatTyID : return getBFloatTy(C);
+ case FloatTyID : return getFloatTy(C);
+ case DoubleTyID : return getDoubleTy(C);
+ case X86_FP80TyID : return getX86_FP80Ty(C);
+ case FP128TyID : return getFP128Ty(C);
+ case PPC_FP128TyID : return getPPC_FP128Ty(C);
+ case LabelTyID : return getLabelTy(C);
+ case MetadataTyID : return getMetadataTy(C);
+ case X86_MMXTyID : return getX86_MMXTy(C);
+ case X86_AMXTyID : return getX86_AMXTy(C);
+ case TokenTyID : return getTokenTy(C);
+ default:
+ return nullptr;
+ }
+}
+
+bool Type::isIntegerTy(unsigned Bitwidth) const {
+ return isIntegerTy() && cast<IntegerType>(this)->getBitWidth() == Bitwidth;
+}
+
+bool Type::isOpaquePointerTy() const {
+ if (auto *PTy = dyn_cast<PointerType>(this))
+ return PTy->isOpaque();
+ return false;
+}
+
+const fltSemantics &Type::getFltSemantics() const {
+ switch (getTypeID()) {
+ case HalfTyID: return APFloat::IEEEhalf();
+ case BFloatTyID: return APFloat::BFloat();
+ case FloatTyID: return APFloat::IEEEsingle();
+ case DoubleTyID: return APFloat::IEEEdouble();
+ case X86_FP80TyID: return APFloat::x87DoubleExtended();
+ case FP128TyID: return APFloat::IEEEquad();
+ case PPC_FP128TyID: return APFloat::PPCDoubleDouble();
+ default: llvm_unreachable("Invalid floating type");
+ }
+}
+
+bool Type::isIEEE() const {
+ return APFloat::getZero(getFltSemantics()).isIEEE();
+}
+
+Type *Type::getFloatingPointTy(LLVMContext &C, const fltSemantics &S) {
+ Type *Ty;
+ if (&S == &APFloat::IEEEhalf())
+ Ty = Type::getHalfTy(C);
+ else if (&S == &APFloat::BFloat())
+ Ty = Type::getBFloatTy(C);
+ else if (&S == &APFloat::IEEEsingle())
+ Ty = Type::getFloatTy(C);
+ else if (&S == &APFloat::IEEEdouble())
+ Ty = Type::getDoubleTy(C);
+ else if (&S == &APFloat::x87DoubleExtended())
+ Ty = Type::getX86_FP80Ty(C);
+ else if (&S == &APFloat::IEEEquad())
+ Ty = Type::getFP128Ty(C);
+ else {
+ assert(&S == &APFloat::PPCDoubleDouble() && "Unknown FP format");
+ Ty = Type::getPPC_FP128Ty(C);
+ }
+ return Ty;
+}
+
+bool Type::canLosslesslyBitCastTo(Type *Ty) const {
+ // Identity cast means no change so return true
+ if (this == Ty)
+ return true;
+
+ // They are not convertible unless they are at least first class types
+ if (!this->isFirstClassType() || !Ty->isFirstClassType())
+ return false;
+
+ // Vector -> Vector conversions are always lossless if the two vector types
+ // have the same size, otherwise not.
+ if (isa<VectorType>(this) && isa<VectorType>(Ty))
+ return getPrimitiveSizeInBits() == Ty->getPrimitiveSizeInBits();
+
+ // 64-bit fixed width vector types can be losslessly converted to x86mmx.
+ if (((isa<FixedVectorType>(this)) && Ty->isX86_MMXTy()) &&
+ getPrimitiveSizeInBits().getFixedSize() == 64)
+ return true;
+ if ((isX86_MMXTy() && isa<FixedVectorType>(Ty)) &&
+ Ty->getPrimitiveSizeInBits().getFixedSize() == 64)
+ return true;
+
+ // 8192-bit fixed width vector types can be losslessly converted to x86amx.
+ if (((isa<FixedVectorType>(this)) && Ty->isX86_AMXTy()) &&
+ getPrimitiveSizeInBits().getFixedSize() == 8192)
+ return true;
+ if ((isX86_AMXTy() && isa<FixedVectorType>(Ty)) &&
+ Ty->getPrimitiveSizeInBits().getFixedSize() == 8192)
+ return true;
+
+ // At this point we have only various mismatches of the first class types
+ // remaining and ptr->ptr. Just select the lossless conversions. Everything
+ // else is not lossless. Conservatively assume we can't losslessly convert
+ // between pointers with different address spaces.
+ if (auto *PTy = dyn_cast<PointerType>(this)) {
+ if (auto *OtherPTy = dyn_cast<PointerType>(Ty))
+ return PTy->getAddressSpace() == OtherPTy->getAddressSpace();
+ return false;
+ }
+ return false; // Other types have no identity values
+}
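+
+// For illustration (hypothetical cases): <2 x i32> and <4 x i16> convert
+// losslessly (both are 64 bits wide), as do <2 x i32> and x86_mmx; pointers
+// in different address spaces, e.g. i32* and i32 addrspace(1)*, do not.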
+
+bool Type::isEmptyTy() const {
+ if (auto *ATy = dyn_cast<ArrayType>(this)) {
+ unsigned NumElements = ATy->getNumElements();
+ return NumElements == 0 || ATy->getElementType()->isEmptyTy();
+ }
+
+ if (auto *STy = dyn_cast<StructType>(this)) {
+ unsigned NumElements = STy->getNumElements();
+ for (unsigned i = 0; i < NumElements; ++i)
+ if (!STy->getElementType(i)->isEmptyTy())
+ return false;
+ return true;
+ }
+
+ return false;
+}
+
+TypeSize Type::getPrimitiveSizeInBits() const {
+ switch (getTypeID()) {
+ case Type::HalfTyID: return TypeSize::Fixed(16);
+ case Type::BFloatTyID: return TypeSize::Fixed(16);
+ case Type::FloatTyID: return TypeSize::Fixed(32);
+ case Type::DoubleTyID: return TypeSize::Fixed(64);
+ case Type::X86_FP80TyID: return TypeSize::Fixed(80);
+ case Type::FP128TyID: return TypeSize::Fixed(128);
+ case Type::PPC_FP128TyID: return TypeSize::Fixed(128);
+ case Type::X86_MMXTyID: return TypeSize::Fixed(64);
+ case Type::X86_AMXTyID: return TypeSize::Fixed(8192);
+ case Type::IntegerTyID:
+ return TypeSize::Fixed(cast<IntegerType>(this)->getBitWidth());
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID: {
+ const VectorType *VTy = cast<VectorType>(this);
+ ElementCount EC = VTy->getElementCount();
+ TypeSize ETS = VTy->getElementType()->getPrimitiveSizeInBits();
+ assert(!ETS.isScalable() && "Vector type should have fixed-width elements");
+ return {ETS.getFixedSize() * EC.getKnownMinValue(), EC.isScalable()};
+ }
+ default: return TypeSize::Fixed(0);
+ }
+}
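+
+// For example (illustrative): <4 x i32> yields a fixed TypeSize of 128 bits,
+// while <vscale x 4 x i32> yields a scalable TypeSize whose known minimum is
+// 128 bits.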
+
+unsigned Type::getScalarSizeInBits() const {
+ // It is safe to assume that the scalar types have a fixed size.
+ return getScalarType()->getPrimitiveSizeInBits().getFixedSize();
+}
+
+int Type::getFPMantissaWidth() const {
+ if (auto *VTy = dyn_cast<VectorType>(this))
+ return VTy->getElementType()->getFPMantissaWidth();
+ assert(isFloatingPointTy() && "Not a floating point type!");
+ if (getTypeID() == HalfTyID) return 11;
+ if (getTypeID() == BFloatTyID) return 8;
+ if (getTypeID() == FloatTyID) return 24;
+ if (getTypeID() == DoubleTyID) return 53;
+ if (getTypeID() == X86_FP80TyID) return 64;
+ if (getTypeID() == FP128TyID) return 113;
+ assert(getTypeID() == PPC_FP128TyID && "unknown fp type");
+ return -1;
+}
+
+bool Type::isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited) const {
+ if (auto *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType()->isSized(Visited);
+
+ if (auto *VTy = dyn_cast<VectorType>(this))
+ return VTy->getElementType()->isSized(Visited);
+
+ return cast<StructType>(this)->isSized(Visited);
+}
+
+//===----------------------------------------------------------------------===//
+// Primitive 'Type' data
+//===----------------------------------------------------------------------===//
+
+Type *Type::getVoidTy(LLVMContext &C) { return &C.pImpl->VoidTy; }
+Type *Type::getLabelTy(LLVMContext &C) { return &C.pImpl->LabelTy; }
+Type *Type::getHalfTy(LLVMContext &C) { return &C.pImpl->HalfTy; }
+Type *Type::getBFloatTy(LLVMContext &C) { return &C.pImpl->BFloatTy; }
+Type *Type::getFloatTy(LLVMContext &C) { return &C.pImpl->FloatTy; }
+Type *Type::getDoubleTy(LLVMContext &C) { return &C.pImpl->DoubleTy; }
+Type *Type::getMetadataTy(LLVMContext &C) { return &C.pImpl->MetadataTy; }
+Type *Type::getTokenTy(LLVMContext &C) { return &C.pImpl->TokenTy; }
+Type *Type::getX86_FP80Ty(LLVMContext &C) { return &C.pImpl->X86_FP80Ty; }
+Type *Type::getFP128Ty(LLVMContext &C) { return &C.pImpl->FP128Ty; }
+Type *Type::getPPC_FP128Ty(LLVMContext &C) { return &C.pImpl->PPC_FP128Ty; }
+Type *Type::getX86_MMXTy(LLVMContext &C) { return &C.pImpl->X86_MMXTy; }
+Type *Type::getX86_AMXTy(LLVMContext &C) { return &C.pImpl->X86_AMXTy; }
+
+IntegerType *Type::getInt1Ty(LLVMContext &C) { return &C.pImpl->Int1Ty; }
+IntegerType *Type::getInt8Ty(LLVMContext &C) { return &C.pImpl->Int8Ty; }
+IntegerType *Type::getInt16Ty(LLVMContext &C) { return &C.pImpl->Int16Ty; }
+IntegerType *Type::getInt32Ty(LLVMContext &C) { return &C.pImpl->Int32Ty; }
+IntegerType *Type::getInt64Ty(LLVMContext &C) { return &C.pImpl->Int64Ty; }
+IntegerType *Type::getInt128Ty(LLVMContext &C) { return &C.pImpl->Int128Ty; }
+
+IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) {
+ return IntegerType::get(C, N);
+}
+
+PointerType *Type::getHalfPtrTy(LLVMContext &C, unsigned AS) {
+ return getHalfTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getBFloatPtrTy(LLVMContext &C, unsigned AS) {
+ return getBFloatTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getFloatPtrTy(LLVMContext &C, unsigned AS) {
+ return getFloatTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getDoublePtrTy(LLVMContext &C, unsigned AS) {
+ return getDoubleTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getX86_FP80PtrTy(LLVMContext &C, unsigned AS) {
+ return getX86_FP80Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getFP128PtrTy(LLVMContext &C, unsigned AS) {
+ return getFP128Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getPPC_FP128PtrTy(LLVMContext &C, unsigned AS) {
+ return getPPC_FP128Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getX86_MMXPtrTy(LLVMContext &C, unsigned AS) {
+ return getX86_MMXTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getX86_AMXPtrTy(LLVMContext &C, unsigned AS) {
+ return getX86_AMXTy(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS) {
+ return getIntNTy(C, N)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt1PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt1Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt8PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt8Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt16PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt16Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt32PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt32Ty(C)->getPointerTo(AS);
+}
+
+PointerType *Type::getInt64PtrTy(LLVMContext &C, unsigned AS) {
+ return getInt64Ty(C)->getPointerTo(AS);
+}
+
+//===----------------------------------------------------------------------===//
+// IntegerType Implementation
+//===----------------------------------------------------------------------===//
+
+IntegerType *IntegerType::get(LLVMContext &C, unsigned NumBits) {
+ assert(NumBits >= MIN_INT_BITS && "bitwidth too small");
+ assert(NumBits <= MAX_INT_BITS && "bitwidth too large");
+
+ // Check for the built-in integer types
+ switch (NumBits) {
+ case 1: return cast<IntegerType>(Type::getInt1Ty(C));
+ case 8: return cast<IntegerType>(Type::getInt8Ty(C));
+ case 16: return cast<IntegerType>(Type::getInt16Ty(C));
+ case 32: return cast<IntegerType>(Type::getInt32Ty(C));
+ case 64: return cast<IntegerType>(Type::getInt64Ty(C));
+ case 128: return cast<IntegerType>(Type::getInt128Ty(C));
+ default:
+ break;
+ }
+
+ IntegerType *&Entry = C.pImpl->IntegerTypes[NumBits];
+
+ if (!Entry)
+ Entry = new (C.pImpl->Alloc) IntegerType(C, NumBits);
+
+ return Entry;
+}
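+
+// For illustration (hypothetical usage): IntegerType::get(C, 36) returns the
+// unique i36 type for context C; repeated calls return the same pointer, so
+// type identity can be compared with ==.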
+
+APInt IntegerType::getMask() const { return APInt::getAllOnes(getBitWidth()); }
+
+//===----------------------------------------------------------------------===//
+// FunctionType Implementation
+//===----------------------------------------------------------------------===//
+
+FunctionType::FunctionType(Type *Result, ArrayRef<Type*> Params,
+ bool IsVarArgs)
+ : Type(Result->getContext(), FunctionTyID) {
+ Type **SubTys = reinterpret_cast<Type**>(this+1);
+ assert(isValidReturnType(Result) && "invalid return type for function");
+ setSubclassData(IsVarArgs);
+
+ SubTys[0] = Result;
+
+ for (unsigned i = 0, e = Params.size(); i != e; ++i) {
+ assert(isValidArgumentType(Params[i]) &&
+ "Not a valid type for function argument!");
+ SubTys[i+1] = Params[i];
+ }
+
+ ContainedTys = SubTys;
+ NumContainedTys = Params.size() + 1; // + 1 for result type
+}
+
+// This is the factory function for the FunctionType class.
+FunctionType *FunctionType::get(Type *ReturnType,
+ ArrayRef<Type*> Params, bool isVarArg) {
+ LLVMContextImpl *pImpl = ReturnType->getContext().pImpl;
+ const FunctionTypeKeyInfo::KeyTy Key(ReturnType, Params, isVarArg);
+ FunctionType *FT;
+ // Since we only want to allocate a fresh function type if none is found and
+ // we don't want to perform two lookups (one to check for existence and one
+ // to insert the newly allocated one), we instead look up based on Key and
+ // update the reference to the function type in-place to a newly allocated
+ // one if not found.
+ auto Insertion = pImpl->FunctionTypes.insert_as(nullptr, Key);
+ if (Insertion.second) {
+ // The function type was not found. Allocate one and update FunctionTypes
+ // in-place.
+ FT = (FunctionType *)pImpl->Alloc.Allocate(
+ sizeof(FunctionType) + sizeof(Type *) * (Params.size() + 1),
+ alignof(FunctionType));
+ new (FT) FunctionType(ReturnType, Params, isVarArg);
+ *Insertion.first = FT;
+ } else {
+ // The function type was found. Just return it.
+ FT = *Insertion.first;
+ }
+ return FT;
+}
+
+FunctionType *FunctionType::get(Type *Result, bool isVarArg) {
+ return get(Result, None, isVarArg);
+}
+
+bool FunctionType::isValidReturnType(Type *RetTy) {
+ return !RetTy->isFunctionTy() && !RetTy->isLabelTy() &&
+ !RetTy->isMetadataTy();
+}
+
+bool FunctionType::isValidArgumentType(Type *ArgTy) {
+ return ArgTy->isFirstClassType();
+}
+
+//===----------------------------------------------------------------------===//
+// StructType Implementation
+//===----------------------------------------------------------------------===//
+
+// Primitive Constructors.
+
+StructType *StructType::get(LLVMContext &Context, ArrayRef<Type*> ETypes,
+ bool isPacked) {
+ LLVMContextImpl *pImpl = Context.pImpl;
+ const AnonStructTypeKeyInfo::KeyTy Key(ETypes, isPacked);
+
+ StructType *ST;
+ // Since we only want to allocate a fresh struct type if none is found and
+ // we don't want to perform two lookups (one to check for existence and one
+ // to insert the newly allocated one), we instead look up based on Key and
+ // update the reference to the struct type in-place to a newly allocated one
+ // if not found.
+ auto Insertion = pImpl->AnonStructTypes.insert_as(nullptr, Key);
+ if (Insertion.second) {
+ // The struct type was not found. Allocate one and update AnonStructTypes
+ // in-place.
+ ST = new (Context.pImpl->Alloc) StructType(Context);
+ ST->setSubclassData(SCDB_IsLiteral); // Literal struct.
+ ST->setBody(ETypes, isPacked);
+ *Insertion.first = ST;
+ } else {
+ // The struct type was found. Just return it.
+ ST = *Insertion.first;
+ }
+
+ return ST;
+}
+
+bool StructType::containsScalableVectorType() const {
+ for (Type *Ty : elements()) {
+ if (isa<ScalableVectorType>(Ty))
+ return true;
+ if (auto *STy = dyn_cast<StructType>(Ty))
+ if (STy->containsScalableVectorType())
+ return true;
+ }
+
+ return false;
+}
+
+void StructType::setBody(ArrayRef<Type*> Elements, bool isPacked) {
+ assert(isOpaque() && "Struct body already set!");
+
+ setSubclassData(getSubclassData() | SCDB_HasBody);
+ if (isPacked)
+ setSubclassData(getSubclassData() | SCDB_Packed);
+
+ NumContainedTys = Elements.size();
+
+ if (Elements.empty()) {
+ ContainedTys = nullptr;
+ return;
+ }
+
+ ContainedTys = Elements.copy(getContext().pImpl->Alloc).data();
+}
+
+void StructType::setName(StringRef Name) {
+ if (Name == getName()) return;
+
+ StringMap<StructType *> &SymbolTable = getContext().pImpl->NamedStructTypes;
+
+ using EntryTy = StringMap<StructType *>::MapEntryTy;
+
+ // If this struct already had a name, remove its symbol table entry. Don't
+ // delete the data yet because it may be part of the new name.
+ if (SymbolTableEntry)
+ SymbolTable.remove((EntryTy *)SymbolTableEntry);
+
+ // If this is just removing the name, we're done.
+ if (Name.empty()) {
+ if (SymbolTableEntry) {
+ // Delete the old string data.
+ ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator());
+ SymbolTableEntry = nullptr;
+ }
+ return;
+ }
+
+ // Look up the entry for the name.
+ auto IterBool =
+ getContext().pImpl->NamedStructTypes.insert(std::make_pair(Name, this));
+
+  // On a name collision, append a context-unique numeric suffix until the
+  // name is free.
+ if (!IterBool.second) {
+ SmallString<64> TempStr(Name);
+ TempStr.push_back('.');
+ raw_svector_ostream TmpStream(TempStr);
+ unsigned NameSize = Name.size();
+
+ do {
+ TempStr.resize(NameSize + 1);
+ TmpStream << getContext().pImpl->NamedStructTypesUniqueID++;
+
+ IterBool = getContext().pImpl->NamedStructTypes.insert(
+ std::make_pair(TmpStream.str(), this));
+ } while (!IterBool.second);
+ }
+
+ // Delete the old string data.
+ if (SymbolTableEntry)
+ ((EntryTy *)SymbolTableEntry)->Destroy(SymbolTable.getAllocator());
+ SymbolTableEntry = &*IterBool.first;
+}
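+
+// Editorial note (illustrative sketch, not upstream code; assumes an existing
+// LLVMContext `Ctx`): the collision handling above means creating two structs
+// with the same requested name silently renames the second one:
+//
+//   StructType *A = StructType::create(Ctx, "foo"); // named "foo"
+//   StructType *B = StructType::create(Ctx, "foo"); // renamed, e.g. "foo.0"
+//   assert(A->getName() != B->getName());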
+
+//===----------------------------------------------------------------------===//
+// StructType Helper functions.
+
+StructType *StructType::create(LLVMContext &Context, StringRef Name) {
+ StructType *ST = new (Context.pImpl->Alloc) StructType(Context);
+ if (!Name.empty())
+ ST->setName(Name);
+ return ST;
+}
+
+StructType *StructType::get(LLVMContext &Context, bool isPacked) {
+ return get(Context, None, isPacked);
+}
+
+StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements,
+ StringRef Name, bool isPacked) {
+ StructType *ST = create(Context, Name);
+ ST->setBody(Elements, isPacked);
+ return ST;
+}
+
+StructType *StructType::create(LLVMContext &Context, ArrayRef<Type*> Elements) {
+ return create(Context, Elements, StringRef());
+}
+
+StructType *StructType::create(LLVMContext &Context) {
+ return create(Context, StringRef());
+}
+
+StructType *StructType::create(ArrayRef<Type*> Elements, StringRef Name,
+ bool isPacked) {
+ assert(!Elements.empty() &&
+ "This method may not be invoked with an empty list");
+ return create(Elements[0]->getContext(), Elements, Name, isPacked);
+}
+
+StructType *StructType::create(ArrayRef<Type*> Elements) {
+ assert(!Elements.empty() &&
+ "This method may not be invoked with an empty list");
+ return create(Elements[0]->getContext(), Elements, StringRef());
+}
+
+bool StructType::isSized(SmallPtrSetImpl<Type*> *Visited) const {
+ if ((getSubclassData() & SCDB_IsSized) != 0)
+ return true;
+ if (isOpaque())
+ return false;
+
+ if (Visited && !Visited->insert(const_cast<StructType*>(this)).second)
+ return false;
+
+ // Okay, our struct is sized if all of the elements are, but if one of the
+ // elements is opaque, the struct isn't sized *yet*, but may become sized in
+ // the future, so just bail out without caching.
+ for (Type *Ty : elements()) {
+ // If the struct contains a scalable vector type, don't consider it sized.
+ // This prevents it from being used in loads/stores/allocas/GEPs.
+ if (isa<ScalableVectorType>(Ty))
+ return false;
+ if (!Ty->isSized(Visited))
+ return false;
+ }
+
+ // Here we cheat a bit and cast away const-ness. The goal is to memoize when
+ // we find a sized type, as types can only move from opaque to sized, not the
+ // other way.
+ const_cast<StructType*>(this)->setSubclassData(
+ getSubclassData() | SCDB_IsSized);
+ return true;
+}
+
+StringRef StructType::getName() const {
+ assert(!isLiteral() && "Literal structs never have names");
+ if (!SymbolTableEntry) return StringRef();
+
+ return ((StringMapEntry<StructType*> *)SymbolTableEntry)->getKey();
+}
+
+bool StructType::isValidElementType(Type *ElemTy) {
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
+ !ElemTy->isTokenTy();
+}
+
+bool StructType::isLayoutIdentical(StructType *Other) const {
+ if (this == Other) return true;
+
+ if (isPacked() != Other->isPacked())
+ return false;
+
+ return elements() == Other->elements();
+}
+
+Type *StructType::getTypeAtIndex(const Value *V) const {
+ unsigned Idx = (unsigned)cast<Constant>(V)->getUniqueInteger().getZExtValue();
+ assert(indexValid(Idx) && "Invalid structure index!");
+ return getElementType(Idx);
+}
+
+bool StructType::indexValid(const Value *V) const {
+ // Structure indexes require (vectors of) 32-bit integer constants. In the
+ // vector case all of the indices must be equal.
+ if (!V->getType()->isIntOrIntVectorTy(32))
+ return false;
+ if (isa<ScalableVectorType>(V->getType()))
+ return false;
+ const Constant *C = dyn_cast<Constant>(V);
+ if (C && V->getType()->isVectorTy())
+ C = C->getSplatValue();
+ const ConstantInt *CU = dyn_cast_or_null<ConstantInt>(C);
+ return CU && CU->getZExtValue() < getNumElements();
+}
+
+StructType *StructType::getTypeByName(LLVMContext &C, StringRef Name) {
+ return C.pImpl->NamedStructTypes.lookup(Name);
+}
+
+//===----------------------------------------------------------------------===//
+// ArrayType Implementation
+//===----------------------------------------------------------------------===//
+
+ArrayType::ArrayType(Type *ElType, uint64_t NumEl)
+ : Type(ElType->getContext(), ArrayTyID), ContainedType(ElType),
+ NumElements(NumEl) {
+ ContainedTys = &ContainedType;
+ NumContainedTys = 1;
+}
+
+ArrayType *ArrayType::get(Type *ElementType, uint64_t NumElements) {
+ assert(isValidElementType(ElementType) && "Invalid type for array element!");
+
+ LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
+ ArrayType *&Entry =
+ pImpl->ArrayTypes[std::make_pair(ElementType, NumElements)];
+
+ if (!Entry)
+ Entry = new (pImpl->Alloc) ArrayType(ElementType, NumElements);
+ return Entry;
+}
+
+bool ArrayType::isValidElementType(Type *ElemTy) {
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy() &&
+ !ElemTy->isTokenTy() && !ElemTy->isX86_AMXTy() &&
+ !isa<ScalableVectorType>(ElemTy);
+}
+
+//===----------------------------------------------------------------------===//
+// VectorType Implementation
+//===----------------------------------------------------------------------===//
+
+VectorType::VectorType(Type *ElType, unsigned EQ, Type::TypeID TID)
+ : Type(ElType->getContext(), TID), ContainedType(ElType),
+ ElementQuantity(EQ) {
+ ContainedTys = &ContainedType;
+ NumContainedTys = 1;
+}
+
+VectorType *VectorType::get(Type *ElementType, ElementCount EC) {
+ if (EC.isScalable())
+ return ScalableVectorType::get(ElementType, EC.getKnownMinValue());
+ else
+ return FixedVectorType::get(ElementType, EC.getKnownMinValue());
+}
+
+bool VectorType::isValidElementType(Type *ElemTy) {
+ return ElemTy->isIntegerTy() || ElemTy->isFloatingPointTy() ||
+ ElemTy->isPointerTy();
+}
+
+//===----------------------------------------------------------------------===//
+// FixedVectorType Implementation
+//===----------------------------------------------------------------------===//
+
+FixedVectorType *FixedVectorType::get(Type *ElementType, unsigned NumElts) {
+ assert(NumElts > 0 && "#Elements of a VectorType must be greater than 0");
+ assert(isValidElementType(ElementType) && "Element type of a VectorType must "
+ "be an integer, floating point, or "
+ "pointer type.");
+
+ auto EC = ElementCount::getFixed(NumElts);
+
+ LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
+ VectorType *&Entry = ElementType->getContext()
+ .pImpl->VectorTypes[std::make_pair(ElementType, EC)];
+
+ if (!Entry)
+ Entry = new (pImpl->Alloc) FixedVectorType(ElementType, NumElts);
+ return cast<FixedVectorType>(Entry);
+}
+
+//===----------------------------------------------------------------------===//
+// ScalableVectorType Implementation
+//===----------------------------------------------------------------------===//
+
+ScalableVectorType *ScalableVectorType::get(Type *ElementType,
+ unsigned MinNumElts) {
+ assert(MinNumElts > 0 && "#Elements of a VectorType must be greater than 0");
+ assert(isValidElementType(ElementType) && "Element type of a VectorType must "
+ "be an integer, floating point, or "
+ "pointer type.");
+
+ auto EC = ElementCount::getScalable(MinNumElts);
+
+ LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
+ VectorType *&Entry = ElementType->getContext()
+ .pImpl->VectorTypes[std::make_pair(ElementType, EC)];
+
+ if (!Entry)
+ Entry = new (pImpl->Alloc) ScalableVectorType(ElementType, MinNumElts);
+ return cast<ScalableVectorType>(Entry);
+}
+
+//===----------------------------------------------------------------------===//
+// PointerType Implementation
+//===----------------------------------------------------------------------===//
+
+PointerType *PointerType::get(Type *EltTy, unsigned AddressSpace) {
+ assert(EltTy && "Can't get a pointer to <null> type!");
+ assert(isValidElementType(EltTy) && "Invalid type for pointer element!");
+
+ LLVMContextImpl *CImpl = EltTy->getContext().pImpl;
+
+ // Automatically convert typed pointers to opaque pointers.
+ if (CImpl->getOpaquePointers())
+ return get(EltTy->getContext(), AddressSpace);
+
+ // Since AddressSpace #0 is the common case, we special case it.
+ PointerType *&Entry = AddressSpace == 0 ? CImpl->PointerTypes[EltTy]
+ : CImpl->ASPointerTypes[std::make_pair(EltTy, AddressSpace)];
+
+ if (!Entry)
+ Entry = new (CImpl->Alloc) PointerType(EltTy, AddressSpace);
+ return Entry;
+}
+
+PointerType *PointerType::get(LLVMContext &C, unsigned AddressSpace) {
+ LLVMContextImpl *CImpl = C.pImpl;
+ assert(CImpl->getOpaquePointers() &&
+ "Can only create opaque pointers in opaque pointer mode");
+
+ // Since AddressSpace #0 is the common case, we special case it.
+ PointerType *&Entry =
+ AddressSpace == 0
+ ? CImpl->PointerTypes[nullptr]
+ : CImpl->ASPointerTypes[std::make_pair(nullptr, AddressSpace)];
+
+ if (!Entry)
+ Entry = new (CImpl->Alloc) PointerType(C, AddressSpace);
+ return Entry;
+}
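+
+// Editorial note (illustrative sketch, not upstream code; assumes an existing
+// LLVMContext `Ctx` with opaque pointers enabled): in that mode the typed
+// overload above forwards here, so both forms return the same uniqued type:
+//
+//   PointerType *P1 = PointerType::get(Type::getInt8Ty(Ctx), 0);
+//   PointerType *P2 = PointerType::get(Ctx, 0);
+//   assert(P1 == P2 && "one opaque 'ptr' type per address space");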
+
+PointerType::PointerType(Type *E, unsigned AddrSpace)
+ : Type(E->getContext(), PointerTyID), PointeeTy(E) {
+ ContainedTys = &PointeeTy;
+ NumContainedTys = 1;
+ setSubclassData(AddrSpace);
+}
+
+PointerType::PointerType(LLVMContext &C, unsigned AddrSpace)
+ : Type(C, PointerTyID), PointeeTy(nullptr) {
+ setSubclassData(AddrSpace);
+}
+
+PointerType *Type::getPointerTo(unsigned AddrSpace) const {
+ return PointerType::get(const_cast<Type*>(this), AddrSpace);
+}
+
+bool PointerType::isValidElementType(Type *ElemTy) {
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy() && !ElemTy->isTokenTy() &&
+ !ElemTy->isX86_AMXTy();
+}
+
+bool PointerType::isLoadableOrStorableType(Type *ElemTy) {
+ return isValidElementType(ElemTy) && !ElemTy->isFunctionTy();
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp b/contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp
new file mode 100644
index 000000000000..904af7e737cc
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/TypeFinder.cpp
@@ -0,0 +1,202 @@
+//===- TypeFinder.cpp - Implement the TypeFinder class --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TypeFinder class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/TypeFinder.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <utility>
+
+using namespace llvm;
+
+void TypeFinder::run(const Module &M, bool onlyNamed) {
+ OnlyNamed = onlyNamed;
+
+ // Get types from global variables.
+ for (const auto &G : M.globals()) {
+ incorporateType(G.getValueType());
+ if (G.hasInitializer())
+ incorporateValue(G.getInitializer());
+ }
+
+ // Get types from aliases.
+ for (const auto &A : M.aliases()) {
+ incorporateType(A.getValueType());
+ if (const Value *Aliasee = A.getAliasee())
+ incorporateValue(Aliasee);
+ }
+
+ // Get types from ifuncs.
+ for (const auto &GI : M.ifuncs())
+ incorporateType(GI.getValueType());
+
+ // Get types from functions.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDForInst;
+ for (const Function &FI : M) {
+ incorporateType(FI.getFunctionType());
+ incorporateAttributes(FI.getAttributes());
+
+ for (const Use &U : FI.operands())
+ incorporateValue(U.get());
+
+ // First incorporate the arguments.
+ for (const auto &A : FI.args())
+ incorporateValue(&A);
+
+ for (const BasicBlock &BB : FI)
+ for (const Instruction &I : BB) {
+ // Incorporate the type of the instruction.
+ incorporateType(I.getType());
+
+        // Incorporate non-instruction operand types. (Operands that are
+        // themselves instructions are already covered by this loop.)
+ for (const auto &O : I.operands())
+ if (&*O && !isa<Instruction>(&*O))
+ incorporateValue(&*O);
+
+ if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
+ incorporateType(GEP->getSourceElementType());
+ if (auto *AI = dyn_cast<AllocaInst>(&I))
+ incorporateType(AI->getAllocatedType());
+ if (const auto *CB = dyn_cast<CallBase>(&I))
+ incorporateAttributes(CB->getAttributes());
+
+ // Incorporate types hiding in metadata.
+ I.getAllMetadataOtherThanDebugLoc(MDForInst);
+ for (const auto &MD : MDForInst)
+ incorporateMDNode(MD.second);
+ MDForInst.clear();
+ }
+ }
+
+ for (const auto &NMD : M.named_metadata())
+ for (const auto *MDOp : NMD.operands())
+ incorporateMDNode(MDOp);
+}
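+
+// Editorial note (illustrative sketch, not upstream code; assumes an existing
+// Module `M`): a TypeFinder is typically walked as a range over the struct
+// types it collected:
+//
+//   TypeFinder Finder;
+//   Finder.run(M, /*onlyNamed=*/true);
+//   for (StructType *ST : Finder)
+//     errs() << ST->getName() << "\n";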
+
+void TypeFinder::clear() {
+ VisitedConstants.clear();
+ VisitedTypes.clear();
+ StructTypes.clear();
+}
+
+/// incorporateType - This method adds the type to the list of used structures
+/// if it's not in there already.
+void TypeFinder::incorporateType(Type *Ty) {
+ // Check to see if we've already visited this type.
+ if (!VisitedTypes.insert(Ty).second)
+ return;
+
+ SmallVector<Type *, 4> TypeWorklist;
+ TypeWorklist.push_back(Ty);
+ do {
+ Ty = TypeWorklist.pop_back_val();
+
+    // If this is a structure type, add it to the list of found struct types
+    // (honoring the OnlyNamed filter).
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ if (!OnlyNamed || STy->hasName())
+ StructTypes.push_back(STy);
+
+    // Add all unvisited subtypes to the worklist for processing.
+ for (Type *SubTy : llvm::reverse(Ty->subtypes()))
+ if (VisitedTypes.insert(SubTy).second)
+ TypeWorklist.push_back(SubTy);
+ } while (!TypeWorklist.empty());
+}
+
+/// incorporateValue - This method is used to walk operand lists finding types
+/// hiding in constant expressions and other operands that won't be walked in
+/// other ways. GlobalValues, basic blocks, instructions, and inst operands are
+/// all explicitly enumerated.
+void TypeFinder::incorporateValue(const Value *V) {
+ if (const auto *M = dyn_cast<MetadataAsValue>(V)) {
+ if (const auto *N = dyn_cast<MDNode>(M->getMetadata()))
+ return incorporateMDNode(N);
+ if (const auto *MDV = dyn_cast<ValueAsMetadata>(M->getMetadata()))
+ return incorporateValue(MDV->getValue());
+ return;
+ }
+
+ if (!isa<Constant>(V) || isa<GlobalValue>(V)) return;
+
+ // Already visited?
+ if (!VisitedConstants.insert(V).second)
+ return;
+
+ // Check this type.
+ incorporateType(V->getType());
+
+ // If this is an instruction, we incorporate it separately.
+ if (isa<Instruction>(V))
+ return;
+
+ if (auto *GEP = dyn_cast<GEPOperator>(V))
+ incorporateType(GEP->getSourceElementType());
+
+ // Look in operands for types.
+ const User *U = cast<User>(V);
+ for (const auto &I : U->operands())
+ incorporateValue(&*I);
+}
+
+/// incorporateMDNode - This method is used to walk the operands of an MDNode to
+/// find types hiding within.
+void TypeFinder::incorporateMDNode(const MDNode *V) {
+ // Already visited?
+ if (!VisitedMetadata.insert(V).second)
+ return;
+
+ // The arguments in DIArgList are not exposed as operands, so handle such
+ // nodes specifically here.
+ if (const auto *AL = dyn_cast<DIArgList>(V)) {
+ for (auto *Arg : AL->getArgs())
+ incorporateValue(Arg->getValue());
+ return;
+ }
+
+ // Look in operands for types.
+ for (Metadata *Op : V->operands()) {
+ if (!Op)
+ continue;
+ if (auto *N = dyn_cast<MDNode>(Op)) {
+ incorporateMDNode(N);
+ continue;
+ }
+ if (auto *C = dyn_cast<ConstantAsMetadata>(Op)) {
+ incorporateValue(C->getValue());
+ continue;
+ }
+ }
+}
+
+void TypeFinder::incorporateAttributes(AttributeList AL) {
+ if (!VisitedAttributes.insert(AL).second)
+ return;
+
+ for (AttributeSet AS : AL)
+ for (Attribute A : AS)
+ if (A.isTypeAttribute())
+ incorporateType(A.getValueAsType());
+}
diff --git a/contrib/llvm-project/llvm/lib/IR/Use.cpp b/contrib/llvm-project/llvm/lib/IR/Use.cpp
new file mode 100644
index 000000000000..99a89386d75f
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Use.cpp
@@ -0,0 +1,42 @@
+//===-- Use.cpp - Implement the Use class ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+
+namespace llvm {
+
+void Use::swap(Use &RHS) {
+ if (Val == RHS.Val)
+ return;
+
+ std::swap(Val, RHS.Val);
+ std::swap(Next, RHS.Next);
+ std::swap(Prev, RHS.Prev);
+
+ *Prev = this;
+ if (Next)
+ Next->Prev = &Next;
+
+ *RHS.Prev = &RHS;
+ if (RHS.Next)
+ RHS.Next->Prev = &RHS.Next;
+}
+
+unsigned Use::getOperandNo() const {
+ return this - getUser()->op_begin();
+}
+
+void Use::zap(Use *Start, const Use *Stop, bool del) {
+ while (Start != Stop)
+ (--Stop)->~Use();
+ if (del)
+ ::operator delete(Start);
+}
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/IR/User.cpp b/contrib/llvm-project/llvm/lib/IR/User.cpp
new file mode 100644
index 000000000000..637af7aaa245
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/User.cpp
@@ -0,0 +1,206 @@
+//===-- User.cpp - Implement the User class -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/User.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/IntrinsicInst.h"
+
+namespace llvm {
+class BasicBlock;
+
+//===----------------------------------------------------------------------===//
+// User Class
+//===----------------------------------------------------------------------===//
+
+bool User::replaceUsesOfWith(Value *From, Value *To) {
+ bool Changed = false;
+  if (From == To) return Changed; // Replacing a value with itself is a no-op.
+
+ assert((!isa<Constant>(this) || isa<GlobalValue>(this)) &&
+ "Cannot call User::replaceUsesOfWith on a constant!");
+
+ for (unsigned i = 0, E = getNumOperands(); i != E; ++i)
+    if (getOperand(i) == From) { // Is this operand pointing to oldval?
+ // The side effects of this setOperand call include linking to
+ // "To", adding "this" to the uses list of To, and
+ // most importantly, removing "this" from the use list of "From".
+ setOperand(i, To);
+ Changed = true;
+ }
+ if (auto DVI = dyn_cast_or_null<DbgVariableIntrinsic>(this)) {
+ if (is_contained(DVI->location_ops(), From)) {
+ DVI->replaceVariableLocationOp(From, To);
+ Changed = true;
+ }
+ }
+
+ return Changed;
+}
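+
+// Editorial note (illustrative sketch, not upstream code; `I`, `OldV`, and
+// `NewV` are hypothetical): a pass rewiring a single instruction's operands
+// would call
+//
+//   bool Changed = I->replaceUsesOfWith(OldV, NewV);
+//
+// whereas OldV->replaceAllUsesWith(NewV) (see Value.cpp below) rewrites every
+// user at once.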
+
+//===----------------------------------------------------------------------===//
+// User allocHungoffUses Implementation
+//===----------------------------------------------------------------------===//
+
+void User::allocHungoffUses(unsigned N, bool IsPhi) {
+ assert(HasHungOffUses && "alloc must have hung off uses");
+
+ static_assert(alignof(Use) >= alignof(BasicBlock *),
+ "Alignment is insufficient for 'hung-off-uses' pieces");
+
+ // Allocate the array of Uses
+ size_t size = N * sizeof(Use);
+ if (IsPhi)
+ size += N * sizeof(BasicBlock *);
+ Use *Begin = static_cast<Use*>(::operator new(size));
+ Use *End = Begin + N;
+ setOperandList(Begin);
+ for (; Begin != End; Begin++)
+ new (Begin) Use(this);
+}
+
+void User::growHungoffUses(unsigned NewNumUses, bool IsPhi) {
+ assert(HasHungOffUses && "realloc must have hung off uses");
+
+ unsigned OldNumUses = getNumOperands();
+
+ // We don't support shrinking the number of uses. We wouldn't have enough
+  // space to copy the old uses into the new space.
+ assert(NewNumUses > OldNumUses && "realloc must grow num uses");
+
+ Use *OldOps = getOperandList();
+ allocHungoffUses(NewNumUses, IsPhi);
+ Use *NewOps = getOperandList();
+
+ // Now copy from the old operands list to the new one.
+ std::copy(OldOps, OldOps + OldNumUses, NewOps);
+
+ // If this is a Phi, then we need to copy the BB pointers too.
+ if (IsPhi) {
+ auto *OldPtr = reinterpret_cast<char *>(OldOps + OldNumUses);
+ auto *NewPtr = reinterpret_cast<char *>(NewOps + NewNumUses);
+ std::copy(OldPtr, OldPtr + (OldNumUses * sizeof(BasicBlock *)), NewPtr);
+ }
+ Use::zap(OldOps, OldOps + OldNumUses, true);
+}
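+
+// Editorial note: for a PHI, the hung-off allocation above is laid out as
+// [Use[N] | BasicBlock*[N]], i.e. the incoming-block pointers live directly
+// after the Use array, which is why growing copies both regions before
+// zapping the old Use array.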
+
+
+// This is a private struct used by `User` to track the co-allocated descriptor
+// section.
+struct DescriptorInfo {
+ intptr_t SizeInBytes;
+};
+
+ArrayRef<const uint8_t> User::getDescriptor() const {
+ auto MutableARef = const_cast<User *>(this)->getDescriptor();
+ return {MutableARef.begin(), MutableARef.end()};
+}
+
+MutableArrayRef<uint8_t> User::getDescriptor() {
+ assert(HasDescriptor && "Don't call otherwise!");
+ assert(!HasHungOffUses && "Invariant!");
+
+ auto *DI = reinterpret_cast<DescriptorInfo *>(getIntrusiveOperands()) - 1;
+ assert(DI->SizeInBytes != 0 && "Should not have had a descriptor otherwise!");
+
+ return MutableArrayRef<uint8_t>(
+ reinterpret_cast<uint8_t *>(DI) - DI->SizeInBytes, DI->SizeInBytes);
+}
+
+bool User::isDroppable() const {
+ return isa<AssumeInst>(this) || isa<PseudoProbeInst>(this);
+}
+
+//===----------------------------------------------------------------------===//
+// User operator new Implementations
+//===----------------------------------------------------------------------===//
+
+void *User::allocateFixedOperandUser(size_t Size, unsigned Us,
+ unsigned DescBytes) {
+ assert(Us < (1u << NumUserOperandsBits) && "Too many operands");
+
+ static_assert(sizeof(DescriptorInfo) % sizeof(void *) == 0, "Required below");
+
+ unsigned DescBytesToAllocate =
+ DescBytes == 0 ? 0 : (DescBytes + sizeof(DescriptorInfo));
+ assert(DescBytesToAllocate % sizeof(void *) == 0 &&
+ "We need this to satisfy alignment constraints for Uses");
+
+ uint8_t *Storage = static_cast<uint8_t *>(
+ ::operator new(Size + sizeof(Use) * Us + DescBytesToAllocate));
+ Use *Start = reinterpret_cast<Use *>(Storage + DescBytesToAllocate);
+ Use *End = Start + Us;
+ User *Obj = reinterpret_cast<User*>(End);
+ Obj->NumUserOperands = Us;
+ Obj->HasHungOffUses = false;
+ Obj->HasDescriptor = DescBytes != 0;
+ for (; Start != End; Start++)
+ new (Start) Use(Obj);
+
+ if (DescBytes != 0) {
+ auto *DescInfo = reinterpret_cast<DescriptorInfo *>(Storage + DescBytes);
+ DescInfo->SizeInBytes = DescBytes;
+ }
+
+ return Obj;
+}
+
+void *User::operator new(size_t Size, unsigned Us) {
+ return allocateFixedOperandUser(Size, Us, 0);
+}
+
+void *User::operator new(size_t Size, unsigned Us, unsigned DescBytes) {
+ return allocateFixedOperandUser(Size, Us, DescBytes);
+}
+
+void *User::operator new(size_t Size) {
+ // Allocate space for a single Use*
+ void *Storage = ::operator new(Size + sizeof(Use *));
+ Use **HungOffOperandList = static_cast<Use **>(Storage);
+ User *Obj = reinterpret_cast<User *>(HungOffOperandList + 1);
+ Obj->NumUserOperands = 0;
+ Obj->HasHungOffUses = true;
+ Obj->HasDescriptor = false;
+ *HungOffOperandList = nullptr;
+ return Obj;
+}
+
+//===----------------------------------------------------------------------===//
+// User operator delete Implementation
+//===----------------------------------------------------------------------===//
+
+// Suppress memory sanitization, due to use-after-destroy by operator
+// delete. Bug report 24578 identifies this issue.
+LLVM_NO_SANITIZE_MEMORY_ATTRIBUTE void User::operator delete(void *Usr) {
+ // Hung off uses use a single Use* before the User, while other subclasses
+ // use a Use[] allocated prior to the user.
+ User *Obj = static_cast<User *>(Usr);
+ if (Obj->HasHungOffUses) {
+ assert(!Obj->HasDescriptor && "not supported!");
+
+ Use **HungOffOperandList = static_cast<Use **>(Usr) - 1;
+ // drop the hung off uses.
+ Use::zap(*HungOffOperandList, *HungOffOperandList + Obj->NumUserOperands,
+ /* Delete */ true);
+ ::operator delete(HungOffOperandList);
+ } else if (Obj->HasDescriptor) {
+ Use *UseBegin = static_cast<Use *>(Usr) - Obj->NumUserOperands;
+ Use::zap(UseBegin, UseBegin + Obj->NumUserOperands, /* Delete */ false);
+
+ auto *DI = reinterpret_cast<DescriptorInfo *>(UseBegin) - 1;
+ uint8_t *Storage = reinterpret_cast<uint8_t *>(DI) - DI->SizeInBytes;
+ ::operator delete(Storage);
+ } else {
+ Use *Storage = static_cast<Use *>(Usr) - Obj->NumUserOperands;
+ Use::zap(Storage, Storage + Obj->NumUserOperands,
+ /* Delete */ false);
+ ::operator delete(Storage);
+ }
+}
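+
+// Editorial note: summarizing the three allocation layouts handled above
+// (the User object comes last in each case):
+//
+//   hung-off uses:     [Use* slot][User]   (Use array allocated separately)
+//   with descriptor:   [descriptor bytes][DescriptorInfo][Use[N]][User]
+//   co-allocated uses: [Use[N]][User]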
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/IR/Value.cpp b/contrib/llvm-project/llvm/lib/IR/Value.cpp
new file mode 100644
index 000000000000..3990536f3da5
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Value.cpp
@@ -0,0 +1,1244 @@
+//===-- Value.cpp - Implement the Value class -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Value, ValueHandle, and User classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Value.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DerivedUser.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace llvm;
+
+static cl::opt<unsigned> UseDerefAtPointSemantics(
+ "use-dereferenceable-at-point-semantics", cl::Hidden, cl::init(false),
+ cl::desc("Deref attributes and metadata infer facts at definition only"));
+
+//===----------------------------------------------------------------------===//
+// Value Class
+//===----------------------------------------------------------------------===//
+static inline Type *checkType(Type *Ty) {
+ assert(Ty && "Value defined with a null type: Error!");
+ return Ty;
+}
+
+Value::Value(Type *ty, unsigned scid)
+ : VTy(checkType(ty)), UseList(nullptr), SubclassID(scid), HasValueHandle(0),
+ SubclassOptionalData(0), SubclassData(0), NumUserOperands(0),
+ IsUsedByMD(false), HasName(false), HasMetadata(false) {
+ static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
+ // FIXME: Why isn't this in the subclass gunk??
+ // Note, we cannot call isa<CallInst> before the CallInst has been
+ // constructed.
+ unsigned OpCode = 0;
+ if (SubclassID >= InstructionVal)
+ OpCode = SubclassID - InstructionVal;
+ if (OpCode == Instruction::Call || OpCode == Instruction::Invoke ||
+ OpCode == Instruction::CallBr)
+ assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
+ "invalid CallBase type!");
+ else if (SubclassID != BasicBlockVal &&
+ (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
+ assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
+ "Cannot create non-first-class values except for constants!");
+ static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
+ "Value too big");
+}
+
+Value::~Value() {
+ // Notify all ValueHandles (if present) that this value is going away.
+ if (HasValueHandle)
+ ValueHandleBase::ValueIsDeleted(this);
+ if (isUsedByMetadata())
+ ValueAsMetadata::handleDeletion(this);
+
+ // Remove associated metadata from context.
+ if (HasMetadata)
+ clearMetadata();
+
+#ifndef NDEBUG // Only in -g mode...
+ // Check to make sure that there are no uses of this value that are still
+ // around when the value is destroyed. If there are, then we have a dangling
+ // reference and something is wrong. This code is here to print out where
+ // the value is still being referenced.
+ //
+ // Note that use_empty() cannot be called here, as it eventually downcasts
+ // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
+ // been destructed, so accessing it is UB.
+ //
+ if (!materialized_use_empty()) {
+ dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
+ for (auto *U : users())
+ dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
+ }
+#endif
+ assert(materialized_use_empty() && "Uses remain when a value is destroyed!");
+
+ // If this value is named, destroy the name. This should not be in a symtab
+ // at this point.
+ destroyValueName();
+}
+
+void Value::deleteValue() {
+ switch (getValueID()) {
+#define HANDLE_VALUE(Name) \
+ case Value::Name##Val: \
+ delete static_cast<Name *>(this); \
+ break;
+#define HANDLE_MEMORY_VALUE(Name) \
+ case Value::Name##Val: \
+ static_cast<DerivedUser *>(this)->DeleteValue( \
+ static_cast<DerivedUser *>(this)); \
+ break;
+#define HANDLE_CONSTANT(Name) \
+ case Value::Name##Val: \
+ llvm_unreachable("constants should be destroyed with destroyConstant"); \
+ break;
+#define HANDLE_INSTRUCTION(Name) /* nothing */
+#include "llvm/IR/Value.def"
+
+#define HANDLE_INST(N, OPC, CLASS) \
+ case Value::InstructionVal + Instruction::OPC: \
+ delete static_cast<CLASS *>(this); \
+ break;
+#define HANDLE_USER_INST(N, OPC, CLASS)
+#include "llvm/IR/Instruction.def"
+
+ default:
+ llvm_unreachable("attempting to delete unknown value kind");
+ }
+}
+
+void Value::destroyValueName() {
+ ValueName *Name = getValueName();
+ if (Name) {
+ MallocAllocator Allocator;
+ Name->Destroy(Allocator);
+ }
+ setValueName(nullptr);
+}
+
+bool Value::hasNUses(unsigned N) const {
+ return hasNItems(use_begin(), use_end(), N);
+}
+
+bool Value::hasNUsesOrMore(unsigned N) const {
+ return hasNItemsOrMore(use_begin(), use_end(), N);
+}
+
+bool Value::hasOneUser() const {
+ if (use_empty())
+ return false;
+ if (hasOneUse())
+ return true;
+ return std::equal(++user_begin(), user_end(), user_begin());
+}
+
+static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }
+
+Use *Value::getSingleUndroppableUse() {
+ Use *Result = nullptr;
+ for (Use &U : uses()) {
+ if (!U.getUser()->isDroppable()) {
+ if (Result)
+ return nullptr;
+ Result = &U;
+ }
+ }
+ return Result;
+}
+
+User *Value::getUniqueUndroppableUser() {
+ User *Result = nullptr;
+ for (auto *U : users()) {
+ if (!U->isDroppable()) {
+ if (Result && Result != U)
+ return nullptr;
+ Result = U;
+ }
+ }
+ return Result;
+}
+
+bool Value::hasNUndroppableUses(unsigned int N) const {
+ return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
+}
+
+bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
+ return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
+}
+
+void Value::dropDroppableUses(
+ llvm::function_ref<bool(const Use *)> ShouldDrop) {
+ SmallVector<Use *, 8> ToBeEdited;
+ for (Use &U : uses())
+ if (U.getUser()->isDroppable() && ShouldDrop(&U))
+ ToBeEdited.push_back(&U);
+ for (Use *U : ToBeEdited)
+ dropDroppableUse(*U);
+}
+
+void Value::dropDroppableUsesIn(User &Usr) {
+ assert(Usr.isDroppable() && "Expected a droppable user!");
+ for (Use &UsrOp : Usr.operands()) {
+ if (UsrOp.get() == this)
+ dropDroppableUse(UsrOp);
+ }
+}
+
+void Value::dropDroppableUse(Use &U) {
+ U.removeFromList();
+ if (auto *Assume = dyn_cast<AssumeInst>(U.getUser())) {
+ unsigned OpNo = U.getOperandNo();
+ if (OpNo == 0)
+ U.set(ConstantInt::getTrue(Assume->getContext()));
+ else {
+ U.set(UndefValue::get(U.get()->getType()));
+ CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
+ BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag("ignore");
+ }
+ return;
+ }
+
+ llvm_unreachable("unkown droppable use");
+}
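+
+// Editorial note: concretely, for an llvm.assume user the code above turns a
+// dropped condition into the trivially-true assumption 'i1 true', while a
+// dropped operand-bundle argument becomes undef with its bundle retagged as
+// "ignore", so the assume stays well-formed either way.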
+
+bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
+ // This can be computed either by scanning the instructions in BB, or by
+ // scanning the use list of this Value. Both lists can be very long, but
+ // usually one is quite short.
+ //
+ // Scan both lists simultaneously until one is exhausted. This limits the
+ // search to the shorter list.
+ BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
+ const_user_iterator UI = user_begin(), UE = user_end();
+ for (; BI != BE && UI != UE; ++BI, ++UI) {
+ // Scan basic block: Check if this Value is used by the instruction at BI.
+ if (is_contained(BI->operands(), this))
+ return true;
+ // Scan use list: Check if the use at UI is in BB.
+ const auto *User = dyn_cast<Instruction>(*UI);
+ if (User && User->getParent() == BB)
+ return true;
+ }
+ return false;
+}
+
+unsigned Value::getNumUses() const {
+ return (unsigned)std::distance(use_begin(), use_end());
+}
+
+static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
+ ST = nullptr;
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (BasicBlock *P = I->getParent())
+ if (Function *PP = P->getParent())
+ ST = PP->getValueSymbolTable();
+ } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
+ if (Function *P = BB->getParent())
+ ST = P->getValueSymbolTable();
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (Module *P = GV->getParent())
+ ST = &P->getValueSymbolTable();
+ } else if (Argument *A = dyn_cast<Argument>(V)) {
+ if (Function *P = A->getParent())
+ ST = P->getValueSymbolTable();
+ } else {
+ assert(isa<Constant>(V) && "Unknown value type!");
+    return true; // No name is settable for this value.
+ }
+ return false;
+}
+
+ValueName *Value::getValueName() const {
+ if (!HasName) return nullptr;
+
+ LLVMContext &Ctx = getContext();
+ auto I = Ctx.pImpl->ValueNames.find(this);
+ assert(I != Ctx.pImpl->ValueNames.end() &&
+ "No name entry found!");
+
+ return I->second;
+}
+
+void Value::setValueName(ValueName *VN) {
+ LLVMContext &Ctx = getContext();
+
+ assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
+ "HasName bit out of sync!");
+
+ if (!VN) {
+ if (HasName)
+ Ctx.pImpl->ValueNames.erase(this);
+ HasName = false;
+ return;
+ }
+
+ HasName = true;
+ Ctx.pImpl->ValueNames[this] = VN;
+}
+
+StringRef Value::getName() const {
+ // Make sure the empty string is still a C string. For historical reasons,
+ // some clients want to call .data() on the result and expect it to be null
+ // terminated.
+ if (!hasName())
+ return StringRef("", 0);
+ return getValueName()->getKey();
+}
+
+void Value::setNameImpl(const Twine &NewName) {
+ // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
+ if (getContext().shouldDiscardValueNames() && !isa<GlobalValue>(this))
+ return;
+
+ // Fast path for common IRBuilder case of setName("") when there is no name.
+ if (NewName.isTriviallyEmpty() && !hasName())
+ return;
+
+ SmallString<256> NameData;
+ StringRef NameRef = NewName.toStringRef(NameData);
+ assert(NameRef.find_first_of(0) == StringRef::npos &&
+ "Null bytes are not allowed in names");
+
+ // Name isn't changing?
+ if (getName() == NameRef)
+ return;
+
+ assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");
+
+ // Get the symbol table to update for this object.
+ ValueSymbolTable *ST;
+ if (getSymTab(this, ST))
+ return; // Cannot set a name on this value (e.g. constant).
+
+ if (!ST) { // No symbol table to update? Just do the change.
+ if (NameRef.empty()) {
+ // Free the name for this value.
+ destroyValueName();
+ return;
+ }
+
+    // NOTE: Could optimize for the case where the name is shrinking, to avoid
+    // deallocating and then reallocating.
+ destroyValueName();
+
+ // Create the new name.
+ MallocAllocator Allocator;
+ setValueName(ValueName::Create(NameRef, Allocator));
+ getValueName()->setValue(this);
+ return;
+ }
+
+  // NOTE: Could optimize for the case where the name is shrinking, to avoid
+  // deallocating and then reallocating.
+ if (hasName()) {
+ // Remove old name.
+ ST->removeValueName(getValueName());
+ destroyValueName();
+
+ if (NameRef.empty())
+ return;
+ }
+
+ // Name is changing to something new.
+ setValueName(ST->createValueName(NameRef, this));
+}
+
+void Value::setName(const Twine &NewName) {
+ setNameImpl(NewName);
+ if (Function *F = dyn_cast<Function>(this))
+ F->recalculateIntrinsicID();
+}
+
+void Value::takeName(Value *V) {
+ assert(V != this && "Illegal call to this->takeName(this)!");
+ ValueSymbolTable *ST = nullptr;
+ // If this value has a name, drop it.
+ if (hasName()) {
+ // Get the symtab this is in.
+ if (getSymTab(this, ST)) {
+ // We can't set a name on this value, but we need to clear V's name if
+ // it has one.
+ if (V->hasName()) V->setName("");
+ return; // Cannot set a name on this value (e.g. constant).
+ }
+
+ // Remove old name.
+ if (ST)
+ ST->removeValueName(getValueName());
+ destroyValueName();
+ }
+
+ // Now we know that this has no name.
+
+ // If V has no name either, we're done.
+ if (!V->hasName()) return;
+
+ // Get this's symtab if we didn't before.
+ if (!ST) {
+ if (getSymTab(this, ST)) {
+ // Clear V's name.
+ V->setName("");
+ return; // Cannot set a name on this value (e.g. constant).
+ }
+ }
+
+  // Get V's ST; this should always succeed, because V has a name.
+ ValueSymbolTable *VST;
+ bool Failure = getSymTab(V, VST);
+ assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;
+
+ // If these values are both in the same symtab, we can do this very fast.
+ // This works even if both values have no symtab yet.
+ if (ST == VST) {
+ // Take the name!
+ setValueName(V->getValueName());
+ V->setValueName(nullptr);
+ getValueName()->setValue(this);
+ return;
+ }
+
+ // Otherwise, things are slightly more complex. Remove V's name from VST and
+ // then reinsert it into ST.
+
+ if (VST)
+ VST->removeValueName(V->getValueName());
+ setValueName(V->getValueName());
+ V->setValueName(nullptr);
+ getValueName()->setValue(this);
+
+ if (ST)
+ ST->reinsertValue(this);
+}
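+
+// Editorial note (illustrative sketch, not upstream code; `OldV` and `NewV`
+// are hypothetical): takeName pairs naturally with RAUW when swapping one
+// value for another:
+//
+//   NewV->takeName(OldV);
+//   OldV->replaceAllUsesWith(NewV);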
+
+#ifndef NDEBUG
+std::string Value::getNameOrAsOperand() const {
+ if (!getName().empty())
+ return std::string(getName());
+
+ std::string BBName;
+ raw_string_ostream OS(BBName);
+ printAsOperand(OS, false);
+ return OS.str();
+}
+#endif
+
+void Value::assertModuleIsMaterializedImpl() const {
+#ifndef NDEBUG
+ const GlobalValue *GV = dyn_cast<GlobalValue>(this);
+ if (!GV)
+ return;
+ const Module *M = GV->getParent();
+ if (!M)
+ return;
+ assert(M->isMaterialized());
+#endif
+}
+
+#ifndef NDEBUG
+static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
+ Constant *C) {
+ if (!Cache.insert(Expr).second)
+ return false;
+
+ for (auto &O : Expr->operands()) {
+ if (O == C)
+ return true;
+ auto *CE = dyn_cast<ConstantExpr>(O);
+ if (!CE)
+ continue;
+ if (contains(Cache, CE, C))
+ return true;
+ }
+ return false;
+}
+
+static bool contains(Value *Expr, Value *V) {
+ if (Expr == V)
+ return true;
+
+ auto *C = dyn_cast<Constant>(V);
+ if (!C)
+ return false;
+
+ auto *CE = dyn_cast<ConstantExpr>(Expr);
+ if (!CE)
+ return false;
+
+ SmallPtrSet<ConstantExpr *, 4> Cache;
+ return contains(Cache, CE, C);
+}
+#endif // NDEBUG
+
+void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
+ assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
+ assert(!contains(New, this) &&
+ "this->replaceAllUsesWith(expr(this)) is NOT valid!");
+ assert(New->getType() == getType() &&
+ "replaceAllUses of value with new value of different type!");
+
+ // Notify all ValueHandles (if present) that this value is going away.
+ if (HasValueHandle)
+ ValueHandleBase::ValueIsRAUWd(this, New);
+ if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
+ ValueAsMetadata::handleRAUW(this, New);
+
+ while (!materialized_use_empty()) {
+ Use &U = *UseList;
+ // Must handle Constants specially, we cannot call replaceUsesOfWith on a
+ // constant because they are uniqued.
+ if (auto *C = dyn_cast<Constant>(U.getUser())) {
+ if (!isa<GlobalValue>(C)) {
+ C->handleOperandChange(this, New);
+ continue;
+ }
+ }
+
+ U.set(New);
+ }
+
+ if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
+ BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
+}
+
+void Value::replaceAllUsesWith(Value *New) {
+ doRAUW(New, ReplaceMetadataUses::Yes);
+}
+
+void Value::replaceNonMetadataUsesWith(Value *New) {
+ doRAUW(New, ReplaceMetadataUses::No);
+}
+
+void Value::replaceUsesWithIf(Value *New,
+ llvm::function_ref<bool(Use &U)> ShouldReplace) {
+ assert(New && "Value::replaceUsesWithIf(<null>) is invalid!");
+ assert(New->getType() == getType() &&
+ "replaceUses of value with new value of different type!");
+
+ SmallVector<TrackingVH<Constant>, 8> Consts;
+ SmallPtrSet<Constant *, 8> Visited;
+
+ for (Use &U : llvm::make_early_inc_range(uses())) {
+ if (!ShouldReplace(U))
+ continue;
+ // Must handle Constants specially, we cannot call replaceUsesOfWith on a
+ // constant because they are uniqued.
+ if (auto *C = dyn_cast<Constant>(U.getUser())) {
+ if (!isa<GlobalValue>(C)) {
+ if (Visited.insert(C).second)
+ Consts.push_back(TrackingVH<Constant>(C));
+ continue;
+ }
+ }
+ U.set(New);
+ }
+
+ while (!Consts.empty()) {
+ // FIXME: handleOperandChange() updates all the uses in a given Constant,
+ // not just the one passed to ShouldReplace
+ Consts.pop_back_val()->handleOperandChange(this, New);
+ }
+}
+
+/// Replace llvm.dbg.* uses of MetadataAsValue(ValueAsMetadata(V)) outside BB
+/// with New.
+static void replaceDbgUsesOutsideBlock(Value *V, Value *New, BasicBlock *BB) {
+ SmallVector<DbgVariableIntrinsic *> DbgUsers;
+ findDbgUsers(DbgUsers, V);
+ for (auto *DVI : DbgUsers) {
+ if (DVI->getParent() != BB)
+ DVI->replaceVariableLocationOp(V, New);
+ }
+}
+
+// Like replaceAllUsesWith except it does not handle constants or basic blocks.
+// This routine leaves uses within BB.
+void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
+ assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
+ assert(!contains(New, this) &&
+ "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
+ assert(New->getType() == getType() &&
+ "replaceUses of value with new value of different type!");
+ assert(BB && "Basic block that may contain a use of 'New' must be defined\n");
+
+ replaceDbgUsesOutsideBlock(this, New, BB);
+ replaceUsesWithIf(New, [BB](Use &U) {
+ auto *I = dyn_cast<Instruction>(U.getUser());
+ // Don't replace if it's an instruction in the BB basic block.
+ return !I || I->getParent() != BB;
+ });
+}
+
+namespace {
+// Various metrics for how much to strip off of pointers.
+enum PointerStripKind {
+ PSK_ZeroIndices,
+ PSK_ZeroIndicesAndAliases,
+ PSK_ZeroIndicesSameRepresentation,
+ PSK_ForAliasAnalysis,
+ PSK_InBoundsConstantIndices,
+ PSK_InBounds
+};
+
+template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}
+
+template <PointerStripKind StripKind>
+static const Value *stripPointerCastsAndOffsets(
+ const Value *V,
+ function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
+ if (!V->getType()->isPointerTy())
+ return V;
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<const Value *, 4> Visited;
+
+ Visited.insert(V);
+ do {
+ Func(V);
+ if (auto *GEP = dyn_cast<GEPOperator>(V)) {
+ switch (StripKind) {
+ case PSK_ZeroIndices:
+ case PSK_ZeroIndicesAndAliases:
+ case PSK_ZeroIndicesSameRepresentation:
+ case PSK_ForAliasAnalysis:
+ if (!GEP->hasAllZeroIndices())
+ return V;
+ break;
+ case PSK_InBoundsConstantIndices:
+ if (!GEP->hasAllConstantIndices())
+ return V;
+ LLVM_FALLTHROUGH;
+ case PSK_InBounds:
+ if (!GEP->isInBounds())
+ return V;
+ break;
+ }
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ if (!V->getType()->isPointerTy())
+ return V;
+ } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
+ Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
+ // TODO: If we know an address space cast will not change the
+ // representation we could look through it here as well.
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
+ V = cast<GlobalAlias>(V)->getAliasee();
+ } else if (StripKind == PSK_ForAliasAnalysis && isa<PHINode>(V) &&
+ cast<PHINode>(V)->getNumIncomingValues() == 1) {
+ V = cast<PHINode>(V)->getIncomingValue(0);
+ } else {
+ if (const auto *Call = dyn_cast<CallBase>(V)) {
+ if (const Value *RV = Call->getReturnedArgOperand()) {
+ V = RV;
+ continue;
+ }
+        // The result of launder.invariant.group must alias its argument,
+        // but it can't be marked with the returned attribute, which is why
+        // it needs a special case.
+ if (StripKind == PSK_ForAliasAnalysis &&
+ (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
+ Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
+ V = Call->getArgOperand(0);
+ continue;
+ }
+ }
+ return V;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ } while (Visited.insert(V).second);
+
+ return V;
+}
+} // end anonymous namespace
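+
+// Editorial note (illustrative sketch, not upstream code): given IR such as
+//
+//   %p = bitcast i32* @g to i8*
+//   %q = getelementptr i8, i8* %p, i64 0
+//
+// %q->stripPointerCasts() walks through the zero-index GEP and the bitcast
+// and returns @g, while stripInBoundsConstantOffsets() would also walk
+// through inbounds GEPs with non-zero constant indices.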
+
+const Value *Value::stripPointerCasts() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
+}
+
+const Value *Value::stripPointerCastsAndAliases() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
+}
+
+const Value *Value::stripPointerCastsSameRepresentation() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
+}
+
+const Value *Value::stripInBoundsConstantOffsets() const {
+ return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
+}
+
+const Value *Value::stripPointerCastsForAliasAnalysis() const {
+ return stripPointerCastsAndOffsets<PSK_ForAliasAnalysis>(this);
+}
+
+const Value *Value::stripAndAccumulateConstantOffsets(
+ const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ bool AllowInvariantGroup,
+ function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
+ if (!getType()->isPtrOrPtrVectorTy())
+ return this;
+
+ unsigned BitWidth = Offset.getBitWidth();
+ assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
+ "The offset bit width does not match the DL specification.");
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<const Value *, 4> Visited;
+ Visited.insert(this);
+ const Value *V = this;
+ do {
+ if (auto *GEP = dyn_cast<GEPOperator>(V)) {
+ // If in-bounds was requested, we do not strip non-in-bounds GEPs.
+ if (!AllowNonInbounds && !GEP->isInBounds())
+ return V;
+
+ // If one of the values we have visited is an addrspacecast, then
+ // the pointer type of this GEP may be different from the type
+ // of the Ptr parameter which was passed to this function. This
+ // means when we construct GEPOffset, we need to use the size
+ // of GEP's pointer type rather than the size of the original
+ // pointer type.
+ APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
+ if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
+ return V;
+
+ // Stop traversal if the pointer offset wouldn't fit in the bit-width
+ // provided by the Offset argument. This can happen due to AddrSpaceCast
+ // stripping.
+ if (GEPOffset.getMinSignedBits() > BitWidth)
+ return V;
+
+ // External Analysis can return a result higher/lower than the value
+ // represents. We need to detect overflow/underflow.
+ APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
+ if (!ExternalAnalysis) {
+ Offset += GEPOffsetST;
+ } else {
+ bool Overflow = false;
+ APInt OldOffset = Offset;
+ Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
+ if (Overflow) {
+ Offset = OldOffset;
+ return V;
+ }
+ }
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast ||
+ Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
+ if (!GA->isInterposable())
+ V = GA->getAliasee();
+ } else if (const auto *Call = dyn_cast<CallBase>(V)) {
+ if (const Value *RV = Call->getReturnedArgOperand())
+ V = RV;
+ if (AllowInvariantGroup && Call->isLaunderOrStripInvariantGroup())
+ V = Call->getArgOperand(0);
+ }
+ assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
+ } while (Visited.insert(V).second);
+
+ return V;
+}
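+
+// Editorial note (illustrative sketch, not upstream code; `Ptr` is a
+// hypothetical Value* of pointer type and `DL` a DataLayout): the Offset
+// APInt must be pre-sized to the index width, and accumulates bytes:
+//
+//   APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
+//   const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
+//       DL, Offset, /*AllowNonInbounds=*/true);
+//   // Base is the underlying object; Offset holds the total byte offset.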
+
+const Value *
+Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
+ return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
+}
+
+bool Value::canBeFreed() const {
+ assert(getType()->isPointerTy());
+
+ // Cases that can simply never be deallocated
+ // *) Constants aren't allocated per se, thus not deallocated either.
+ if (isa<Constant>(this))
+ return false;
+
+ // Handle byval/byref/sret/inalloca/preallocated arguments. The storage
+ // lifetime is guaranteed to be longer than the callee's lifetime.
+ if (auto *A = dyn_cast<Argument>(this)) {
+ if (A->hasPointeeInMemoryValueAttr())
+ return false;
+    // A pointer to an object in a function which neither frees it, nor can
+    // arrange for another thread to free it on its behalf, cannot be freed in
+    // the scope of the function. Note that this logic is restricted to memory
+    // allocations in existence before the call; a nofree function *is* allowed
+    // to free memory it allocated.
+ const Function *F = A->getParent();
+ if (F->doesNotFreeMemory() && F->hasNoSync())
+ return false;
+ }
+
+ const Function *F = nullptr;
+ if (auto *I = dyn_cast<Instruction>(this))
+ F = I->getFunction();
+ if (auto *A = dyn_cast<Argument>(this))
+ F = A->getParent();
+
+ if (!F)
+ return true;
+
+ // With garbage collection, deallocation typically occurs solely at or after
+ // safepoints. If we're compiling for a collector which uses the
+ // gc.statepoint infrastructure, safepoints aren't explicitly present
+ // in the IR until after lowering from abstract to physical machine model.
+  // The collector could choose to mix explicit deallocation and gc'd objects
+ // which is why we need the explicit opt in on a per collector basis.
+ if (!F->hasGC())
+ return true;
+
+ const auto &GCName = F->getGC();
+ if (GCName == "statepoint-example") {
+ auto *PT = cast<PointerType>(this->getType());
+ if (PT->getAddressSpace() != 1)
+      // For the sake of this example GC, we arbitrarily pick addrspace(1) as
+      // our GC-managed heap. This must match the corresponding check in
+      // RewriteStatepointsForGC (and probably should be factored out).
+ return true;
+
+    // It is cheaper to scan for a declaration than to scan for a use in this
+    // function. Note that gc.statepoint is a type-overloaded function, so the
+    // usual trick of requesting the declaration of the intrinsic from the
+    // module doesn't work.
+ for (auto &Fn : *F->getParent())
+ if (Fn.getIntrinsicID() == Intrinsic::experimental_gc_statepoint)
+ return true;
+ return false;
+ }
+ return true;
+}
+
+uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
+ bool &CanBeNull,
+ bool &CanBeFreed) const {
+ assert(getType()->isPointerTy() && "must be pointer");
+
+ uint64_t DerefBytes = 0;
+ CanBeNull = false;
+ CanBeFreed = UseDerefAtPointSemantics && canBeFreed();
+ if (const Argument *A = dyn_cast<Argument>(this)) {
+ DerefBytes = A->getDereferenceableBytes();
+ if (DerefBytes == 0) {
+ // Handle byval/byref/inalloca/preallocated arguments
+ if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
+ if (ArgMemTy->isSized()) {
+ // FIXME: Why isn't this the type alloc size?
+ DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinSize();
+ }
+ }
+ }
+
+ if (DerefBytes == 0) {
+ DerefBytes = A->getDereferenceableOrNullBytes();
+ CanBeNull = true;
+ }
+ } else if (const auto *Call = dyn_cast<CallBase>(this)) {
+ DerefBytes = Call->getRetDereferenceableBytes();
+ if (DerefBytes == 0) {
+ DerefBytes = Call->getRetDereferenceableOrNullBytes();
+ CanBeNull = true;
+ }
+ } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
+ if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ if (DerefBytes == 0) {
+ if (MDNode *MD =
+ LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ CanBeNull = true;
+ }
+ } else if (auto *IP = dyn_cast<IntToPtrInst>(this)) {
+ if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ if (DerefBytes == 0) {
+ if (MDNode *MD =
+ IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ CanBeNull = true;
+ }
+ } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
+ if (!AI->isArrayAllocation()) {
+ DerefBytes =
+ DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinSize();
+ CanBeNull = false;
+ CanBeFreed = false;
+ }
+ } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
+ if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
+ // TODO: Don't outright reject hasExternalWeakLinkage but set the
+ // CanBeNull flag.
+ DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedSize();
+ CanBeNull = false;
+ CanBeFreed = false;
+ }
+ }
+ return DerefBytes;
+}
+
+Align Value::getPointerAlignment(const DataLayout &DL) const {
+ assert(getType()->isPointerTy() && "must be pointer");
+ if (auto *GO = dyn_cast<GlobalObject>(this)) {
+ if (isa<Function>(GO)) {
+ Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
+ switch (DL.getFunctionPtrAlignType()) {
+ case DataLayout::FunctionPtrAlignType::Independent:
+ return FunctionPtrAlign;
+ case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
+ return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
+ }
+ llvm_unreachable("Unhandled FunctionPtrAlignType");
+ }
+ const MaybeAlign Alignment(GO->getAlign());
+ if (!Alignment) {
+ if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
+ Type *ObjectType = GVar->getValueType();
+ if (ObjectType->isSized()) {
+ // If the object is defined in the current Module, we'll be giving
+ // it the preferred alignment. Otherwise, we have to assume that it
+ // may only have the minimum ABI alignment.
+ if (GVar->isStrongDefinitionForLinker())
+ return DL.getPreferredAlign(GVar);
+ else
+ return DL.getABITypeAlign(ObjectType);
+ }
+ }
+ }
+ return Alignment.valueOrOne();
+ } else if (const Argument *A = dyn_cast<Argument>(this)) {
+ const MaybeAlign Alignment = A->getParamAlign();
+ if (!Alignment && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = A->getParamStructRetType();
+ if (EltTy->isSized())
+ return DL.getABITypeAlign(EltTy);
+ }
+ return Alignment.valueOrOne();
+ } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
+ return AI->getAlign();
+ } else if (const auto *Call = dyn_cast<CallBase>(this)) {
+ MaybeAlign Alignment = Call->getRetAlign();
+ if (!Alignment && Call->getCalledFunction())
+ Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
+ return Alignment.valueOrOne();
+ } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
+ if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ return Align(CI->getLimitedValue());
+ }
+ } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
+ // Strip pointer casts to avoid creating unnecessary ptrtoint expression
+ // if the only "reduction" is combining a bitcast + ptrtoint.
+ CstPtr = CstPtr->stripPointerCasts();
+ if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
+ const_cast<Constant *>(CstPtr), DL.getIntPtrType(getType()),
+ /*OnlyIfReduced=*/true))) {
+ size_t TrailingZeros = CstInt->getValue().countTrailingZeros();
+ // While the actual alignment may be large, elsewhere we have
+ // an arbitrary upper alignment limit, so let's clamp to it.
+ return Align(TrailingZeros < Value::MaxAlignmentExponent
+ ? uint64_t(1) << TrailingZeros
+ : Value::MaximumAlignment);
+ }
+ }
+ return Align(1);
+}
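+
+// Worked example for the constant-pointer case above (illustrative only):
+// for a constant such as "inttoptr (i64 48 to i8*)", getPtrToInt folds back
+// to the integer 48 = 0b110000, which has four trailing zero bits, so the
+// deduced alignment is 1 << 4 = 16.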
+
+const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
+ const BasicBlock *PredBB) const {
+ auto *PN = dyn_cast<PHINode>(this);
+ if (PN && PN->getParent() == CurBB)
+ return PN->getIncomingValueForBlock(PredBB);
+ return this;
+}
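+
+// Illustration (hypothetical IR): given
+//   CurBB:  %p = phi i32 [ %a, %Pred1 ], [ %b, %Pred2 ]
+// calling DoPHITranslation(CurBB, Pred1) on %p yields %a, while any value
+// that is not a PHI node in CurBB simply translates to itself.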
+
+LLVMContext &Value::getContext() const { return VTy->getContext(); }
+
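+// Standard in-place reversal of the singly linked use list; the extra work
+// is keeping each node's Prev (pointer-to-next-pointer) slot consistent,
+// with the head's Prev re-anchored to &UseList at the end.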
+void Value::reverseUseList() {
+ if (!UseList || !UseList->Next)
+ // No need to reverse 0 or 1 uses.
+ return;
+
+ Use *Head = UseList;
+ Use *Current = UseList->Next;
+ Head->Next = nullptr;
+ while (Current) {
+ Use *Next = Current->Next;
+ Current->Next = Head;
+ Head->Prev = &Current->Next;
+ Head = Current;
+ Current = Next;
+ }
+ UseList = Head;
+ Head->Prev = &UseList;
+}
+
+bool Value::isSwiftError() const {
+ auto *Arg = dyn_cast<Argument>(this);
+ if (Arg)
+ return Arg->hasSwiftErrorAttr();
+ auto *Alloca = dyn_cast<AllocaInst>(this);
+ if (!Alloca)
+ return false;
+ return Alloca->isSwiftError();
+}
+
+bool Value::isTransitiveUsedByMetadataOnly() const {
+ SmallVector<const User *, 32> WorkList(user_begin(), user_end());
+ SmallPtrSet<const User *, 32> Visited(user_begin(), user_end());
+ while (!WorkList.empty()) {
+ const User *U = WorkList.pop_back_val();
+ // If it is transitively used by a global value or a non-constant value,
+ // it's obviously not only used by metadata.
+ if (!isa<Constant>(U) || isa<GlobalValue>(U))
+ return false;
+ for (const User *UU : U->users())
+ if (Visited.insert(UU).second)
+ WorkList.push_back(UU);
+ }
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// ValueHandleBase Class
+//===----------------------------------------------------------------------===//
+
+void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
+ assert(List && "Handle list is null?");
+
+ // Splice ourselves into the list.
+ Next = *List;
+ *List = this;
+ setPrevPtr(List);
+ if (Next) {
+ Next->setPrevPtr(&Next);
+ assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
+ }
+}
+
+void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
+ assert(List && "Must insert after existing node");
+
+ Next = List->Next;
+ setPrevPtr(&List->Next);
+ List->Next = this;
+ if (Next)
+ Next->setPrevPtr(&Next);
+}
+
+void ValueHandleBase::AddToUseList() {
+ assert(getValPtr() && "Null pointer doesn't have a use list!");
+
+ LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
+
+ if (getValPtr()->HasValueHandle) {
+ // If this value already has a ValueHandle, then it must be in the
+ // ValueHandles map already.
+ ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
+ assert(Entry && "Value doesn't have any handles?");
+ AddToExistingUseList(&Entry);
+ return;
+ }
+
+ // Ok, it doesn't have any handles yet, so we must insert it into the
+ // DenseMap. However, doing this insertion could cause the DenseMap to
+ // reallocate itself, which would invalidate all of the PrevP pointers that
+ // point into the old table. Handle this by checking for reallocation and
+ // updating the stale pointers only if needed.
+ DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
+ const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();
+
+ ValueHandleBase *&Entry = Handles[getValPtr()];
+ assert(!Entry && "Value really did already have handles?");
+ AddToExistingUseList(&Entry);
+ getValPtr()->HasValueHandle = true;
+
+ // If reallocation didn't happen or if this was the first insertion, don't
+ // walk the table.
+ if (Handles.isPointerIntoBucketsArray(OldBucketPtr) ||
+ Handles.size() == 1) {
+ return;
+ }
+
+ // Okay, reallocation did happen. Fix the Prev Pointers.
+ for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
+ E = Handles.end(); I != E; ++I) {
+ assert(I->second && I->first == I->second->getValPtr() &&
+ "List invariant broken!");
+ I->second->setPrevPtr(&I->second);
+ }
+}
+
+void ValueHandleBase::RemoveFromUseList() {
+ assert(getValPtr() && getValPtr()->HasValueHandle &&
+ "Pointer doesn't have a use list!");
+
+ // Unlink this from its use list.
+ ValueHandleBase **PrevPtr = getPrevPtr();
+ assert(*PrevPtr == this && "List invariant broken");
+
+ *PrevPtr = Next;
+ if (Next) {
+ assert(Next->getPrevPtr() == &Next && "List invariant broken");
+ Next->setPrevPtr(PrevPtr);
+ return;
+ }
+
+ // If the Next pointer was null, then it is possible that this was the last
+ // ValueHandle watching VP. If so, delete its entry from the ValueHandles
+ // map.
+ LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
+ DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
+ if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
+ Handles.erase(getValPtr());
+ getValPtr()->HasValueHandle = false;
+ }
+}
+
+void ValueHandleBase::ValueIsDeleted(Value *V) {
+ assert(V->HasValueHandle && "Should only be called if ValueHandles present");
+
+ // Get the linked list base, which is guaranteed to exist since the
+ // HasValueHandle flag is set.
+ LLVMContextImpl *pImpl = V->getContext().pImpl;
+ ValueHandleBase *Entry = pImpl->ValueHandles[V];
+ assert(Entry && "Value bit set but no entries exist");
+
+ // We use a local ValueHandleBase as an iterator so that ValueHandles can add
+ // and remove themselves from the list without breaking our iteration. This
+ // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
+ // Note that we deliberately do not support the case when dropping a value
+ // handle results in a new value handle being permanently added to the list
+ // (as might occur in theory for CallbackVH's): the new value handle will not
+ // be processed and the checking code will mete out righteous punishment if
+ // the handle is still present once we have finished processing all the other
+ // value handles (it is fine to momentarily add then remove a value handle).
+ for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
+ Iterator.RemoveFromUseList();
+ Iterator.AddToExistingUseListAfter(Entry);
+ assert(Entry->Next == &Iterator && "Loop invariant broken.");
+
+ switch (Entry->getKind()) {
+ case Assert:
+ break;
+ case Weak:
+ case WeakTracking:
+ // WeakTracking and Weak just go to null, which unlinks them
+ // from the list.
+ Entry->operator=(nullptr);
+ break;
+ case Callback:
+ // Forward to the subclass's implementation.
+ static_cast<CallbackVH*>(Entry)->deleted();
+ break;
+ }
+ }
+
+ // All callbacks, weak references, and assertingVHs should be dropped by now.
+ if (V->HasValueHandle) {
+#ifndef NDEBUG // Only in +Asserts mode...
+ dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
+ << "\n";
+ if (pImpl->ValueHandles[V]->getKind() == Assert)
+ llvm_unreachable("An asserting value handle still pointed to this"
+ " value!");
+
+#endif
+ llvm_unreachable("All references to V were not removed?");
+ }
+}
+
+void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
+ assert(Old->HasValueHandle && "Should only be called if ValueHandles present");
+ assert(Old != New && "Changing value into itself!");
+ assert(Old->getType() == New->getType() &&
+ "replaceAllUses of value with new value of different type!");
+
+ // Get the linked list base, which is guaranteed to exist since the
+ // HasValueHandle flag is set.
+ LLVMContextImpl *pImpl = Old->getContext().pImpl;
+ ValueHandleBase *Entry = pImpl->ValueHandles[Old];
+
+ assert(Entry && "Value bit set but no entries exist");
+
+ // We use a local ValueHandleBase as an iterator so that
+ // ValueHandles can add and remove themselves from the list without
+ // breaking our iteration. This is not really an AssertingVH; we
+ // just have to give ValueHandleBase some kind.
+ for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
+ Iterator.RemoveFromUseList();
+ Iterator.AddToExistingUseListAfter(Entry);
+ assert(Entry->Next == &Iterator && "Loop invariant broken.");
+
+ switch (Entry->getKind()) {
+ case Assert:
+ case Weak:
+ // Asserting and Weak handles do not follow RAUW implicitly.
+ break;
+ case WeakTracking:
+ // WeakTracking goes to the new value, which will unlink it from Old's list.
+ Entry->operator=(New);
+ break;
+ case Callback:
+ // Forward to the subclass's implementation.
+ static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
+ break;
+ }
+ }
+
+#ifndef NDEBUG
+ // If any new weak value handles were added while processing the
+ // list, then complain about it now.
+ if (Old->HasValueHandle)
+ for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
+ switch (Entry->getKind()) {
+ case WeakTracking:
+ dbgs() << "After RAUW from " << *Old->getType() << " %"
+ << Old->getName() << " to " << *New->getType() << " %"
+ << New->getName() << "\n";
+ llvm_unreachable(
+ "A weak tracking value handle still pointed to the old value!\n");
+ default:
+ break;
+ }
+#endif
+}
+
+// Pin the vtable to this file.
+void CallbackVH::anchor() {}
diff --git a/contrib/llvm-project/llvm/lib/IR/ValueSymbolTable.cpp b/contrib/llvm-project/llvm/lib/IR/ValueSymbolTable.cpp
new file mode 100644
index 000000000000..cf85a571f9a0
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/ValueSymbolTable.cpp
@@ -0,0 +1,130 @@
+//===- ValueSymbolTable.cpp - Implement the ValueSymbolTable class --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ValueSymbolTable class for the IR library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <utility>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "valuesymtab"
+
+// Class destructor
+ValueSymbolTable::~ValueSymbolTable() {
+#ifndef NDEBUG // Only do this in +Asserts mode...
+ for (const auto &VI : vmap)
+ dbgs() << "Value still in symbol table! Type = '"
+ << *VI.getValue()->getType() << "' Name = '" << VI.getKeyData()
+ << "'\n";
+ assert(vmap.empty() && "Values remain in symbol table!");
+#endif
+}
+
+ValueName *ValueSymbolTable::makeUniqueName(Value *V,
+ SmallString<256> &UniqueName) {
+ unsigned BaseSize = UniqueName.size();
+ while (true) {
+ // Trim any suffix off and append the next number.
+ UniqueName.resize(BaseSize);
+ raw_svector_ostream S(UniqueName);
+ if (auto *GV = dyn_cast<GlobalValue>(V)) {
+ // A dot is appended to mark it as a clone during ABI demangling so that
+ // for example "_Z1fv" and "_Z1fv.1" both demangle to "f()", the second
+ // one being a clone.
+ // On NVPTX we cannot use a dot because PTX only allows [A-Za-z0-9_$] for
+ // identifiers. This breaks ABI demangling but at least ptxas accepts and
+ // compiles the program.
+ const Module *M = GV->getParent();
+ if (!(M && Triple(M->getTargetTriple()).isNVPTX()))
+ S << ".";
+ }
+ S << ++LastUnique;
+
+ // Try to insert the vmap entry with this suffix.
+ auto IterBool = vmap.insert(std::make_pair(UniqueName.str(), V));
+ if (IterBool.second)
+ return &*IterBool.first;
+ }
+}
+
+// Insert a value into the symbol table with the specified name...
+//
+void ValueSymbolTable::reinsertValue(Value *V) {
+ assert(V->hasName() && "Can't insert nameless Value into symbol table");
+
+ // Try inserting the name, assuming it won't conflict.
+ if (vmap.insert(V->getValueName())) {
+ // LLVM_DEBUG(dbgs() << " Inserted value: " << V->getValueName() << ": " <<
+ // *V << "\n");
+ return;
+ }
+
+ // Otherwise, there is a naming conflict. Rename this value.
+ SmallString<256> UniqueName(V->getName().begin(), V->getName().end());
+
+ // The name is already used; just free it so we can allocate a new name.
+ MallocAllocator Allocator;
+ V->getValueName()->Destroy(Allocator);
+
+ ValueName *VN = makeUniqueName(V, UniqueName);
+ V->setValueName(VN);
+}
+
+void ValueSymbolTable::removeValueName(ValueName *V) {
+ // LLVM_DEBUG(dbgs() << " Removing Value: " << V->getKeyData() << "\n");
+ // Remove the value from the symbol table.
+ vmap.remove(V);
+}
+
+/// createValueName - This method attempts to create a value name and insert
+/// it into the symbol table with the specified name. If it conflicts, it
+/// auto-renames the name and returns that instead.
+ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
+ if (MaxNameSize > -1 && Name.size() > (unsigned)MaxNameSize)
+ Name = Name.substr(0, std::max(1u, (unsigned)MaxNameSize));
+
+ // In the common case, the name is not already in the symbol table.
+ auto IterBool = vmap.insert(std::make_pair(Name, V));
+ if (IterBool.second) {
+ // LLVM_DEBUG(dbgs() << " Inserted value: " << Entry.getKeyData() << ": "
+ // << *V << "\n");
+ return &*IterBool.first;
+ }
+
+ // Otherwise, there is a naming conflict. Rename this value.
+ SmallString<256> UniqueName(Name.begin(), Name.end());
+ return makeUniqueName(V, UniqueName);
+}
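+
+// For illustration: if the name "x" is already taken, createValueName falls
+// through to makeUniqueName, which appends values of the table-wide
+// LastUnique counter until an insertion succeeds -- e.g. "x.1" for a global
+// value (a plain "x1" on NVPTX and for local values).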
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+// dump - print out the symbol table
+//
+LLVM_DUMP_METHOD void ValueSymbolTable::dump() const {
+ // dbgs() << "ValueSymbolTable:\n";
+ for (const auto &I : *this) {
+ // dbgs() << " '" << I->getKeyData() << "' = ";
+ I.getValue()->dump();
+ // dbgs() << "\n";
+ }
+}
+#endif
diff --git a/contrib/llvm-project/llvm/lib/IR/VectorBuilder.cpp b/contrib/llvm-project/llvm/lib/IR/VectorBuilder.cpp
new file mode 100644
index 000000000000..e7be7a98a593
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/VectorBuilder.cpp
@@ -0,0 +1,103 @@
+//===- VectorBuilder.cpp - Builder for VP Intrinsics ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the VectorBuilder class, which is used as a convenient
+// way to create VP intrinsics as if they were LLVM instructions with a
+// consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include <llvm/ADT/SmallVector.h>
+#include <llvm/IR/FPEnv.h>
+#include <llvm/IR/Instructions.h>
+#include <llvm/IR/IntrinsicInst.h>
+#include <llvm/IR/Intrinsics.h>
+#include <llvm/IR/VectorBuilder.h>
+
+namespace llvm {
+
+void VectorBuilder::handleError(const char *ErrorMsg) const {
+ if (ErrorHandling == Behavior::SilentlyReturnNone)
+ return;
+ report_fatal_error(ErrorMsg);
+}
+
+Module &VectorBuilder::getModule() const {
+ return *Builder.GetInsertBlock()->getModule();
+}
+
+Value *VectorBuilder::getAllTrueMask() {
+ auto *BoolTy = Builder.getInt1Ty();
+ auto *MaskTy = VectorType::get(BoolTy, StaticVectorLength);
+ return ConstantInt::getAllOnesValue(MaskTy);
+}
+
+Value &VectorBuilder::requestMask() {
+ if (Mask)
+ return *Mask;
+
+ return *getAllTrueMask();
+}
+
+Value &VectorBuilder::requestEVL() {
+ if (ExplicitVectorLength)
+ return *ExplicitVectorLength;
+
+ assert(!StaticVectorLength.isScalable() && "TODO vscale lowering");
+ auto *IntTy = Builder.getInt32Ty();
+ return *ConstantInt::get(IntTy, StaticVectorLength.getFixedValue());
+}
+
+Value *VectorBuilder::createVectorInstruction(unsigned Opcode, Type *ReturnTy,
+ ArrayRef<Value *> InstOpArray,
+ const Twine &Name) {
+ auto VPID = VPIntrinsic::getForOpcode(Opcode);
+ if (VPID == Intrinsic::not_intrinsic)
+ return returnWithError<Value *>("No VPIntrinsic for this opcode");
+
+ auto MaskPosOpt = VPIntrinsic::getMaskParamPos(VPID);
+ auto VLenPosOpt = VPIntrinsic::getVectorLengthParamPos(VPID);
+ size_t NumInstParams = InstOpArray.size();
+ size_t NumVPParams =
+ NumInstParams + MaskPosOpt.has_value() + VLenPosOpt.has_value();
+
+ SmallVector<Value *, 6> IntrinParams;
+
+ // Whether the mask and vlen parameters are at the end of the parameter list.
+ bool TrailingMaskAndVLen =
+ std::min<size_t>(MaskPosOpt.value_or(NumInstParams),
+ VLenPosOpt.value_or(NumInstParams)) >= NumInstParams;
+
+ if (TrailingMaskAndVLen) {
+ // Fast path for trailing mask, vector length.
+ IntrinParams.append(InstOpArray.begin(), InstOpArray.end());
+ IntrinParams.resize(NumVPParams);
+ } else {
+ IntrinParams.resize(NumVPParams);
+ // Insert mask and evl operands in between the instruction operands.
+ for (size_t VPParamIdx = 0, ParamIdx = 0; VPParamIdx < NumVPParams;
+ ++VPParamIdx) {
+ if ((MaskPosOpt && MaskPosOpt.value_or(NumVPParams) == VPParamIdx) ||
+ (VLenPosOpt && VLenPosOpt.value_or(NumVPParams) == VPParamIdx))
+ continue;
+ assert(ParamIdx < NumInstParams);
+ IntrinParams[VPParamIdx] = InstOpArray[ParamIdx++];
+ }
+ }
+
+ if (MaskPosOpt)
+ IntrinParams[*MaskPosOpt] = &requestMask();
+ if (VLenPosOpt)
+ IntrinParams[*VLenPosOpt] = &requestEVL();
+
+ auto *VPDecl = VPIntrinsic::getDeclarationForParams(&getModule(), VPID,
+ ReturnTy, IntrinParams);
+ return Builder.CreateCall(VPDecl, IntrinParams, Name);
+}
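+
+// A minimal usage sketch (hypothetical caller; setMask/setEVL are the
+// configuration methods declared in VectorBuilder.h):
+//
+//   VectorBuilder VBuilder(Builder);
+//   VBuilder.setMask(Mask).setEVL(EVL);
+//   Value *VPAdd = VBuilder.createVectorInstruction(
+//       Instruction::Add, VecTy, {LHS, RHS}, "vp.add");
+//
+// This emits a call to llvm.vp.add.* with Mask and EVL placed at the
+// positions reported by VPIntrinsic::getMaskParamPos and
+// getVectorLengthParamPos.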
+
+} // namespace llvm
diff --git a/contrib/llvm-project/llvm/lib/IR/Verifier.cpp b/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
new file mode 100644
index 000000000000..75d02f4c8c82
--- /dev/null
+++ b/contrib/llvm-project/llvm/lib/IR/Verifier.cpp
@@ -0,0 +1,6604 @@
+//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function verifier interface, which can be used for some
+// basic correctness checking of input to the system.
+//
+// Note that this does not provide full `Java style' security and verification;
+// instead it just tries to ensure that code is well-formed.
+//
+// * Both of a binary operator's parameters are of the same type
+// * Verify that the indices of mem access instructions match other operands
+// * Verify that arithmetic and other things are only performed on first-class
+// types. Verify that shifts & logicals only happen on integrals, for example.
+// * All of the constants in a switch statement are of the correct type
+// * The code is in valid SSA form
+// * It should be illegal to put a label into any other type (like a structure)
+// or to return one. [except constant arrays!]
+// * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
+// * PHI nodes must have an entry for each predecessor, with no extras.
+// * PHI nodes must be the first thing in a basic block, all grouped together
+// * PHI nodes must have at least one entry
+// * All basic blocks should only end with terminator insts, not contain them
+// * The entry node to a function must not have predecessors
+// * All Instructions must be embedded into a basic block
+// * Functions cannot take a void-typed parameter
+// * Verify that a function's argument list agrees with its declared type.
+// * It is illegal to specify a name for a void value.
+// * It is illegal to have an internal global value with no initializer
+// * It is illegal to have a ret instruction that returns a value that does not
+// agree with the function return value type.
+// * Function call argument types match the function prototype
+// * A landing pad is defined by a landingpad instruction, and can be jumped to
+// only by the unwind edge of an invoke instruction.
+// * A landingpad instruction must be the first non-PHI instruction in the
+// block.
+// * Landingpad instructions must be in a function with a personality function.
+// * All other things that are tested by asserts spread about the code...
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Verifier.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsARM.h"
+#include "llvm/IR/IntrinsicsWebAssembly.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSlotTracker.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
+using namespace llvm;
+
+static cl::opt<bool> VerifyNoAliasScopeDomination(
+ "verify-noalias-scope-decl-dom", cl::Hidden, cl::init(false),
+ cl::desc("Ensure that llvm.experimental.noalias.scope.decl for identical "
+ "scopes are not dominating"));
+
+namespace llvm {
+
+struct VerifierSupport {
+ raw_ostream *OS;
+ const Module &M;
+ ModuleSlotTracker MST;
+ Triple TT;
+ const DataLayout &DL;
+ LLVMContext &Context;
+
+ /// Track the brokenness of the module while recursively visiting.
+ bool Broken = false;
+ /// Broken debug info can be "recovered" from by stripping the debug info.
+ bool BrokenDebugInfo = false;
+ /// Whether to treat broken debug info as an error.
+ bool TreatBrokenDebugInfoAsError = true;
+
+ explicit VerifierSupport(raw_ostream *OS, const Module &M)
+ : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
+ Context(M.getContext()) {}
+
+private:
+ void Write(const Module *M) {
+ *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
+ }
+
+ void Write(const Value *V) {
+ if (V)
+ Write(*V);
+ }
+
+ void Write(const Value &V) {
+ if (isa<Instruction>(V)) {
+ V.print(*OS, MST);
+ *OS << '\n';
+ } else {
+ V.printAsOperand(*OS, true, MST);
+ *OS << '\n';
+ }
+ }
+
+ void Write(const Metadata *MD) {
+ if (!MD)
+ return;
+ MD->print(*OS, MST, &M);
+ *OS << '\n';
+ }
+
+ template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
+ Write(MD.get());
+ }
+
+ void Write(const NamedMDNode *NMD) {
+ if (!NMD)
+ return;
+ NMD->print(*OS, MST);
+ *OS << '\n';
+ }
+
+ void Write(Type *T) {
+ if (!T)
+ return;
+ *OS << ' ' << *T;
+ }
+
+ void Write(const Comdat *C) {
+ if (!C)
+ return;
+ *OS << *C;
+ }
+
+ void Write(const APInt *AI) {
+ if (!AI)
+ return;
+ *OS << *AI << '\n';
+ }
+
+ void Write(const unsigned i) { *OS << i << '\n'; }
+
+ // NOLINTNEXTLINE(readability-identifier-naming)
+ void Write(const Attribute *A) {
+ if (!A)
+ return;
+ *OS << A->getAsString() << '\n';
+ }
+
+ // NOLINTNEXTLINE(readability-identifier-naming)
+ void Write(const AttributeSet *AS) {
+ if (!AS)
+ return;
+ *OS << AS->getAsString() << '\n';
+ }
+
+ // NOLINTNEXTLINE(readability-identifier-naming)
+ void Write(const AttributeList *AL) {
+ if (!AL)
+ return;
+ AL->print(*OS);
+ }
+
+ template <typename T> void Write(ArrayRef<T> Vs) {
+ for (const T &V : Vs)
+ Write(V);
+ }
+
+ template <typename T1, typename... Ts>
+ void WriteTs(const T1 &V1, const Ts &... Vs) {
+ Write(V1);
+ WriteTs(Vs...);
+ }
+
+ template <typename... Ts> void WriteTs() {}
+
+public:
+ /// A check failed, so print out the condition and the message.
+ ///
+ /// This provides a nice place to put a breakpoint if you want to see why
+ /// something is not correct.
+ void CheckFailed(const Twine &Message) {
+ if (OS)
+ *OS << Message << '\n';
+ Broken = true;
+ }
+
+ /// A check failed (with values to print).
+ ///
+ /// This calls the Message-only version so that the above is easier to set a
+ /// breakpoint on.
+ template <typename T1, typename... Ts>
+ void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
+ CheckFailed(Message);
+ if (OS)
+ WriteTs(V1, Vs...);
+ }
+
+ /// A debug info check failed.
+ void DebugInfoCheckFailed(const Twine &Message) {
+ if (OS)
+ *OS << Message << '\n';
+ Broken |= TreatBrokenDebugInfoAsError;
+ BrokenDebugInfo = true;
+ }
+
+ /// A debug info check failed (with values to print).
+ template <typename T1, typename... Ts>
+ void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
+ const Ts &... Vs) {
+ DebugInfoCheckFailed(Message);
+ if (OS)
+ WriteTs(V1, Vs...);
+ }
+};
+
+} // namespace llvm
+
+namespace {
+
+class Verifier : public InstVisitor<Verifier>, VerifierSupport {
+ friend class InstVisitor<Verifier>;
+
+ // ISD::ArgFlagsTy::MemAlign only has 4 bits for alignment, so
+ // the alignment size should not exceed 2^15. Since encode(Align)
+ // adds 1 to the shift value, the alignment size should not
+ // exceed 2^14; otherwise it cannot be properly lowered
+ // in the backend.
+ static constexpr unsigned ParamMaxAlignment = 1 << 14;
+ DominatorTree DT;
+
+ /// When verifying a basic block, keep track of all of the
+ /// instructions we have seen so far.
+ ///
+ /// This allows us to do efficient dominance checks for the case when an
+ /// instruction has an operand that is an instruction in the same block.
+ SmallPtrSet<Instruction *, 16> InstsInThisBlock;
+
+ /// Keep track of the metadata nodes that have been checked already.
+ SmallPtrSet<const Metadata *, 32> MDNodes;
+
+ /// Keep track which DISubprogram is attached to which function.
+ DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
+
+ /// Track all DICompileUnits visited.
+ SmallPtrSet<const Metadata *, 2> CUVisited;
+
+ /// The result type for a landingpad.
+ Type *LandingPadResultTy;
+
+ /// Whether we've seen a call to @llvm.localescape in this function
+ /// already.
+ bool SawFrameEscape;
+
+ /// Whether the current function has a DISubprogram attached to it.
+ bool HasDebugInfo = false;
+
+ /// The current source language.
+ dwarf::SourceLanguage CurrentSourceLang = dwarf::DW_LANG_lo_user;
+
+ /// Whether source was present on the first DIFile encountered in each CU.
+ DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
+
+ /// Stores the count of how many objects were passed to llvm.localescape for a
+ /// given function and the largest index passed to llvm.localrecover.
+ DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
+
+ // Maps catchswitches and cleanuppads that unwind to siblings to the
+ // terminators that indicate the unwind, used to detect cycles therein.
+ MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
+
+ /// Cache of constants visited in search of ConstantExprs.
+ SmallPtrSet<const Constant *, 32> ConstantExprVisited;
+
+ /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
+ SmallVector<const Function *, 4> DeoptimizeDeclarations;
+
+ /// Cache of attribute lists verified.
+ SmallPtrSet<const void *, 32> AttributeListsVisited;
+
+ // Verify that this GlobalValue is only used in this module.
+ // This map is used to avoid visiting uses twice. We can arrive at a user
+ // twice if it has multiple operands. In particular, for very large
+ // constant expressions we can arrive at a particular user many times.
+ SmallPtrSet<const Value *, 32> GlobalValueVisited;
+
+ // Keeps track of duplicate function argument debug info.
+ SmallVector<const DILocalVariable *, 16> DebugFnArgs;
+
+ TBAAVerifier TBAAVerifyHelper;
+
+ SmallVector<IntrinsicInst *, 4> NoAliasScopeDecls;
+
+ void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
+
+public:
+ explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
+ const Module &M)
+ : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
+ SawFrameEscape(false), TBAAVerifyHelper(this) {
+ TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
+ }
+
+ bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
+
+ bool verify(const Function &F) {
+ assert(F.getParent() == &M &&
+ "An instance of this class only works with a specific module!");
+
+ // First ensure the function is well-enough formed to compute dominance
+ // information, and directly compute a dominance tree. We don't rely on the
+ // pass manager to provide this as it isolates us from a potentially
+ // out-of-date dominator tree and makes it significantly more complex to run
+ // this code outside of a pass manager.
+ // FIXME: It's really gross that we have to cast away constness here.
+ if (!F.empty())
+ DT.recalculate(const_cast<Function &>(F));
+
+ for (const BasicBlock &BB : F) {
+ if (!BB.empty() && BB.back().isTerminator())
+ continue;
+
+ if (OS) {
+ *OS << "Basic Block in function '" << F.getName()
+ << "' does not have terminator!\n";
+ BB.printAsOperand(*OS, true, MST);
+ *OS << "\n";
+ }
+ return false;
+ }
+
+ Broken = false;
+ // FIXME: We strip const here because the inst visitor strips const.
+ visit(const_cast<Function &>(F));
+ verifySiblingFuncletUnwinds();
+ InstsInThisBlock.clear();
+ DebugFnArgs.clear();
+ LandingPadResultTy = nullptr;
+ SawFrameEscape = false;
+ SiblingFuncletInfo.clear();
+ verifyNoAliasScopeDecl();
+ NoAliasScopeDecls.clear();
+
+ return !Broken;
+ }
+
+ /// Verify the module that this instance of \c Verifier was initialized with.
+ bool verify() {
+ Broken = false;
+
+ // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
+ for (const Function &F : M)
+ if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
+ DeoptimizeDeclarations.push_back(&F);
+
+ // Now that we've visited every function, verify that we never asked to
+ // recover a frame index that wasn't escaped.
+ verifyFrameRecoverIndices();
+ for (const GlobalVariable &GV : M.globals())
+ visitGlobalVariable(GV);
+
+ for (const GlobalAlias &GA : M.aliases())
+ visitGlobalAlias(GA);
+
+ for (const GlobalIFunc &GI : M.ifuncs())
+ visitGlobalIFunc(GI);
+
+ for (const NamedMDNode &NMD : M.named_metadata())
+ visitNamedMDNode(NMD);
+
+ for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
+ visitComdat(SMEC.getValue());
+
+ visitModuleFlags();
+ visitModuleIdents();
+ visitModuleCommandLines();
+
+ verifyCompileUnits();
+
+ verifyDeoptimizeCallingConvs();
+ DISubprogramAttachments.clear();
+ return !Broken;
+ }
+
+private:
+ /// Whether a metadata node is allowed to be, or contain, a DILocation.
+ enum class AreDebugLocsAllowed { No, Yes };
+
+ // Verification methods...
+ void visitGlobalValue(const GlobalValue &GV);
+ void visitGlobalVariable(const GlobalVariable &GV);
+ void visitGlobalAlias(const GlobalAlias &GA);
+ void visitGlobalIFunc(const GlobalIFunc &GI);
+ void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
+ void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
+ const GlobalAlias &A, const Constant &C);
+ void visitNamedMDNode(const NamedMDNode &NMD);
+ void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
+ void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
+ void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
+ void visitComdat(const Comdat &C);
+ void visitModuleIdents();
+ void visitModuleCommandLines();
+ void visitModuleFlags();
+ void visitModuleFlag(const MDNode *Op,
+ DenseMap<const MDString *, const MDNode *> &SeenIDs,
+ SmallVectorImpl<const MDNode *> &Requirements);
+ void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
+ void visitFunction(const Function &F);
+ void visitBasicBlock(BasicBlock &BB);
+ void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
+ void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
+ void visitProfMetadata(Instruction &I, MDNode *MD);
+ void visitAnnotationMetadata(MDNode *Annotation);
+ void visitAliasScopeMetadata(const MDNode *MD);
+ void visitAliasScopeListMetadata(const MDNode *MD);
+ void visitAccessGroupMetadata(const MDNode *MD);
+
+ template <class Ty> bool isValidMetadataArray(const MDTuple &N);
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
+#include "llvm/IR/Metadata.def"
+ void visitDIScope(const DIScope &N);
+ void visitDIVariable(const DIVariable &N);
+ void visitDILexicalBlockBase(const DILexicalBlockBase &N);
+ void visitDITemplateParameter(const DITemplateParameter &N);
+
+ void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
+
+ // InstVisitor overrides...
+ using InstVisitor<Verifier>::visit;
+ void visit(Instruction &I);
+
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
+ void visitPHINode(PHINode &PN);
+ void visitCallBase(CallBase &Call);
+ void visitUnaryOperator(UnaryOperator &U);
+ void visitBinaryOperator(BinaryOperator &B);
+ void visitICmpInst(ICmpInst &IC);
+ void visitFCmpInst(FCmpInst &FC);
+ void visitExtractElementInst(ExtractElementInst &EI);
+ void visitInsertElementInst(InsertElementInst &EI);
+ void visitShuffleVectorInst(ShuffleVectorInst &EI);
+ void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
+ void visitCallInst(CallInst &CI);
+ void visitInvokeInst(InvokeInst &II);
+ void visitGetElementPtrInst(GetElementPtrInst &GEP);
+ void visitLoadInst(LoadInst &LI);
+ void visitStoreInst(StoreInst &SI);
+ void verifyDominatesUse(Instruction &I, unsigned i);
+ void visitInstruction(Instruction &I);
+ void visitTerminator(Instruction &I);
+ void visitBranchInst(BranchInst &BI);
+ void visitReturnInst(ReturnInst &RI);
+ void visitSwitchInst(SwitchInst &SI);
+ void visitIndirectBrInst(IndirectBrInst &BI);
+ void visitCallBrInst(CallBrInst &CBI);
+ void visitSelectInst(SelectInst &SI);
+ void visitUserOp1(Instruction &I);
+ void visitUserOp2(Instruction &I) { visitUserOp1(I); }
+ void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
+ void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
+ void visitVPIntrinsic(VPIntrinsic &VPI);
+ void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
+ void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
+ void visitAtomicRMWInst(AtomicRMWInst &RMWI);
+ void visitFenceInst(FenceInst &FI);
+ void visitAllocaInst(AllocaInst &AI);
+ void visitExtractValueInst(ExtractValueInst &EVI);
+ void visitInsertValueInst(InsertValueInst &IVI);
+ void visitEHPadPredecessors(Instruction &I);
+ void visitLandingPadInst(LandingPadInst &LPI);
+ void visitResumeInst(ResumeInst &RI);
+ void visitCatchPadInst(CatchPadInst &CPI);
+ void visitCatchReturnInst(CatchReturnInst &CatchReturn);
+ void visitCleanupPadInst(CleanupPadInst &CPI);
+ void visitFuncletPadInst(FuncletPadInst &FPI);
+ void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
+ void visitCleanupReturnInst(CleanupReturnInst &CRI);
+
+ void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
+ void verifySwiftErrorValue(const Value *SwiftErrorVal);
+ void verifyTailCCMustTailAttrs(const AttrBuilder &Attrs, StringRef Context);
+ void verifyMustTailCall(CallInst &CI);
+ bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
+ void verifyAttributeTypes(AttributeSet Attrs, const Value *V);
+ void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
+ void checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
+ const Value *V);
+ void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
+ const Value *V, bool IsIntrinsic, bool IsInlineAsm);
+ void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
+
+ void visitConstantExprsRecursively(const Constant *EntryC);
+ void visitConstantExpr(const ConstantExpr *CE);
+ void verifyInlineAsmCall(const CallBase &Call);
+ void verifyStatepoint(const CallBase &Call);
+ void verifyFrameRecoverIndices();
+ void verifySiblingFuncletUnwinds();
+
+ void verifyFragmentExpression(const DbgVariableIntrinsic &I);
+ template <typename ValueOrMetadata>
+ void verifyFragmentExpression(const DIVariable &V,
+ DIExpression::FragmentInfo Fragment,
+ ValueOrMetadata *Desc);
+ void verifyFnArgs(const DbgVariableIntrinsic &I);
+ void verifyNotEntryValue(const DbgVariableIntrinsic &I);
+
+ /// Module-level debug info verification...
+ void verifyCompileUnits();
+
+ /// Module-level verification that all @llvm.experimental.deoptimize
+ /// declarations share the same calling convention.
+ void verifyDeoptimizeCallingConvs();
+
+ void verifyAttachedCallBundle(const CallBase &Call,
+ const OperandBundleUse &BU);
+
+ /// Verify all-or-nothing property of DIFile source attribute within a CU.
+ void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
+
+ /// Verify the llvm.experimental.noalias.scope.decl declarations
+ void verifyNoAliasScopeDecl();
+};
+
+} // end anonymous namespace
+
+/// We know that cond should be true; if not, print an error message.
+#define Check(C, ...) \
+ do { \
+ if (!(C)) { \
+ CheckFailed(__VA_ARGS__); \
+ return; \
+ } \
+ } while (false)
+
+/// We know that a debug info condition should be true; if not, print
+/// an error message.
+#define CheckDI(C, ...) \
+ do { \
+ if (!(C)) { \
+ DebugInfoCheckFailed(__VA_ARGS__); \
+ return; \
+ } \
+ } while (false)
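+
+// For illustration, a hypothetical rule written with these macros:
+//
+//   Check(I.getType()->isFirstClassType(),
+//         "instruction must produce a first-class value", &I);
+//
+// which records the failure via CheckFailed (printing &I) and returns early
+// from the enclosing visit method.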
+
+void Verifier::visit(Instruction &I) {
+ for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
+ Check(I.getOperand(i) != nullptr, "Operand is null", &I);
+ InstVisitor<Verifier>::visit(I);
+}
+
+// Helper to iterate over indirect users. By returning false, the callback
+// can ask to stop traversing further.
+static void forEachUser(const Value *User,
+ SmallPtrSet<const Value *, 32> &Visited,
+ llvm::function_ref<bool(const Value *)> Callback) {
+ if (!Visited.insert(User).second)
+ return;
+
+ SmallVector<const Value *> WorkList;
+ append_range(WorkList, User->materialized_users());
+ while (!WorkList.empty()) {
+ const Value *Cur = WorkList.pop_back_val();
+ if (!Visited.insert(Cur).second)
+ continue;
+ if (Callback(Cur))
+ append_range(WorkList, Cur->materialized_users());
+ }
+}
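+
+// For illustration: visitGlobalValue below walks all transitive users of a
+// global this way, returning false from the callback at Instructions and
+// Functions so the traversal does not descend past them.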
+
+void Verifier::visitGlobalValue(const GlobalValue &GV) {
+ Check(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
+ "Global is external, but doesn't have external or weak linkage!", &GV);
+
+ if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV)) {
+
+ if (MaybeAlign A = GO->getAlign()) {
+ Check(A->value() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", GO);
+ }
+ }
+ Check(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
+ "Only global variables can have appending linkage!", &GV);
+
+ if (GV.hasAppendingLinkage()) {
+ const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
+ Check(GVar && GVar->getValueType()->isArrayTy(),
+ "Only global arrays can have appending linkage!", GVar);
+ }
+
+ if (GV.isDeclarationForLinker())
+ Check(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
+
+ if (GV.hasDLLImportStorageClass()) {
+ Check(!GV.isDSOLocal(), "GlobalValue with DLLImport Storage is dso_local!",
+ &GV);
+
+ Check((GV.isDeclaration() &&
+ (GV.hasExternalLinkage() || GV.hasExternalWeakLinkage())) ||
+ GV.hasAvailableExternallyLinkage(),
+ "Global is marked as dllimport, but not external", &GV);
+ }
+
+ if (GV.isImplicitDSOLocal())
+ Check(GV.isDSOLocal(),
+ "GlobalValue with local linkage or non-default "
+ "visibility must be dso_local!",
+ &GV);
+
+ forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ if (!I->getParent() || !I->getParent()->getParent())
+ CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
+ I);
+ else if (I->getParent()->getParent()->getParent() != &M)
+ CheckFailed("Global is referenced in a different module!", &GV, &M, I,
+ I->getParent()->getParent(),
+ I->getParent()->getParent()->getParent());
+ return false;
+ } else if (const Function *F = dyn_cast<Function>(V)) {
+ if (F->getParent() != &M)
+ CheckFailed("Global is used by function in a different module", &GV, &M,
+ F, F->getParent());
+ return false;
+ }
+ return true;
+ });
+}
+
+void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
+ if (GV.hasInitializer()) {
+ Check(GV.getInitializer()->getType() == GV.getValueType(),
+ "Global variable initializer type does not match global "
+ "variable type!",
+ &GV);
+ // If the global has common linkage, it must have a zero initializer and
+ // cannot be constant.
+ if (GV.hasCommonLinkage()) {
+ Check(GV.getInitializer()->isNullValue(),
+ "'common' global must have a zero initializer!", &GV);
+ Check(!GV.isConstant(), "'common' global may not be marked constant!",
+ &GV);
+ Check(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
+ }
+ }
+
+ if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
+ GV.getName() == "llvm.global_dtors")) {
+ Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
+ "invalid linkage for intrinsic global variable", &GV);
+ // Don't worry about emitting an error for it not being an array;
+ // visitGlobalValue will complain on appending non-array.
+ if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
+ StructType *STy = dyn_cast<StructType>(ATy->getElementType());
+ PointerType *FuncPtrTy =
+ FunctionType::get(Type::getVoidTy(Context), false)->
+ getPointerTo(DL.getProgramAddressSpace());
+ Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
+ STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
+ STy->getTypeAtIndex(1) == FuncPtrTy,
+ "wrong type for intrinsic global variable", &GV);
+ Check(STy->getNumElements() == 3,
+ "the third field of the element type is mandatory, "
+ "specify i8* null to migrate from the obsoleted 2-field form");
+ Type *ETy = STy->getTypeAtIndex(2);
+ Type *Int8Ty = Type::getInt8Ty(ETy->getContext());
+ Check(ETy->isPointerTy() &&
+ cast<PointerType>(ETy)->isOpaqueOrPointeeTypeMatches(Int8Ty),
+ "wrong type for intrinsic global variable", &GV);
+ }
+ }
+
+ if (GV.hasName() && (GV.getName() == "llvm.used" ||
+ GV.getName() == "llvm.compiler.used")) {
+ Check(!GV.hasInitializer() || GV.hasAppendingLinkage(),
+ "invalid linkage for intrinsic global variable", &GV);
+ Type *GVType = GV.getValueType();
+ if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
+ PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
+ Check(PTy, "wrong type for intrinsic global variable", &GV);
+ if (GV.hasInitializer()) {
+ const Constant *Init = GV.getInitializer();
+ const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
+ Check(InitArray, "wrong initalizer for intrinsic global variable",
+ Init);
+ for (Value *Op : InitArray->operands()) {
+ Value *V = Op->stripPointerCasts();
+ Check(isa<GlobalVariable>(V) || isa<Function>(V) ||
+ isa<GlobalAlias>(V),
+ Twine("invalid ") + GV.getName() + " member", V);
+ Check(V->hasName(),
+ Twine("members of ") + GV.getName() + " must be named", V);
+ }
+ }
+ }
+ }
+
+ // Visit any debug info attachments.
+ SmallVector<MDNode *, 1> MDs;
+ GV.getMetadata(LLVMContext::MD_dbg, MDs);
+ for (auto *MD : MDs) {
+ if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
+ visitDIGlobalVariableExpression(*GVE);
+ else
+ CheckDI(false, "!dbg attachment of global variable must be a "
+ "DIGlobalVariableExpression");
+ }
+
+ // Scalable vectors cannot be global variables, since we don't know
+ // the runtime size. If the global is an array containing scalable vectors,
+ // that will be caught by the isValidElementType methods in StructType or
+ // ArrayType instead.
+ Check(!isa<ScalableVectorType>(GV.getValueType()),
+ "Globals cannot contain scalable vectors", &GV);
+
+ if (auto *STy = dyn_cast<StructType>(GV.getValueType()))
+ Check(!STy->containsScalableVectorType(),
+ "Globals cannot contain scalable vectors", &GV);
+
+ if (!GV.hasInitializer()) {
+ visitGlobalValue(GV);
+ return;
+ }
+
+ // Walk any aggregate initializers looking for bitcasts between address spaces
+ visitConstantExprsRecursively(GV.getInitializer());
+
+ visitGlobalValue(GV);
+}
+
+void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
+ SmallPtrSet<const GlobalAlias*, 4> Visited;
+ Visited.insert(&GA);
+ visitAliaseeSubExpr(Visited, GA, C);
+}
+
+void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
+ const GlobalAlias &GA, const Constant &C) {
+ if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
+ Check(!GV->isDeclarationForLinker(), "Alias must point to a definition",
+ &GA);
+
+ if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
+ Check(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
+
+ Check(!GA2->isInterposable(),
+ "Alias cannot point to an interposable alias", &GA);
+ } else {
+ // Only continue verifying subexpressions of GlobalAliases.
+ // Do not recurse into global initializers.
+ return;
+ }
+ }
+
+ if (const auto *CE = dyn_cast<ConstantExpr>(&C))
+ visitConstantExprsRecursively(CE);
+
+ for (const Use &U : C.operands()) {
+ Value *V = &*U;
+ if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
+ visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
+ else if (const auto *C2 = dyn_cast<Constant>(V))
+ visitAliaseeSubExpr(Visited, GA, *C2);
+ }
+}
+
+void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
+ Check(GlobalAlias::isValidLinkage(GA.getLinkage()),
+ "Alias should have private, internal, linkonce, weak, linkonce_odr, "
+ "weak_odr, or external linkage!",
+ &GA);
+ const Constant *Aliasee = GA.getAliasee();
+ Check(Aliasee, "Aliasee cannot be NULL!", &GA);
+ Check(GA.getType() == Aliasee->getType(),
+ "Alias and aliasee types should match!", &GA);
+
+ Check(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
+ "Aliasee should be either GlobalValue or ConstantExpr", &GA);
+
+ visitAliaseeSubExpr(GA, *Aliasee);
+
+ visitGlobalValue(GA);
+}
+
+void Verifier::visitGlobalIFunc(const GlobalIFunc &GI) {
+ Check(GlobalIFunc::isValidLinkage(GI.getLinkage()),
+ "IFunc should have private, internal, linkonce, weak, linkonce_odr, "
+ "weak_odr, or external linkage!",
+ &GI);
+ // Pierce through ConstantExprs and GlobalAliases and check that the resolver
+ // is a Function definition.
+ const Function *Resolver = GI.getResolverFunction();
+ Check(Resolver, "IFunc must have a Function resolver", &GI);
+ Check(!Resolver->isDeclarationForLinker(),
+ "IFunc resolver must be a definition", &GI);
+
+ // Check that the immediate resolver operand (prior to any bitcasts) has the
+ // correct type.
+ const Type *ResolverTy = GI.getResolver()->getType();
+ const Type *ResolverFuncTy =
+ GlobalIFunc::getResolverFunctionType(GI.getValueType());
+ Check(ResolverTy == ResolverFuncTy->getPointerTo(),
+ "IFunc resolver has incorrect type", &GI);
+}
+
+void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
+ // There used to be various other llvm.dbg.* nodes, but we don't support
+ // upgrading them and we want to reserve the namespace for future uses.
+ if (NMD.getName().startswith("llvm.dbg."))
+ CheckDI(NMD.getName() == "llvm.dbg.cu",
+ "unrecognized named metadata node in the llvm.dbg namespace", &NMD);
+ for (const MDNode *MD : NMD.operands()) {
+ if (NMD.getName() == "llvm.dbg.cu")
+ CheckDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
+
+ if (!MD)
+ continue;
+
+ visitMDNode(*MD, AreDebugLocsAllowed::Yes);
+ }
+}
+
+void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
+ // Only visit each node once. Metadata can be mutually recursive, so this
+ // avoids infinite recursion here, as well as being an optimization.
+ if (!MDNodes.insert(&MD).second)
+ return;
+
+ Check(&MD.getContext() == &Context,
+ "MDNode context does not match Module context!", &MD);
+
+ switch (MD.getMetadataID()) {
+ default:
+ llvm_unreachable("Invalid MDNode subclass");
+ case Metadata::MDTupleKind:
+ break;
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ visit##CLASS(cast<CLASS>(MD)); \
+ break;
+#include "llvm/IR/Metadata.def"
+ }
+
+ for (const Metadata *Op : MD.operands()) {
+ if (!Op)
+ continue;
+ Check(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
+ &MD, Op);
+ CheckDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
+ "DILocation not allowed within this metadata node", &MD, Op);
+ if (auto *N = dyn_cast<MDNode>(Op)) {
+ visitMDNode(*N, AllowLocs);
+ continue;
+ }
+ if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
+ visitValueAsMetadata(*V, nullptr);
+ continue;
+ }
+ }
+
+ // Check these last, so we diagnose problems in operands first.
+ Check(!MD.isTemporary(), "Expected no forward declarations!", &MD);
+ Check(MD.isResolved(), "All nodes should be resolved!", &MD);
+}
+
+void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
+ Check(MD.getValue(), "Expected valid value", &MD);
+ Check(!MD.getValue()->getType()->isMetadataTy(),
+ "Unexpected metadata round-trip through values", &MD, MD.getValue());
+
+ auto *L = dyn_cast<LocalAsMetadata>(&MD);
+ if (!L)
+ return;
+
+ Check(F, "function-local metadata used outside a function", L);
+
+ // If this was an instruction, bb, or argument, verify that it is in the
+ // function that we expect.
+ Function *ActualF = nullptr;
+ if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
+ Check(I->getParent(), "function-local metadata not in basic block", L, I);
+ ActualF = I->getParent()->getParent();
+ } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
+ ActualF = BB->getParent();
+ else if (Argument *A = dyn_cast<Argument>(L->getValue()))
+ ActualF = A->getParent();
+ assert(ActualF && "Unimplemented function local metadata case!");
+
+ Check(ActualF == F, "function-local metadata used in wrong function", L);
+}
+
+void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
+ Metadata *MD = MDV.getMetadata();
+ if (auto *N = dyn_cast<MDNode>(MD)) {
+ visitMDNode(*N, AreDebugLocsAllowed::No);
+ return;
+ }
+
+ // Only visit each node once. Metadata can be mutually recursive, so this
+ // avoids infinite recursion here, as well as being an optimization.
+ if (!MDNodes.insert(MD).second)
+ return;
+
+ if (auto *V = dyn_cast<ValueAsMetadata>(MD))
+ visitValueAsMetadata(*V, F);
+}
+
+static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
+static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
+static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
+
+void Verifier::visitDILocation(const DILocation &N) {
+ CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
+ "location requires a valid scope", &N, N.getRawScope());
+ if (auto *IA = N.getRawInlinedAt())
+ CheckDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
+ if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
+ CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
+}
+
+void Verifier::visitGenericDINode(const GenericDINode &N) {
+ CheckDI(N.getTag(), "invalid tag", &N);
+}
+
+void Verifier::visitDIScope(const DIScope &N) {
+ if (auto *F = N.getRawFile())
+ CheckDI(isa<DIFile>(F), "invalid file", &N, F);
+}
+
+void Verifier::visitDISubrange(const DISubrange &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
+ bool HasAssumedSizedArraySupport = dwarf::isFortran(CurrentSourceLang);
+ CheckDI(HasAssumedSizedArraySupport || N.getRawCountNode() ||
+ N.getRawUpperBound(),
+ "Subrange must contain count or upperBound", &N);
+ CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
+ "Subrange can have any one of count or upperBound", &N);
+ auto *CBound = N.getRawCountNode();
+ CheckDI(!CBound || isa<ConstantAsMetadata>(CBound) ||
+ isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
+ "Count must be signed constant or DIVariable or DIExpression", &N);
+ auto Count = N.getCount();
+ CheckDI(!Count || !Count.is<ConstantInt *>() ||
+ Count.get<ConstantInt *>()->getSExtValue() >= -1,
+ "invalid subrange count", &N);
+ auto *LBound = N.getRawLowerBound();
+ CheckDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
+ isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
+ "LowerBound must be signed constant or DIVariable or DIExpression",
+ &N);
+ auto *UBound = N.getRawUpperBound();
+ CheckDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
+ isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
+ "UpperBound must be signed constant or DIVariable or DIExpression",
+ &N);
+ auto *Stride = N.getRawStride();
+ CheckDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
+ isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
+ "Stride must be signed constant or DIVariable or DIExpression", &N);
+}
+
+void Verifier::visitDIGenericSubrange(const DIGenericSubrange &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_generic_subrange, "invalid tag", &N);
+ CheckDI(N.getRawCountNode() || N.getRawUpperBound(),
+ "GenericSubrange must contain count or upperBound", &N);
+ CheckDI(!N.getRawCountNode() || !N.getRawUpperBound(),
+ "GenericSubrange can have any one of count or upperBound", &N);
+ auto *CBound = N.getRawCountNode();
+ CheckDI(!CBound || isa<DIVariable>(CBound) || isa<DIExpression>(CBound),
+ "Count must be signed constant or DIVariable or DIExpression", &N);
+ auto *LBound = N.getRawLowerBound();
+ CheckDI(LBound, "GenericSubrange must contain lowerBound", &N);
+ CheckDI(isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
+ "LowerBound must be signed constant or DIVariable or DIExpression",
+ &N);
+ auto *UBound = N.getRawUpperBound();
+ CheckDI(!UBound || isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
+ "UpperBound must be signed constant or DIVariable or DIExpression",
+ &N);
+ auto *Stride = N.getRawStride();
+ CheckDI(Stride, "GenericSubrange must contain stride", &N);
+ CheckDI(isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
+ "Stride must be signed constant or DIVariable or DIExpression", &N);
+}
+
+void Verifier::visitDIEnumerator(const DIEnumerator &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
+}
+
+void Verifier::visitDIBasicType(const DIBasicType &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_base_type ||
+ N.getTag() == dwarf::DW_TAG_unspecified_type ||
+ N.getTag() == dwarf::DW_TAG_string_type,
+ "invalid tag", &N);
+}
+
+void Verifier::visitDIStringType(const DIStringType &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_string_type, "invalid tag", &N);
+ CheckDI(!(N.isBigEndian() && N.isLittleEndian()), "has conflicting flags",
+ &N);
+}
+
+void Verifier::visitDIDerivedType(const DIDerivedType &N) {
+ // Common scope checks.
+ visitDIScope(N);
+
+ CheckDI(N.getTag() == dwarf::DW_TAG_typedef ||
+ N.getTag() == dwarf::DW_TAG_pointer_type ||
+ N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
+ N.getTag() == dwarf::DW_TAG_reference_type ||
+ N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
+ N.getTag() == dwarf::DW_TAG_const_type ||
+ N.getTag() == dwarf::DW_TAG_immutable_type ||
+ N.getTag() == dwarf::DW_TAG_volatile_type ||
+ N.getTag() == dwarf::DW_TAG_restrict_type ||
+ N.getTag() == dwarf::DW_TAG_atomic_type ||
+ N.getTag() == dwarf::DW_TAG_member ||
+ N.getTag() == dwarf::DW_TAG_inheritance ||
+ N.getTag() == dwarf::DW_TAG_friend ||
+ N.getTag() == dwarf::DW_TAG_set_type,
+ "invalid tag", &N);
+ if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
+ CheckDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
+ N.getRawExtraData());
+ }
+
+ if (N.getTag() == dwarf::DW_TAG_set_type) {
+ if (auto *T = N.getRawBaseType()) {
+ auto *Enum = dyn_cast_or_null<DICompositeType>(T);
+ auto *Basic = dyn_cast_or_null<DIBasicType>(T);
+ CheckDI(
+ (Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type) ||
+ (Basic && (Basic->getEncoding() == dwarf::DW_ATE_unsigned ||
+ Basic->getEncoding() == dwarf::DW_ATE_signed ||
+ Basic->getEncoding() == dwarf::DW_ATE_unsigned_char ||
+ Basic->getEncoding() == dwarf::DW_ATE_signed_char ||
+ Basic->getEncoding() == dwarf::DW_ATE_boolean)),
+ "invalid set base type", &N, T);
+ }
+ }
+
+ CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
+ CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
+ N.getRawBaseType());
+
+ if (N.getDWARFAddressSpace()) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
+ N.getTag() == dwarf::DW_TAG_reference_type ||
+ N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
+ "DWARF address space only applies to pointer or reference types",
+ &N);
+ }
+}
+
+/// Detect mutually exclusive flags.
+static bool hasConflictingReferenceFlags(unsigned Flags) {
+ return ((Flags & DINode::FlagLValueReference) &&
+ (Flags & DINode::FlagRValueReference)) ||
+ ((Flags & DINode::FlagTypePassByValue) &&
+ (Flags & DINode::FlagTypePassByReference));
+}
+
+void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
+ auto *Params = dyn_cast<MDTuple>(&RawParams);
+ CheckDI(Params, "invalid template params", &N, &RawParams);
+ for (Metadata *Op : Params->operands()) {
+ CheckDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
+ &N, Params, Op);
+ }
+}
+
+void Verifier::visitDICompositeType(const DICompositeType &N) {
+ // Common scope checks.
+ visitDIScope(N);
+
+ CheckDI(N.getTag() == dwarf::DW_TAG_array_type ||
+ N.getTag() == dwarf::DW_TAG_structure_type ||
+ N.getTag() == dwarf::DW_TAG_union_type ||
+ N.getTag() == dwarf::DW_TAG_enumeration_type ||
+ N.getTag() == dwarf::DW_TAG_class_type ||
+ N.getTag() == dwarf::DW_TAG_variant_part ||
+ N.getTag() == dwarf::DW_TAG_namelist,
+ "invalid tag", &N);
+
+ CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
+ CheckDI(isType(N.getRawBaseType()), "invalid base type", &N,
+ N.getRawBaseType());
+
+ CheckDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
+ "invalid composite elements", &N, N.getRawElements());
+ CheckDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
+ N.getRawVTableHolder());
+ CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
+ "invalid reference flags", &N);
+ unsigned DIBlockByRefStruct = 1 << 4;
+ CheckDI((N.getFlags() & DIBlockByRefStruct) == 0,
+ "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
+
+ if (N.isVector()) {
+ const DINodeArray Elements = N.getElements();
+ CheckDI(Elements.size() == 1 &&
+ Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
+ "invalid vector, expected one element of type subrange", &N);
+ }
+
+ if (auto *Params = N.getRawTemplateParams())
+ visitTemplateParams(N, *Params);
+
+ if (auto *D = N.getRawDiscriminator()) {
+ CheckDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
+ "discriminator can only appear on variant part");
+ }
+
+ if (N.getRawDataLocation()) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
+ "dataLocation can only appear in array type");
+ }
+
+ if (N.getRawAssociated()) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
+ "associated can only appear in array type");
+ }
+
+ if (N.getRawAllocated()) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
+ "allocated can only appear in array type");
+ }
+
+ if (N.getRawRank()) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_array_type,
+ "rank can only appear in array type");
+ }
+}
+
+void Verifier::visitDISubroutineType(const DISubroutineType &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
+ if (auto *Types = N.getRawTypeArray()) {
+ CheckDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
+ for (Metadata *Ty : N.getTypeArray()->operands()) {
+ CheckDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
+ }
+ }
+ CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
+ "invalid reference flags", &N);
+}
+
+void Verifier::visitDIFile(const DIFile &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
+ Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
+ if (Checksum) {
+ CheckDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
+ "invalid checksum kind", &N);
+ size_t Size;
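+ // Number of hex digits in the digest for each checksum kind.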
+ switch (Checksum->Kind) {
+ case DIFile::CSK_MD5:
+ Size = 32;
+ break;
+ case DIFile::CSK_SHA1:
+ Size = 40;
+ break;
+ case DIFile::CSK_SHA256:
+ Size = 64;
+ break;
+ }
+ CheckDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
+ CheckDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
+ "invalid checksum", &N);
+ }
+}
+
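+// Illustrative shape of a compile unit (most fields elided):
+//   !DICompileUnit(language: DW_LANG_C99, file: !1, emissionKind: FullDebug,
+//                  enums: !2, retainedTypes: !3, globals: !4, imports: !5)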
+void Verifier::visitDICompileUnit(const DICompileUnit &N) {
+ CheckDI(N.isDistinct(), "compile units must be distinct", &N);
+ CheckDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
+
+ // Don't bother verifying the compilation directory or producer string
+ // as those could be empty.
+ CheckDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
+ N.getRawFile());
+ CheckDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
+ N.getFile());
+
+ CurrentSourceLang = (dwarf::SourceLanguage)N.getSourceLanguage();
+
+ verifySourceDebugInfo(N, *N.getFile());
+
+ CheckDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
+ "invalid emission kind", &N);
+
+ if (auto *Array = N.getRawEnumTypes()) {
+ CheckDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
+ for (Metadata *Op : N.getEnumTypes()->operands()) {
+ auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
+ CheckDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
+ "invalid enum type", &N, N.getEnumTypes(), Op);
+ }
+ }
+ if (auto *Array = N.getRawRetainedTypes()) {
+ CheckDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
+ for (Metadata *Op : N.getRetainedTypes()->operands()) {
+ CheckDI(
+ Op && (isa<DIType>(Op) || (isa<DISubprogram>(Op) &&
+ !cast<DISubprogram>(Op)->isDefinition())),
+ "invalid retained type", &N, Op);
+ }
+ }
+ if (auto *Array = N.getRawGlobalVariables()) {
+ CheckDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
+ for (Metadata *Op : N.getGlobalVariables()->operands()) {
+ CheckDI(Op && (isa<DIGlobalVariableExpression>(Op)),
+ "invalid global variable ref", &N, Op);
+ }
+ }
+ if (auto *Array = N.getRawImportedEntities()) {
+ CheckDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
+ for (Metadata *Op : N.getImportedEntities()->operands()) {
+ CheckDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
+ &N, Op);
+ }
+ }
+ if (auto *Array = N.getRawMacros()) {
+ CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
+ for (Metadata *Op : N.getMacros()->operands()) {
+ CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
+ }
+ }
+ CUVisited.insert(&N);
+}
+
+void Verifier::visitDISubprogram(const DISubprogram &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
+ CheckDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
+ if (auto *F = N.getRawFile())
+ CheckDI(isa<DIFile>(F), "invalid file", &N, F);
+ else
+ CheckDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
+ if (auto *T = N.getRawType())
+ CheckDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
+ CheckDI(isType(N.getRawContainingType()), "invalid containing type", &N,
+ N.getRawContainingType());
+ if (auto *Params = N.getRawTemplateParams())
+ visitTemplateParams(N, *Params);
+ if (auto *S = N.getRawDeclaration())
+ CheckDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
+ "invalid subprogram declaration", &N, S);
+ if (auto *RawNode = N.getRawRetainedNodes()) {
+ auto *Node = dyn_cast<MDTuple>(RawNode);
+ CheckDI(Node, "invalid retained nodes list", &N, RawNode);
+ for (Metadata *Op : Node->operands()) {
+ CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
+ "invalid retained nodes, expected DILocalVariable or DILabel", &N,
+ Node, Op);
+ }
+ }
+ CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
+ "invalid reference flags", &N);
+
+ auto *Unit = N.getRawUnit();
+ if (N.isDefinition()) {
+ // Subprogram definitions (not part of the type hierarchy).
+ CheckDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
+ CheckDI(Unit, "subprogram definitions must have a compile unit", &N);
+ CheckDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
+ if (N.getFile())
+ verifySourceDebugInfo(*N.getUnit(), *N.getFile());
+ } else {
+ // Subprogram declarations (part of the type hierarchy).
+ CheckDI(!Unit, "subprogram declarations must not have a compile unit", &N);
+ }
+
+ if (auto *RawThrownTypes = N.getRawThrownTypes()) {
+ auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
+ CheckDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
+ for (Metadata *Op : ThrownTypes->operands())
+ CheckDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
+ Op);
+ }
+
+ if (N.areAllCallsDescribed())
+ CheckDI(N.isDefinition(),
+ "DIFlagAllCallsDescribed must be attached to a definition");
+}
+
+void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
+ CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
+ "invalid local scope", &N, N.getRawScope());
+ if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
+ CheckDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
+}
+
+void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
+ visitDILexicalBlockBase(N);
+
+ CheckDI(N.getLine() || !N.getColumn(),
+ "cannot have column info without line info", &N);
+}
+
+void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
+ visitDILexicalBlockBase(N);
+}
+
+void Verifier::visitDICommonBlock(const DICommonBlock &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
+ if (auto *S = N.getRawScope())
+ CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
+ if (auto *S = N.getRawDecl())
+ CheckDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
+}
+
+void Verifier::visitDINamespace(const DINamespace &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
+ if (auto *S = N.getRawScope())
+ CheckDI(isa<DIScope>(S), "invalid scope ref", &N, S);
+}
+
+void Verifier::visitDIMacro(const DIMacro &N) {
+ CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
+ N.getMacinfoType() == dwarf::DW_MACINFO_undef,
+ "invalid macinfo type", &N);
+ CheckDI(!N.getName().empty(), "anonymous macro", &N);
+ if (!N.getValue().empty()) {
+ assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
+ }
+}
+
+void Verifier::visitDIMacroFile(const DIMacroFile &N) {
+ CheckDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
+ "invalid macinfo type", &N);
+ if (auto *F = N.getRawFile())
+ CheckDI(isa<DIFile>(F), "invalid file", &N, F);
+
+ if (auto *Array = N.getRawElements()) {
+ CheckDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
+ for (Metadata *Op : N.getElements()->operands()) {
+ CheckDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
+ }
+ }
+}
+
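+// DIArgList keeps its values in a separate ValueAsMetadata list rather than
+// as ordinary MDNode operands, so a well-formed node has no operands at all.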
+void Verifier::visitDIArgList(const DIArgList &N) {
+ CheckDI(!N.getNumOperands(),
+ "DIArgList should have no operands other than a list of "
+ "ValueAsMetadata",
+ &N);
+}
+
+void Verifier::visitDIModule(const DIModule &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
+ CheckDI(!N.getName().empty(), "anonymous module", &N);
+}
+
+void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
+ CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
+}
+
+void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
+ visitDITemplateParameter(N);
+
+ CheckDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
+ &N);
+}
+
+void Verifier::visitDITemplateValueParameter(
+ const DITemplateValueParameter &N) {
+ visitDITemplateParameter(N);
+
+ CheckDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
+ N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
+ N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
+ "invalid tag", &N);
+}
+
+void Verifier::visitDIVariable(const DIVariable &N) {
+ if (auto *S = N.getRawScope())
+ CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
+ if (auto *F = N.getRawFile())
+ CheckDI(isa<DIFile>(F), "invalid file", &N, F);
+}
+
+void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
+ // Checks common to all variables.
+ visitDIVariable(N);
+
+ CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
+ CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
+ // Check only if the global variable is not an extern
+ if (N.isDefinition())
+ CheckDI(N.getType(), "missing global variable type", &N);
+ if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
+ CheckDI(isa<DIDerivedType>(Member),
+ "invalid static data member declaration", &N, Member);
+ }
+}
+
+void Verifier::visitDILocalVariable(const DILocalVariable &N) {
+ // Checks common to all variables.
+ visitDIVariable(N);
+
+ CheckDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
+ CheckDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
+ CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
+ "local variable requires a valid scope", &N, N.getRawScope());
+ if (auto Ty = N.getType())
+ CheckDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
+}
+
+void Verifier::visitDILabel(const DILabel &N) {
+ if (auto *S = N.getRawScope())
+ CheckDI(isa<DIScope>(S), "invalid scope", &N, S);
+ if (auto *F = N.getRawFile())
+ CheckDI(isa<DIFile>(F), "invalid file", &N, F);
+
+ CheckDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
+ CheckDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
+ "label requires a valid scope", &N, N.getRawScope());
+}
+
+void Verifier::visitDIExpression(const DIExpression &N) {
+ CheckDI(N.isValid(), "invalid expression", &N);
+}
+
+void Verifier::visitDIGlobalVariableExpression(
+ const DIGlobalVariableExpression &GVE) {
+ CheckDI(GVE.getVariable(), "missing variable");
+ if (auto *Var = GVE.getVariable())
+ visitDIGlobalVariable(*Var);
+ if (auto *Expr = GVE.getExpression()) {
+ visitDIExpression(*Expr);
+ if (auto Fragment = Expr->getFragmentInfo())
+ verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
+ }
+}
+
+void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
+ if (auto *T = N.getRawType())
+ CheckDI(isType(T), "invalid type ref", &N, T);
+ if (auto *F = N.getRawFile())
+ CheckDI(isa<DIFile>(F), "invalid file", &N, F);
+}
+
+void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
+ CheckDI(N.getTag() == dwarf::DW_TAG_imported_module ||
+ N.getTag() == dwarf::DW_TAG_imported_declaration,
+ "invalid tag", &N);
+ if (auto *S = N.getRawScope())
+ CheckDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
+ CheckDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
+ N.getRawEntity());
+}
+
+void Verifier::visitComdat(const Comdat &C) {
+ // In COFF, the module is invalid if the GlobalValue has private linkage;
+ // entities with private linkage don't have entries in the symbol table.
+ if (TT.isOSBinFormatCOFF())
+ if (const GlobalValue *GV = M.getNamedValue(C.getName()))
+ Check(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
+ GV);
+}
+
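+// Expected shape (illustrative):
+//   !llvm.ident = !{!0}
+//   !0 = !{!"some producer string"}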
+void Verifier::visitModuleIdents() {
+ const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
+ if (!Idents)
+ return;
+
+ // llvm.ident takes a list of metadata entries. Each entry has only one string.
+ // Scan each llvm.ident entry and make sure that this requirement is met.
+ for (const MDNode *N : Idents->operands()) {
+ Check(N->getNumOperands() == 1,
+ "incorrect number of operands in llvm.ident metadata", N);
+ Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
+ ("invalid value for llvm.ident metadata entry operand"
+ "(the operand should be a string)"),
+ N->getOperand(0));
+ }
+}
+
+void Verifier::visitModuleCommandLines() {
+ const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
+ if (!CommandLines)
+ return;
+
+ // llvm.commandline takes a list of metadata entries. Each entry has only one
+ // string. Scan each llvm.commandline entry and make sure that this
+ // requirement is met.
+ for (const MDNode *N : CommandLines->operands()) {
+ Check(N->getNumOperands() == 1,
+ "incorrect number of operands in llvm.commandline metadata", N);
+ Check(dyn_cast_or_null<MDString>(N->getOperand(0)),
+ ("invalid value for llvm.commandline metadata entry operand"
+ "(the operand should be a string)"),
+ N->getOperand(0));
+ }
+}
+
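+// Module flags are (behavior, ID, value) triples, e.g. (illustrative):
+//   !llvm.module.flags = !{!0, !1}
+//   !0 = !{i32 1, !"wchar_size", i32 4}
+//   !1 = !{i32 7, !"PIC Level", i32 2}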
+void Verifier::visitModuleFlags() {
+ const NamedMDNode *Flags = M.getModuleFlagsMetadata();
+ if (!Flags) return;
+
+ // Scan each flag, and track the flags and requirements.
+ DenseMap<const MDString*, const MDNode*> SeenIDs;
+ SmallVector<const MDNode*, 16> Requirements;
+ for (const MDNode *MDN : Flags->operands())
+ visitModuleFlag(MDN, SeenIDs, Requirements);
+
+ // Validate that the requirements in the module are valid.
+ for (const MDNode *Requirement : Requirements) {
+ const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
+ const Metadata *ReqValue = Requirement->getOperand(1);
+
+ const MDNode *Op = SeenIDs.lookup(Flag);
+ if (!Op) {
+ CheckFailed("invalid requirement on flag, flag is not present in module",
+ Flag);
+ continue;
+ }
+
+ if (Op->getOperand(2) != ReqValue) {
+ CheckFailed(("invalid requirement on flag, "
+ "flag does not have the required value"),
+ Flag);
+ continue;
+ }
+ }
+}
+
+void
+Verifier::visitModuleFlag(const MDNode *Op,
+ DenseMap<const MDString *, const MDNode *> &SeenIDs,
+ SmallVectorImpl<const MDNode *> &Requirements) {
+ // Each module flag should have three arguments, the merge behavior (a
+ // constant int), the flag ID (an MDString), and the value.
+ Check(Op->getNumOperands() == 3,
+ "incorrect number of operands in module flag", Op);
+ Module::ModFlagBehavior MFB;
+ if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
+ Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
+ "invalid behavior operand in module flag (expected constant integer)",
+ Op->getOperand(0));
+ Check(false,
+ "invalid behavior operand in module flag (unexpected constant)",
+ Op->getOperand(0));
+ }
+ MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
+ Check(ID, "invalid ID operand in module flag (expected metadata string)",
+ Op->getOperand(1));
+
+ // Check the values for behaviors with additional requirements.
+ switch (MFB) {
+ case Module::Error:
+ case Module::Warning:
+ case Module::Override:
+ // These behavior types accept any value.
+ break;
+
+ case Module::Min: {
+ Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
+ "invalid value for 'min' module flag (expected constant integer)",
+ Op->getOperand(2));
+ break;
+ }
+
+ case Module::Max: {
+ Check(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
+ "invalid value for 'max' module flag (expected constant integer)",
+ Op->getOperand(2));
+ break;
+ }
+
+ case Module::Require: {
+ // The value should itself be an MDNode with two operands, a flag ID (an
+ // MDString), and a value.
+ MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
+ Check(Value && Value->getNumOperands() == 2,
+ "invalid value for 'require' module flag (expected metadata pair)",
+ Op->getOperand(2));
+ Check(isa<MDString>(Value->getOperand(0)),
+ ("invalid value for 'require' module flag "
+ "(first value operand should be a string)"),
+ Value->getOperand(0));
+
+ // Append it to the list of requirements, to check once all module flags are
+ // scanned.
+ Requirements.push_back(Value);
+ break;
+ }
+
+ case Module::Append:
+ case Module::AppendUnique: {
+ // These behavior types require the operand be an MDNode.
+ Check(isa<MDNode>(Op->getOperand(2)),
+ "invalid value for 'append'-type module flag "
+ "(expected a metadata node)",
+ Op->getOperand(2));
+ break;
+ }
+ }
+
+ // Unless this is a "requires" flag, check the ID is unique.
+ if (MFB != Module::Require) {
+ bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
+ Check(Inserted,
+ "module flag identifiers must be unique (or of 'require' type)", ID);
+ }
+
+ if (ID->getString() == "wchar_size") {
+ ConstantInt *Value
+ = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
+ Check(Value, "wchar_size metadata requires constant integer argument");
+ }
+
+ if (ID->getString() == "Linker Options") {
+ // If the llvm.linker.options named metadata exists, we assume that the
+ // bitcode reader has upgraded the module flag. Otherwise the flag might
+ // have been created by a client directly.
+ Check(M.getNamedMetadata("llvm.linker.options"),
+ "'Linker Options' named metadata no longer supported");
+ }
+
+ if (ID->getString() == "SemanticInterposition") {
+ ConstantInt *Value =
+ mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
+ Check(Value,
+ "SemanticInterposition metadata requires constant integer argument");
+ }
+
+ if (ID->getString() == "CG Profile") {
+ for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
+ visitModuleFlagCGProfileEntry(MDO);
+ }
+}
+
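+// Each CG profile entry is expected to be a (caller, callee, count) triple,
+// e.g. (illustrative): !{ptr @from, ptr @to, i64 42}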
+void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
+ auto CheckFunction = [&](const MDOperand &FuncMDO) {
+ if (!FuncMDO)
+ return;
+ auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
+ Check(F && isa<Function>(F->getValue()->stripPointerCasts()),
+ "expected a Function or null", FuncMDO);
+ };
+ auto Node = dyn_cast_or_null<MDNode>(MDO);
+ Check(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
+ CheckFunction(Node->getOperand(0));
+ CheckFunction(Node->getOperand(1));
+ auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
+ Check(Count && Count->getType()->isIntegerTy(),
+ "expected an integer constant", Node->getOperand(2));
+}
+
+void Verifier::verifyAttributeTypes(AttributeSet Attrs, const Value *V) {
+ for (Attribute A : Attrs) {
+
+ if (A.isStringAttribute()) {
+#define GET_ATTR_NAMES
+#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME)
+#define ATTRIBUTE_STRBOOL(ENUM_NAME, DISPLAY_NAME) \
+ if (A.getKindAsString() == #DISPLAY_NAME) { \
+ auto V = A.getValueAsString(); \
+ if (!(V.empty() || V == "true" || V == "false")) \
+ CheckFailed("invalid value for '" #DISPLAY_NAME "' attribute: " + V + \
+ ""); \
+ }
+
+#include "llvm/IR/Attributes.inc"
+ continue;
+ }
+
+ if (A.isIntAttribute() != Attribute::isIntAttrKind(A.getKindAsEnum())) {
+ CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
+ V);
+ return;
+ }
+ }
+}
+
+// verifyParameterAttrs - Check the given attributes for an argument or return
+// value of the specified type. The value V is printed in error messages.
+void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
+ const Value *V) {
+ if (!Attrs.hasAttributes())
+ return;
+
+ verifyAttributeTypes(Attrs, V);
+
+ for (Attribute Attr : Attrs)
+ Check(Attr.isStringAttribute() ||
+ Attribute::canUseAsParamAttr(Attr.getKindAsEnum()),
+ "Attribute '" + Attr.getAsString() + "' does not apply to parameters",
+ V);
+
+ if (Attrs.hasAttribute(Attribute::ImmArg)) {
+ Check(Attrs.getNumAttributes() == 1,
+ "Attribute 'immarg' is incompatible with other attributes", V);
+ }
+
+ // Check for mutually incompatible attributes. Only inreg is compatible with
+ // sret.
+ unsigned AttrCount = 0;
+ AttrCount += Attrs.hasAttribute(Attribute::ByVal);
+ AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
+ AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
+ AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
+ Attrs.hasAttribute(Attribute::InReg);
+ AttrCount += Attrs.hasAttribute(Attribute::Nest);
+ AttrCount += Attrs.hasAttribute(Attribute::ByRef);
+ Check(AttrCount <= 1,
+ "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
+ "'byref', and 'sret' are incompatible!",
+ V);
+
+ Check(!(Attrs.hasAttribute(Attribute::InAlloca) &&
+ Attrs.hasAttribute(Attribute::ReadOnly)),
+ "Attributes "
+ "'inalloca and readonly' are incompatible!",
+ V);
+
+ Check(!(Attrs.hasAttribute(Attribute::StructRet) &&
+ Attrs.hasAttribute(Attribute::Returned)),
+ "Attributes "
+ "'sret and returned' are incompatible!",
+ V);
+
+ Check(!(Attrs.hasAttribute(Attribute::ZExt) &&
+ Attrs.hasAttribute(Attribute::SExt)),
+ "Attributes "
+ "'zeroext and signext' are incompatible!",
+ V);
+
+ Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
+ Attrs.hasAttribute(Attribute::ReadOnly)),
+ "Attributes "
+ "'readnone and readonly' are incompatible!",
+ V);
+
+ Check(!(Attrs.hasAttribute(Attribute::ReadNone) &&
+ Attrs.hasAttribute(Attribute::WriteOnly)),
+ "Attributes "
+ "'readnone and writeonly' are incompatible!",
+ V);
+
+ Check(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
+ Attrs.hasAttribute(Attribute::WriteOnly)),
+ "Attributes "
+ "'readonly and writeonly' are incompatible!",
+ V);
+
+ Check(!(Attrs.hasAttribute(Attribute::NoInline) &&
+ Attrs.hasAttribute(Attribute::AlwaysInline)),
+ "Attributes "
+ "'noinline and alwaysinline' are incompatible!",
+ V);
+
+ AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
+ for (Attribute Attr : Attrs) {
+ if (!Attr.isStringAttribute() &&
+ IncompatibleAttrs.contains(Attr.getKindAsEnum())) {
+ CheckFailed("Attribute '" + Attr.getAsString() +
+ "' applied to incompatible type!", V);
+ return;
+ }
+ }
+
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ if (Attrs.hasAttribute(Attribute::ByVal)) {
+ if (Attrs.hasAttribute(Attribute::Alignment)) {
+ Align AttrAlign = Attrs.getAlignment().valueOrOne();
+ Align MaxAlign(ParamMaxAlignment);
+ Check(AttrAlign <= MaxAlign,
+ "Attribute 'align' exceed the max size 2^14", V);
+ }
+ SmallPtrSet<Type *, 4> Visited;
+ Check(Attrs.getByValType()->isSized(&Visited),
+ "Attribute 'byval' does not support unsized types!", V);
+ }
+ if (Attrs.hasAttribute(Attribute::ByRef)) {
+ SmallPtrSet<Type *, 4> Visited;
+ Check(Attrs.getByRefType()->isSized(&Visited),
+ "Attribute 'byref' does not support unsized types!", V);
+ }
+ if (Attrs.hasAttribute(Attribute::InAlloca)) {
+ SmallPtrSet<Type *, 4> Visited;
+ Check(Attrs.getInAllocaType()->isSized(&Visited),
+ "Attribute 'inalloca' does not support unsized types!", V);
+ }
+ if (Attrs.hasAttribute(Attribute::Preallocated)) {
+ SmallPtrSet<Type *, 4> Visited;
+ Check(Attrs.getPreallocatedType()->isSized(&Visited),
+ "Attribute 'preallocated' does not support unsized types!", V);
+ }
+ if (!PTy->isOpaque()) {
+ if (!isa<PointerType>(PTy->getNonOpaquePointerElementType()))
+ Check(!Attrs.hasAttribute(Attribute::SwiftError),
+ "Attribute 'swifterror' only applies to parameters "
+ "with pointer to pointer type!",
+ V);
+ if (Attrs.hasAttribute(Attribute::ByRef)) {
+ Check(Attrs.getByRefType() == PTy->getNonOpaquePointerElementType(),
+ "Attribute 'byref' type does not match parameter!", V);
+ }
+
+ if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
+ Check(Attrs.getByValType() == PTy->getNonOpaquePointerElementType(),
+ "Attribute 'byval' type does not match parameter!", V);
+ }
+
+ if (Attrs.hasAttribute(Attribute::Preallocated)) {
+ Check(Attrs.getPreallocatedType() ==
+ PTy->getNonOpaquePointerElementType(),
+ "Attribute 'preallocated' type does not match parameter!", V);
+ }
+
+ if (Attrs.hasAttribute(Attribute::InAlloca)) {
+ Check(Attrs.getInAllocaType() == PTy->getNonOpaquePointerElementType(),
+ "Attribute 'inalloca' type does not match parameter!", V);
+ }
+
+ if (Attrs.hasAttribute(Attribute::ElementType)) {
+ Check(Attrs.getElementType() == PTy->getNonOpaquePointerElementType(),
+ "Attribute 'elementtype' type does not match parameter!", V);
+ }
+ }
+ }
+}
+
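+// Helper for string attributes whose value must parse as a base-10 unsigned
+// integer, e.g. "patchable-function-entry"="2" (illustrative).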
+void Verifier::checkUnsignedBaseTenFuncAttr(AttributeList Attrs, StringRef Attr,
+ const Value *V) {
+ if (Attrs.hasFnAttr(Attr)) {
+ StringRef S = Attrs.getFnAttr(Attr).getValueAsString();
+ unsigned N;
+ if (S.getAsInteger(10, N))
+ CheckFailed("\"" + Attr + "\" takes an unsigned integer: " + S, V);
+ }
+}
+
+// Check parameter attributes against a function type.
+// The value V is printed in error messages.
+void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
+ const Value *V, bool IsIntrinsic,
+ bool IsInlineAsm) {
+ if (Attrs.isEmpty())
+ return;
+
+ if (AttributeListsVisited.insert(Attrs.getRawPointer()).second) {
+ Check(Attrs.hasParentContext(Context),
+ "Attribute list does not match Module context!", &Attrs, V);
+ for (const auto &AttrSet : Attrs) {
+ Check(!AttrSet.hasAttributes() || AttrSet.hasParentContext(Context),
+ "Attribute set does not match Module context!", &AttrSet, V);
+ for (const auto &A : AttrSet) {
+ Check(A.hasParentContext(Context),
+ "Attribute does not match Module context!", &A, V);
+ }
+ }
+ }
+
+ bool SawNest = false;
+ bool SawReturned = false;
+ bool SawSRet = false;
+ bool SawSwiftSelf = false;
+ bool SawSwiftAsync = false;
+ bool SawSwiftError = false;
+
+ // Verify return value attributes.
+ AttributeSet RetAttrs = Attrs.getRetAttrs();
+ for (Attribute RetAttr : RetAttrs)
+ Check(RetAttr.isStringAttribute() ||
+ Attribute::canUseAsRetAttr(RetAttr.getKindAsEnum()),
+ "Attribute '" + RetAttr.getAsString() +
+ "' does not apply to function return values",
+ V);
+
+ verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
+
+ // Verify parameter attributes.
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
+ Type *Ty = FT->getParamType(i);
+ AttributeSet ArgAttrs = Attrs.getParamAttrs(i);
+
+ if (!IsIntrinsic) {
+ Check(!ArgAttrs.hasAttribute(Attribute::ImmArg),
+ "immarg attribute only applies to intrinsics", V);
+ if (!IsInlineAsm)
+ Check(!ArgAttrs.hasAttribute(Attribute::ElementType),
+ "Attribute 'elementtype' can only be applied to intrinsics"
+ " and inline asm.",
+ V);
+ }
+
+ verifyParameterAttrs(ArgAttrs, Ty, V);
+
+ if (ArgAttrs.hasAttribute(Attribute::Nest)) {
+ Check(!SawNest, "More than one parameter has attribute nest!", V);
+ SawNest = true;
+ }
+
+ if (ArgAttrs.hasAttribute(Attribute::Returned)) {
+ Check(!SawReturned, "More than one parameter has attribute returned!", V);
+ Check(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
+ "Incompatible argument and return types for 'returned' attribute",
+ V);
+ SawReturned = true;
+ }
+
+ if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
+ Check(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
+ Check(i == 0 || i == 1,
+ "Attribute 'sret' is not on first or second parameter!", V);
+ SawSRet = true;
+ }
+
+ if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
+ Check(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
+ SawSwiftSelf = true;
+ }
+
+ if (ArgAttrs.hasAttribute(Attribute::SwiftAsync)) {
+ Check(!SawSwiftAsync, "Cannot have multiple 'swiftasync' parameters!", V);
+ SawSwiftAsync = true;
+ }
+
+ if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
+ Check(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!", V);
+ SawSwiftError = true;
+ }
+
+ if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
+ Check(i == FT->getNumParams() - 1,
+ "inalloca isn't on the last parameter!", V);
+ }
+ }
+
+ if (!Attrs.hasFnAttrs())
+ return;
+
+ verifyAttributeTypes(Attrs.getFnAttrs(), V);
+ for (Attribute FnAttr : Attrs.getFnAttrs())
+ Check(FnAttr.isStringAttribute() ||
+ Attribute::canUseAsFnAttr(FnAttr.getKindAsEnum()),
+ "Attribute '" + FnAttr.getAsString() +
+ "' does not apply to functions!",
+ V);
+
+ Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
+ Attrs.hasFnAttr(Attribute::ReadOnly)),
+ "Attributes 'readnone and readonly' are incompatible!", V);
+
+ Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
+ Attrs.hasFnAttr(Attribute::WriteOnly)),
+ "Attributes 'readnone and writeonly' are incompatible!", V);
+
+ Check(!(Attrs.hasFnAttr(Attribute::ReadOnly) &&
+ Attrs.hasFnAttr(Attribute::WriteOnly)),
+ "Attributes 'readonly and writeonly' are incompatible!", V);
+
+ Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
+ Attrs.hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly)),
+ "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
+ "incompatible!",
+ V);
+
+ Check(!(Attrs.hasFnAttr(Attribute::ReadNone) &&
+ Attrs.hasFnAttr(Attribute::InaccessibleMemOnly)),
+ "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
+
+ Check(!(Attrs.hasFnAttr(Attribute::NoInline) &&
+ Attrs.hasFnAttr(Attribute::AlwaysInline)),
+ "Attributes 'noinline and alwaysinline' are incompatible!", V);
+
+ if (Attrs.hasFnAttr(Attribute::OptimizeNone)) {
+ Check(Attrs.hasFnAttr(Attribute::NoInline),
+ "Attribute 'optnone' requires 'noinline'!", V);
+
+ Check(!Attrs.hasFnAttr(Attribute::OptimizeForSize),
+ "Attributes 'optsize and optnone' are incompatible!", V);
+
+ Check(!Attrs.hasFnAttr(Attribute::MinSize),
+ "Attributes 'minsize and optnone' are incompatible!", V);
+ }
+
+ if (Attrs.hasFnAttr(Attribute::JumpTable)) {
+ const GlobalValue *GV = cast<GlobalValue>(V);
+ Check(GV->hasGlobalUnnamedAddr(),
+ "Attribute 'jumptable' requires 'unnamed_addr'", V);
+ }
+
+ if (Attrs.hasFnAttr(Attribute::AllocSize)) {
+ std::pair<unsigned, Optional<unsigned>> Args =
+ Attrs.getFnAttrs().getAllocSizeArgs();
+
+ auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
+ if (ParamNo >= FT->getNumParams()) {
+ CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
+ return false;
+ }
+
+ if (!FT->getParamType(ParamNo)->isIntegerTy()) {
+ CheckFailed("'allocsize' " + Name +
+ " argument must refer to an integer parameter",
+ V);
+ return false;
+ }
+
+ return true;
+ };
+
+ if (!CheckParam("element size", Args.first))
+ return;
+
+ if (Args.second && !CheckParam("number of elements", *Args.second))
+ return;
+ }
+
+ if (Attrs.hasFnAttr(Attribute::AllocKind)) {
+ AllocFnKind K = Attrs.getAllocKind();
+ AllocFnKind Type =
+ K & (AllocFnKind::Alloc | AllocFnKind::Realloc | AllocFnKind::Free);
+ if (!is_contained(
+ {AllocFnKind::Alloc, AllocFnKind::Realloc, AllocFnKind::Free},
+ Type))
+ CheckFailed(
+ "'allockind()' requires exactly one of alloc, realloc, and free");
+ if ((Type == AllocFnKind::Free) &&
+ ((K & (AllocFnKind::Uninitialized | AllocFnKind::Zeroed |
+ AllocFnKind::Aligned)) != AllocFnKind::Unknown))
+ CheckFailed("'allockind(\"free\")' doesn't allow uninitialized, zeroed, "
+ "or aligned modifiers.");
+ AllocFnKind ZeroedUninit = AllocFnKind::Uninitialized | AllocFnKind::Zeroed;
+ if ((K & ZeroedUninit) == ZeroedUninit)
+ CheckFailed("'allockind()' can't be both zeroed and uninitialized");
+ }
+
+ if (Attrs.hasFnAttr(Attribute::VScaleRange)) {
+ unsigned VScaleMin = Attrs.getFnAttrs().getVScaleRangeMin();
+ if (VScaleMin == 0)
+ CheckFailed("'vscale_range' minimum must be greater than 0", V);
+
+ Optional<unsigned> VScaleMax = Attrs.getFnAttrs().getVScaleRangeMax();
+ if (VScaleMax && VScaleMin > VScaleMax)
+ CheckFailed("'vscale_range' minimum cannot be greater than maximum", V);
+ }
+
+ if (Attrs.hasFnAttr("frame-pointer")) {
+ StringRef FP = Attrs.getFnAttr("frame-pointer").getValueAsString();
+ if (FP != "all" && FP != "non-leaf" && FP != "none")
+ CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
+ }
+
+ checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-prefix", V);
+ checkUnsignedBaseTenFuncAttr(Attrs, "patchable-function-entry", V);
+ checkUnsignedBaseTenFuncAttr(Attrs, "warn-stack-size", V);
+}
+
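+// A valid function-level !prof attachment looks like, e.g. (illustrative):
+//   define void @f() !prof !0 { ... }
+//   !0 = !{!"function_entry_count", i64 100}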
+void Verifier::verifyFunctionMetadata(
+ ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
+ for (const auto &Pair : MDs) {
+ if (Pair.first == LLVMContext::MD_prof) {
+ MDNode *MD = Pair.second;
+ Check(MD->getNumOperands() >= 2,
+ "!prof annotations should have no less than 2 operands", MD);
+
+ // Check first operand.
+ Check(MD->getOperand(0) != nullptr, "first operand should not be null",
+ MD);
+ Check(isa<MDString>(MD->getOperand(0)),
+ "expected string with name of the !prof annotation", MD);
+ MDString *MDS = cast<MDString>(MD->getOperand(0));
+ StringRef ProfName = MDS->getString();
+ Check(ProfName.equals("function_entry_count") ||
+ ProfName.equals("synthetic_function_entry_count"),
+ "first operand should be 'function_entry_count'"
+ " or 'synthetic_function_entry_count'",
+ MD);
+
+ // Check second operand.
+ Check(MD->getOperand(1) != nullptr, "second operand should not be null",
+ MD);
+ Check(isa<ConstantAsMetadata>(MD->getOperand(1)),
+ "expected integer argument to function_entry_count", MD);
+ }
+ }
+}
+
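+// Constants can nest arbitrarily deeply, so walk them with an explicit
+// worklist and a visited set instead of recursing, which also keeps shared
+// subexpressions from being checked more than once.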
+void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
+ if (!ConstantExprVisited.insert(EntryC).second)
+ return;
+
+ SmallVector<const Constant *, 16> Stack;
+ Stack.push_back(EntryC);
+
+ while (!Stack.empty()) {
+ const Constant *C = Stack.pop_back_val();
+
+ // Check this constant expression.
+ if (const auto *CE = dyn_cast<ConstantExpr>(C))
+ visitConstantExpr(CE);
+
+ if (const auto *GV = dyn_cast<GlobalValue>(C)) {
+ // Global Values get visited separately, but we do need to make sure
+ // that the global value is in the correct module
+ Check(GV->getParent() == &M, "Referencing global in another module!",
+ EntryC, &M, GV, GV->getParent());
+ continue;
+ }
+
+ // Visit all sub-expressions.
+ for (const Use &U : C->operands()) {
+ const auto *OpC = dyn_cast<Constant>(U);
+ if (!OpC)
+ continue;
+ if (!ConstantExprVisited.insert(OpC).second)
+ continue;
+ Stack.push_back(OpC);
+ }
+ }
+}
+
+void Verifier::visitConstantExpr(const ConstantExpr *CE) {
+ if (CE->getOpcode() == Instruction::BitCast)
+ Check(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
+ CE->getType()),
+ "Invalid bitcast", CE);
+}
+
+bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
+ // There shouldn't be more attribute sets than there are parameters plus the
+ // function and return value.
+ return Attrs.getNumAttrSets() <= Params + 2;
+}
+
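+// An indirect ("*m"-style) constraint passes a pointer whose pointee type the
+// verifier can no longer infer from the pointer itself, so the operand must
+// carry an 'elementtype' attribute spelling it out.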
+void Verifier::verifyInlineAsmCall(const CallBase &Call) {
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
+ unsigned ArgNo = 0;
+ for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
+ // Only deal with constraints that correspond to call arguments.
+ if (!CI.hasArg())
+ continue;
+
+ if (CI.isIndirect) {
+ const Value *Arg = Call.getArgOperand(ArgNo);
+ Check(Arg->getType()->isPointerTy(),
+ "Operand for indirect constraint must have pointer type", &Call);
+
+ Check(Call.getParamElementType(ArgNo),
+ "Operand for indirect constraint must have elementtype attribute",
+ &Call);
+ } else {
+ Check(!Call.paramHasAttr(ArgNo, Attribute::ElementType),
+ "Elementtype attribute can only be applied for indirect "
+ "constraints",
+ &Call);
+ }
+
+ ArgNo++;
+ }
+}
+
+/// Verify that statepoint intrinsic is well formed.
+void Verifier::verifyStatepoint(const CallBase &Call) {
+ assert(Call.getCalledFunction() &&
+ Call.getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::experimental_gc_statepoint);
+
+ Check(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
+ !Call.onlyAccessesArgMemory(),
+ "gc.statepoint must read and write all memory to preserve "
+ "reordering restrictions required by safepoint semantics",
+ Call);
+
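+ // Fixed operand layout (illustrative; call args occupy positions 5..):
+ //   (id, num-patch-bytes, target, num-call-args, flags, call-args...,
+ //    num-transition-args, num-deopt-args)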
+ const int64_t NumPatchBytes =
+ cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
+ assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
+ Check(NumPatchBytes >= 0,
+ "gc.statepoint number of patchable bytes must be "
+ "positive",
+ Call);
+
+ Type *TargetElemType = Call.getParamElementType(2);
+ Check(TargetElemType,
+ "gc.statepoint callee argument must have elementtype attribute", Call);
+ FunctionType *TargetFuncType = dyn_cast<FunctionType>(TargetElemType);
+ Check(TargetFuncType,
+ "gc.statepoint callee elementtype must be function type", Call);
+
+ const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
+ Check(NumCallArgs >= 0,
+ "gc.statepoint number of arguments to underlying call "
+ "must be positive",
+ Call);
+ const int NumParams = (int)TargetFuncType->getNumParams();
+ if (TargetFuncType->isVarArg()) {
+ Check(NumCallArgs >= NumParams,
+ "gc.statepoint mismatch in number of vararg call args", Call);
+
+ // TODO: Remove this limitation
+ Check(TargetFuncType->getReturnType()->isVoidTy(),
+ "gc.statepoint doesn't support wrapping non-void "
+ "vararg functions yet",
+ Call);
+ } else
+ Check(NumCallArgs == NumParams,
+ "gc.statepoint mismatch in number of call args", Call);
+
+ const uint64_t Flags
+ = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
+ Check((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
+ "unknown flag used in gc.statepoint flags argument", Call);
+
+ // Verify that the types of the call parameter arguments match
+ // the type of the wrapped callee.
+ AttributeList Attrs = Call.getAttributes();
+ for (int i = 0; i < NumParams; i++) {
+ Type *ParamType = TargetFuncType->getParamType(i);
+ Type *ArgType = Call.getArgOperand(5 + i)->getType();
+ Check(ArgType == ParamType,
+ "gc.statepoint call argument does not match wrapped "
+ "function type",
+ Call);
+
+ if (TargetFuncType->isVarArg()) {
+ AttributeSet ArgAttrs = Attrs.getParamAttrs(5 + i);
+ Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
+ "Attribute 'sret' cannot be used for vararg call arguments!", Call);
+ }
+ }
+
+ const int EndCallArgsInx = 4 + NumCallArgs;
+
+ const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
+ Check(isa<ConstantInt>(NumTransitionArgsV),
+ "gc.statepoint number of transition arguments "
+ "must be constant integer",
+ Call);
+ const int NumTransitionArgs =
+ cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
+ Check(NumTransitionArgs == 0,
+ "gc.statepoint w/inline transition bundle is deprecated", Call);
+ const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
+
+ const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
+ Check(isa<ConstantInt>(NumDeoptArgsV),
+ "gc.statepoint number of deoptimization arguments "
+ "must be constant integer",
+ Call);
+ const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
+ Check(NumDeoptArgs == 0,
+ "gc.statepoint w/inline deopt operands is deprecated", Call);
+
+ const int ExpectedNumArgs = 7 + NumCallArgs;
+ Check(ExpectedNumArgs == (int)Call.arg_size(),
+ "gc.statepoint too many arguments", Call);
+
+ // Check that the only uses of this gc.statepoint are gc.result or
+ // gc.relocate calls which are tied to this statepoint and thus part
+ // of the same statepoint sequence
+ for (const User *U : Call.users()) {
+ const CallInst *UserCall = dyn_cast<const CallInst>(U);
+ Check(UserCall, "illegal use of statepoint token", Call, U);
+ if (!UserCall)
+ continue;
+ Check(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
+ "gc.result or gc.relocate are the only value uses "
+ "of a gc.statepoint",
+ Call, U);
+ if (isa<GCResultInst>(UserCall)) {
+ Check(UserCall->getArgOperand(0) == &Call,
+ "gc.result connected to wrong gc.statepoint", Call, UserCall);
+ } else if (isa<GCRelocateInst>(Call)) {
+ Check(UserCall->getArgOperand(0) == &Call,
+ "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
+ }
+ }
+
+ // Note: It is legal for a single derived pointer to be listed multiple
+ // times. It's non-optimal, but it is legal. It can also happen after
+ // insertion if we strip a bitcast away.
+ // Note: It is really tempting to check that each base is relocated and
+ // that a derived pointer is never reused as a base pointer. This turns
+ // out to be problematic since optimizations run after safepoint insertion
+ // can recognize equality properties that the insertion logic doesn't know
+ // about. See example statepoint.ll in the verifier subdirectory.
+}
+
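+// llvm.localrecover takes an index into the llvm.localescape list of its
+// parent function; FrameEscapeInfo pairs the escape count with the largest
+// recovered index seen, so the two can be validated against each other here.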
+void Verifier::verifyFrameRecoverIndices() {
+ for (auto &Counts : FrameEscapeInfo) {
+ Function *F = Counts.first;
+ unsigned EscapedObjectCount = Counts.second.first;
+ unsigned MaxRecoveredIndex = Counts.second.second;
+ Check(MaxRecoveredIndex <= EscapedObjectCount,
+ "all indices passed to llvm.localrecover must be less than the "
+ "number of arguments passed to llvm.localescape in the parent "
+ "function",
+ F);
+ }
+}
+
+static Instruction *getSuccPad(Instruction *Terminator) {
+ BasicBlock *UnwindDest;
+ if (auto *II = dyn_cast<InvokeInst>(Terminator))
+ UnwindDest = II->getUnwindDest();
+ else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
+ UnwindDest = CSI->getUnwindDest();
+ else
+ UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
+ return UnwindDest->getFirstNonPHI();
+}
+
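+// Walk the unwind edges between sibling funclets looking for cycles: two EH
+// pads that each name the other as an unwind destination could never both
+// run, so such a cycle is rejected below.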
+void Verifier::verifySiblingFuncletUnwinds() {
+ SmallPtrSet<Instruction *, 8> Visited;
+ SmallPtrSet<Instruction *, 8> Active;
+ for (const auto &Pair : SiblingFuncletInfo) {
+ Instruction *PredPad = Pair.first;
+ if (Visited.count(PredPad))
+ continue;
+ Active.insert(PredPad);
+ Instruction *Terminator = Pair.second;
+ do {
+ Instruction *SuccPad = getSuccPad(Terminator);
+ if (Active.count(SuccPad)) {
+ // Found a cycle; report error
+ Instruction *CyclePad = SuccPad;
+ SmallVector<Instruction *, 8> CycleNodes;
+ do {
+ CycleNodes.push_back(CyclePad);
+ Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
+ if (CycleTerminator != CyclePad)
+ CycleNodes.push_back(CycleTerminator);
+ CyclePad = getSuccPad(CycleTerminator);
+ } while (CyclePad != SuccPad);
+ Check(false, "EH pads can't handle each other's exceptions",
+ ArrayRef<Instruction *>(CycleNodes));
+ }
+ // Don't re-walk a node we've already checked
+ if (!Visited.insert(SuccPad).second)
+ break;
+ // Walk to this successor if it has a map entry.
+ PredPad = SuccPad;
+ auto TermI = SiblingFuncletInfo.find(PredPad);
+ if (TermI == SiblingFuncletInfo.end())
+ break;
+ Terminator = TermI->second;
+ Active.insert(PredPad);
+ } while (true);
+ // Each node only has one successor, so we've walked all the active
+ // nodes' successors.
+ Active.clear();
+ }
+}
+
+// visitFunction - Verify that a function is ok.
+//
+void Verifier::visitFunction(const Function &F) {
+ visitGlobalValue(F);
+
+ // Check function arguments.
+ FunctionType *FT = F.getFunctionType();
+ unsigned NumArgs = F.arg_size();
+
+ Check(&Context == &F.getContext(),
+ "Function context does not match Module context!", &F);
+
+ Check(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
+ Check(FT->getNumParams() == NumArgs,
+ "# formal arguments must match # of arguments for function type!", &F,
+ FT);
+ Check(F.getReturnType()->isFirstClassType() ||
+ F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
+ "Functions cannot return aggregate values!", &F);
+
+ Check(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
+ "Invalid struct return type!", &F);
+
+ AttributeList Attrs = F.getAttributes();
+
+ Check(verifyAttributeCount(Attrs, FT->getNumParams()),
+ "Attribute after last parameter!", &F);
+
+ bool IsIntrinsic = F.isIntrinsic();
+
+ // Check function attributes.
+ verifyFunctionAttrs(FT, Attrs, &F, IsIntrinsic, /* IsInlineAsm */ false);
+
+ // On function declarations/definitions, we do not support the builtin
+ // attribute. We do not check this in VerifyFunctionAttrs since that is
+ // checking for Attributes that can/can not ever be on functions.
+ Check(!Attrs.hasFnAttr(Attribute::Builtin),
+ "Attribute 'builtin' can only be applied to a callsite.", &F);
+
+ Check(!Attrs.hasAttrSomewhere(Attribute::ElementType),
+ "Attribute 'elementtype' can only be applied to a callsite.", &F);
+
+ // Check that this function meets the restrictions on this calling convention.
+ // Sometimes varargs is used for perfect-forwarding thunks, so some of these
+ // restrictions can be lifted.
+ switch (F.getCallingConv()) {
+ default:
+ case CallingConv::C:
+ break;
+ case CallingConv::X86_INTR: {
+ Check(F.arg_empty() || Attrs.hasParamAttr(0, Attribute::ByVal),
+ "Calling convention parameter requires byval", &F);
+ break;
+ }
+ case CallingConv::AMDGPU_KERNEL:
+ case CallingConv::SPIR_KERNEL:
+ Check(F.getReturnType()->isVoidTy(),
+ "Calling convention requires void return type", &F);
+ LLVM_FALLTHROUGH;
+ case CallingConv::AMDGPU_VS:
+ case CallingConv::AMDGPU_HS:
+ case CallingConv::AMDGPU_GS:
+ case CallingConv::AMDGPU_PS:
+ case CallingConv::AMDGPU_CS:
+ Check(!F.hasStructRetAttr(), "Calling convention does not allow sret", &F);
+ if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
+ const unsigned StackAS = DL.getAllocaAddrSpace();
+ unsigned i = 0;
+ for (const Argument &Arg : F.args()) {
+ Check(!Attrs.hasParamAttr(i, Attribute::ByVal),
+ "Calling convention disallows byval", &F);
+ Check(!Attrs.hasParamAttr(i, Attribute::Preallocated),
+ "Calling convention disallows preallocated", &F);
+ Check(!Attrs.hasParamAttr(i, Attribute::InAlloca),
+ "Calling convention disallows inalloca", &F);
+
+ if (Attrs.hasParamAttr(i, Attribute::ByRef)) {
+ // FIXME: Should also disallow LDS and GDS, but we don't have the enum
+ // value here.
+ Check(Arg.getType()->getPointerAddressSpace() != StackAS,
+ "Calling convention disallows stack byref", &F);
+ }
+
+ ++i;
+ }
+ }
+
+ LLVM_FALLTHROUGH;
+ case CallingConv::Fast:
+ case CallingConv::Cold:
+ case CallingConv::Intel_OCL_BI:
+ case CallingConv::PTX_Kernel:
+ case CallingConv::PTX_Device:
+ Check(!F.isVarArg(),
+ "Calling convention does not support varargs or "
+ "perfect forwarding!",
+ &F);
+ break;
+ }
+
+ // Check that the argument values match the function type for this function...
+ unsigned i = 0;
+ for (const Argument &Arg : F.args()) {
+ Check(Arg.getType() == FT->getParamType(i),
+ "Argument value does not match function argument type!", &Arg,
+ FT->getParamType(i));
+ Check(Arg.getType()->isFirstClassType(),
+ "Function arguments must have first-class types!", &Arg);
+ if (!IsIntrinsic) {
+ Check(!Arg.getType()->isMetadataTy(),
+ "Function takes metadata but isn't an intrinsic", &Arg, &F);
+ Check(!Arg.getType()->isTokenTy(),
+ "Function takes token but isn't an intrinsic", &Arg, &F);
+ Check(!Arg.getType()->isX86_AMXTy(),
+ "Function takes x86_amx but isn't an intrinsic", &Arg, &F);
+ }
+
+ // Check that swifterror argument is only used by loads and stores.
+ if (Attrs.hasParamAttr(i, Attribute::SwiftError)) {
+ verifySwiftErrorValue(&Arg);
+ }
+ ++i;
+ }
+
+ if (!IsIntrinsic) {
+ Check(!F.getReturnType()->isTokenTy(),
+ "Function returns a token but isn't an intrinsic", &F);
+ Check(!F.getReturnType()->isX86_AMXTy(),
+ "Function returns a x86_amx but isn't an intrinsic", &F);
+ }
+
+ // Get the function metadata attachments.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ F.getAllMetadata(MDs);
+ assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
+ verifyFunctionMetadata(MDs);
+
+ // Check validity of the personality function
+ if (F.hasPersonalityFn()) {
+ auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
+ if (Per)
+ Check(Per->getParent() == F.getParent(),
+ "Referencing personality function in another module!", &F,
+ F.getParent(), Per, Per->getParent());
+ }
+
+ if (F.isMaterializable()) {
+ // Function has a body somewhere we can't see.
+ Check(MDs.empty(), "unmaterialized function cannot have metadata", &F,
+ MDs.empty() ? nullptr : MDs.front().second);
+ } else if (F.isDeclaration()) {
+ for (const auto &I : MDs) {
+ // This is used for call site debug information.
+ CheckDI(I.first != LLVMContext::MD_dbg ||
+ !cast<DISubprogram>(I.second)->isDistinct(),
+ "function declaration may only have a unique !dbg attachment",
+ &F);
+ Check(I.first != LLVMContext::MD_prof,
+ "function declaration may not have a !prof attachment", &F);
+
+ // Verify the metadata itself.
+ visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
+ }
+ Check(!F.hasPersonalityFn(),
+ "Function declaration shouldn't have a personality routine", &F);
+ } else {
+ // Verify that this function (which has a body) is not named "llvm.*". It
+ // is not legal to define intrinsics.
+ Check(!IsIntrinsic, "llvm intrinsics cannot be defined!", &F);
+
+ // Check the entry node
+ const BasicBlock *Entry = &F.getEntryBlock();
+ Check(pred_empty(Entry),
+ "Entry block to function must not have predecessors!", Entry);
+
+ // The address of the entry block cannot be taken, unless it is dead.
+ if (Entry->hasAddressTaken()) {
+ Check(!BlockAddress::lookup(Entry)->isConstantUsed(),
+ "blockaddress may not be used with the entry block!", Entry);
+ }
+
+ unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
+ // Visit metadata attachments.
+ for (const auto &I : MDs) {
+ // Verify that the attachment is legal.
+ auto AllowLocs = AreDebugLocsAllowed::No;
+ switch (I.first) {
+ default:
+ break;
+ case LLVMContext::MD_dbg: {
+ ++NumDebugAttachments;
+ CheckDI(NumDebugAttachments == 1,
+ "function must have a single !dbg attachment", &F, I.second);
+ CheckDI(isa<DISubprogram>(I.second),
+ "function !dbg attachment must be a subprogram", &F, I.second);
+ CheckDI(cast<DISubprogram>(I.second)->isDistinct(),
+ "function definition may only have a distinct !dbg attachment",
+ &F);
+
+ auto *SP = cast<DISubprogram>(I.second);
+ const Function *&AttachedTo = DISubprogramAttachments[SP];
+ CheckDI(!AttachedTo || AttachedTo == &F,
+ "DISubprogram attached to more than one function", SP, &F);
+ AttachedTo = &F;
+ AllowLocs = AreDebugLocsAllowed::Yes;
+ break;
+ }
+ case LLVMContext::MD_prof:
+ ++NumProfAttachments;
+ Check(NumProfAttachments == 1,
+ "function must have a single !prof attachment", &F, I.second);
+ break;
+ }
+
+ // Verify the metadata itself.
+ visitMDNode(*I.second, AllowLocs);
+ }
+ }
+
+ // If this function is actually an intrinsic, verify that it is only used in
+ // direct call/invokes, never having its "address taken".
+  // Only do this if the module is materialized; otherwise we don't have all
+  // the uses.
+ if (F.isIntrinsic() && F.getParent()->isMaterialized()) {
+ const User *U;
+ if (F.hasAddressTaken(&U, false, true, false,
+ /*IgnoreARCAttachedCall=*/true))
+ Check(false, "Invalid user of intrinsic instruction!", U);
+ }
+
+ // Check intrinsics' signatures.
+ switch (F.getIntrinsicID()) {
+ case Intrinsic::experimental_gc_get_pointer_base: {
+ FunctionType *FT = F.getFunctionType();
+ Check(FT->getNumParams() == 1, "wrong number of parameters", F);
+ Check(isa<PointerType>(F.getReturnType()),
+ "gc.get.pointer.base must return a pointer", F);
+ Check(FT->getParamType(0) == F.getReturnType(),
+ "gc.get.pointer.base operand and result must be of the same type", F);
+ break;
+ }
+ case Intrinsic::experimental_gc_get_pointer_offset: {
+ FunctionType *FT = F.getFunctionType();
+ Check(FT->getNumParams() == 1, "wrong number of parameters", F);
+ Check(isa<PointerType>(FT->getParamType(0)),
+ "gc.get.pointer.offset operand must be a pointer", F);
+ Check(F.getReturnType()->isIntegerTy(),
+ "gc.get.pointer.offset must return integer", F);
+ break;
+ }
+ }
+
+ auto *N = F.getSubprogram();
+ HasDebugInfo = (N != nullptr);
+ if (!HasDebugInfo)
+ return;
+
+  // Check that all !dbg attachments lead back to N.
+ //
+ // FIXME: Check this incrementally while visiting !dbg attachments.
+ // FIXME: Only check when N is the canonical subprogram for F.
+ SmallPtrSet<const MDNode *, 32> Seen;
+ auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
+ // Be careful about using DILocation here since we might be dealing with
+ // broken code (this is the Verifier after all).
+ const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
+ if (!DL)
+ return;
+ if (!Seen.insert(DL).second)
+ return;
+
+ Metadata *Parent = DL->getRawScope();
+ CheckDI(Parent && isa<DILocalScope>(Parent),
+ "DILocation's scope must be a DILocalScope", N, &F, &I, DL, Parent);
+
+ DILocalScope *Scope = DL->getInlinedAtScope();
+ Check(Scope, "Failed to find DILocalScope", DL);
+
+ if (!Seen.insert(Scope).second)
+ return;
+
+ DISubprogram *SP = Scope->getSubprogram();
+
+ // Scope and SP could be the same MDNode and we don't want to skip
+ // validation in that case
+ if (SP && ((Scope != SP) && !Seen.insert(SP).second))
+ return;
+
+ CheckDI(SP->describes(&F),
+ "!dbg attachment points at wrong subprogram for function", N, &F,
+ &I, DL, Scope, SP);
+ };
+ for (auto &BB : F)
+ for (auto &I : BB) {
+ VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
+ // The llvm.loop annotations also contain two DILocations.
+ if (auto MD = I.getMetadata(LLVMContext::MD_loop))
+ for (unsigned i = 1; i < MD->getNumOperands(); ++i)
+ VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
+ if (BrokenDebugInfo)
+ return;
+ }
+}
+
+// visitBasicBlock - Verify that a basic block is well formed...
+//
+void Verifier::visitBasicBlock(BasicBlock &BB) {
+ InstsInThisBlock.clear();
+
+ // Ensure that basic blocks have terminators!
+ Check(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
+
+ // Check constraints that this basic block imposes on all of the PHI nodes in
+ // it.
+ if (isa<PHINode>(BB.front())) {
+ SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
+ SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
+ llvm::sort(Preds);
+ for (const PHINode &PN : BB.phis()) {
+ Check(PN.getNumIncomingValues() == Preds.size(),
+ "PHINode should have one entry for each predecessor of its "
+ "parent basic block!",
+ &PN);
+
+ // Get and sort all incoming values in the PHI node...
+ Values.clear();
+ Values.reserve(PN.getNumIncomingValues());
+ for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
+ Values.push_back(
+ std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
+ llvm::sort(Values);
+
+ for (unsigned i = 0, e = Values.size(); i != e; ++i) {
+        // Check to make sure that if there is more than one entry for a
+        // particular basic block in this PHI node, the incoming values are
+        // all identical.
+        //
+ Check(i == 0 || Values[i].first != Values[i - 1].first ||
+ Values[i].second == Values[i - 1].second,
+ "PHI node has multiple entries for the same basic block with "
+ "different incoming values!",
+ &PN, Values[i].first, Values[i].second, Values[i - 1].second);
+
+ // Check to make sure that the predecessors and PHI node entries are
+ // matched up.
+ Check(Values[i].first == Preds[i],
+ "PHI node entries do not match predecessors!", &PN,
+ Values[i].first, Preds[i]);
+ }
+ }
+ }
+
+ // Check that all instructions have their parent pointers set up correctly.
+  for (auto &I : BB)
+    Check(I.getParent() == &BB, "Instruction has bogus parent pointer!");
+}
+
+void Verifier::visitTerminator(Instruction &I) {
+ // Ensure that terminators only exist at the end of the basic block.
+ Check(&I == I.getParent()->getTerminator(),
+ "Terminator found in the middle of a basic block!", I.getParent());
+ visitInstruction(I);
+}
+
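+// A conditional branch must have an 'i1' condition. For example,
+//   br i32 %flag, label %then, label %else
+// is rejected, while
+//   %c = icmp ne i32 %flag, 0
+//   br i1 %c, label %then, label %else
+// is well formed.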
+void Verifier::visitBranchInst(BranchInst &BI) {
+ if (BI.isConditional()) {
+ Check(BI.getCondition()->getType()->isIntegerTy(1),
+ "Branch condition is not 'i1' type!", &BI, BI.getCondition());
+ }
+ visitTerminator(BI);
+}
+
+void Verifier::visitReturnInst(ReturnInst &RI) {
+ Function *F = RI.getParent()->getParent();
+ unsigned N = RI.getNumOperands();
+ if (F->getReturnType()->isVoidTy())
+ Check(N == 0,
+ "Found return instr that returns non-void in Function of void "
+ "return type!",
+ &RI, F->getReturnType());
+ else
+ Check(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
+ "Function return type does not match operand "
+ "type of return inst!",
+ &RI, F->getReturnType());
+
+ // Check to make sure that the return value has necessary properties for
+ // terminators...
+ visitTerminator(RI);
+}
+
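+// Switch case values must match the type of the switched-on value and must
+// be pairwise distinct. For example,
+//   switch i32 %x, label %default [ i32 0, label %a
+//                                   i32 0, label %b ]
+// is rejected because the case value 0 appears twice.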
+void Verifier::visitSwitchInst(SwitchInst &SI) {
+ Check(SI.getType()->isVoidTy(), "Switch must have void result type!", &SI);
+ // Check to make sure that all of the constants in the switch instruction
+ // have the same type as the switched-on value.
+ Type *SwitchTy = SI.getCondition()->getType();
+ SmallPtrSet<ConstantInt*, 32> Constants;
+ for (auto &Case : SI.cases()) {
+ Check(Case.getCaseValue()->getType() == SwitchTy,
+ "Switch constants must all be same type as switch value!", &SI);
+ Check(Constants.insert(Case.getCaseValue()).second,
+ "Duplicate integer as switch case", &SI, Case.getCaseValue());
+ }
+
+ visitTerminator(SI);
+}
+
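+// An indirectbr takes a pointer operand (typically a blockaddress) and a
+// list of destination labels, e.g.
+//   indirectbr i8* %dest, [label %bb1, label %bb2]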
+void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
+ Check(BI.getAddress()->getType()->isPointerTy(),
+ "Indirectbr operand must have pointer type!", &BI);
+ for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
+ Check(BI.getDestination(i)->getType()->isLabelTy(),
+ "Indirectbr destinations must all have pointer type!", &BI);
+
+ visitTerminator(BI);
+}
+
+void Verifier::visitCallBrInst(CallBrInst &CBI) {
+ Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
+ const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
+ Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
+ for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i)
+ Check(CBI.getSuccessor(i)->getType()->isLabelTy(),
+ "Callbr successors must all have pointer type!", &CBI);
+ for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) {
+ Check(i >= CBI.arg_size() || !isa<BasicBlock>(CBI.getOperand(i)),
+ "Using an unescaped label as a callbr argument!", &CBI);
+ if (isa<BasicBlock>(CBI.getOperand(i)))
+ for (unsigned j = i + 1; j != e; ++j)
+ Check(CBI.getOperand(i) != CBI.getOperand(j),
+ "Duplicate callbr destination!", &CBI);
+ }
+ {
+ SmallPtrSet<BasicBlock *, 4> ArgBBs;
+ for (Value *V : CBI.args())
+ if (auto *BA = dyn_cast<BlockAddress>(V))
+ ArgBBs.insert(BA->getBasicBlock());
+ for (BasicBlock *BB : CBI.getIndirectDests())
+ Check(ArgBBs.count(BB), "Indirect label missing from arglist.", &CBI);
+ }
+
+ verifyInlineAsmCall(CBI);
+ visitTerminator(CBI);
+}
+
+void Verifier::visitSelectInst(SelectInst &SI) {
+ Check(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
+ SI.getOperand(2)),
+ "Invalid operands for select instruction!", &SI);
+
+ Check(SI.getTrueValue()->getType() == SI.getType(),
+ "Select values must have same type as select instruction!", &SI);
+ visitInstruction(SI);
+}
+
+/// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
+/// a pass; if any exist, it's an error.
+///
+void Verifier::visitUserOp1(Instruction &I) {
+ Check(false, "User-defined operators should not live outside of a pass!", &I);
+}
+
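+// A trunc must go from a wider integer (or integer vector) type to a
+// strictly narrower one. For example,
+//   %t = trunc i32 %x to i8    ; well formed
+//   %u = trunc i8 %y to i32    ; rejected: destination is wider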
+void Verifier::visitTruncInst(TruncInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Check(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
+ Check(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "trunc source and destination must both be a vector or neither", &I);
+ Check(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitZExtInst(ZExtInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ Check(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
+ Check(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "zext source and destination must both be a vector or neither", &I);
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Check(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitSExtInst(SExtInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Check(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
+ Check(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "sext source and destination must both be a vector or neither", &I);
+ Check(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPTruncInst(FPTruncInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Check(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
+ Check(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "fptrunc source and destination must both be a vector or neither", &I);
+ Check(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPExtInst(FPExtInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ // Get the size of the types in bits, we'll need this later
+ unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
+ unsigned DestBitSize = DestTy->getScalarSizeInBits();
+
+ Check(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
+ Check(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(),
+ "fpext source and destination must both be a vector or neither", &I);
+ Check(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitUIToFPInst(UIToFPInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Check(SrcVec == DstVec,
+ "UIToFP source and dest must both be vector or scalar", &I);
+ Check(SrcTy->isIntOrIntVectorTy(),
+ "UIToFP source must be integer or integer vector", &I);
+ Check(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
+ &I);
+
+ if (SrcVec && DstVec)
+ Check(cast<VectorType>(SrcTy)->getElementCount() ==
+ cast<VectorType>(DestTy)->getElementCount(),
+ "UIToFP source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitSIToFPInst(SIToFPInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Check(SrcVec == DstVec,
+ "SIToFP source and dest must both be vector or scalar", &I);
+ Check(SrcTy->isIntOrIntVectorTy(),
+ "SIToFP source must be integer or integer vector", &I);
+ Check(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
+ &I);
+
+ if (SrcVec && DstVec)
+ Check(cast<VectorType>(SrcTy)->getElementCount() ==
+ cast<VectorType>(DestTy)->getElementCount(),
+ "SIToFP source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPToUIInst(FPToUIInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Check(SrcVec == DstVec,
+ "FPToUI source and dest must both be vector or scalar", &I);
+ Check(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector", &I);
+ Check(DestTy->isIntOrIntVectorTy(),
+ "FPToUI result must be integer or integer vector", &I);
+
+ if (SrcVec && DstVec)
+ Check(cast<VectorType>(SrcTy)->getElementCount() ==
+ cast<VectorType>(DestTy)->getElementCount(),
+ "FPToUI source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
+void Verifier::visitFPToSIInst(FPToSIInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ bool SrcVec = SrcTy->isVectorTy();
+ bool DstVec = DestTy->isVectorTy();
+
+ Check(SrcVec == DstVec,
+ "FPToSI source and dest must both be vector or scalar", &I);
+ Check(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector", &I);
+ Check(DestTy->isIntOrIntVectorTy(),
+ "FPToSI result must be integer or integer vector", &I);
+
+ if (SrcVec && DstVec)
+ Check(cast<VectorType>(SrcTy)->getElementCount() ==
+ cast<VectorType>(DestTy)->getElementCount(),
+ "FPToSI source and dest vector length mismatch", &I);
+
+ visitInstruction(I);
+}
+
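+// A ptrtoint converts a pointer (or vector of pointers) to an integer of
+// matching shape. For example,
+//   %i = ptrtoint i8* %p to i64                  ; well formed
+//   %v = ptrtoint <4 x i8*> %ps to <4 x i64>     ; well formed
+// Mixing scalar and vector, or mismatched vector widths, is rejected below.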
+void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ Check(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
+
+ Check(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
+ &I);
+
+ if (SrcTy->isVectorTy()) {
+ auto *VSrc = cast<VectorType>(SrcTy);
+ auto *VDest = cast<VectorType>(DestTy);
+ Check(VSrc->getElementCount() == VDest->getElementCount(),
+ "PtrToInt Vector width mismatch", &I);
+ }
+
+ visitInstruction(I);
+}
+
+void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
+ // Get the source and destination types
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ Check(SrcTy->isIntOrIntVectorTy(), "IntToPtr source must be an integral", &I);
+ Check(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
+
+ Check(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
+ &I);
+ if (SrcTy->isVectorTy()) {
+ auto *VSrc = cast<VectorType>(SrcTy);
+ auto *VDest = cast<VectorType>(DestTy);
+ Check(VSrc->getElementCount() == VDest->getElementCount(),
+ "IntToPtr Vector width mismatch", &I);
+ }
+ visitInstruction(I);
+}
+
+void Verifier::visitBitCastInst(BitCastInst &I) {
+ Check(
+ CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
+ "Invalid bitcast", &I);
+ visitInstruction(I);
+}
+
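+// An addrspacecast must actually change the address space. For example,
+//   %q = addrspacecast i8* %p to i8 addrspace(1)*   ; well formed
+//   %r = addrspacecast i8* %p to i8*                ; rejected: same space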
+void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
+ Type *SrcTy = I.getOperand(0)->getType();
+ Type *DestTy = I.getType();
+
+ Check(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
+ &I);
+ Check(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
+ &I);
+ Check(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
+ "AddrSpaceCast must be between different address spaces", &I);
+ if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
+ Check(SrcVTy->getElementCount() ==
+ cast<VectorType>(DestTy)->getElementCount(),
+ "AddrSpaceCast vector pointer number of elements mismatch", &I);
+ visitInstruction(I);
+}
+
+/// visitPHINode - Ensure that a PHI node is well formed.
+///
+void Verifier::visitPHINode(PHINode &PN) {
+ // Ensure that the PHI nodes are all grouped together at the top of the block.
+ // This can be tested by checking whether the instruction before this is
+ // either nonexistent (because this is begin()) or is a PHI node. If not,
+ // then there is some other instruction before a PHI.
+ Check(&PN == &PN.getParent()->front() ||
+ isa<PHINode>(--BasicBlock::iterator(&PN)),
+ "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
+
+ // Check that a PHI doesn't yield a Token.
+ Check(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
+
+ // Check that all of the values of the PHI node have the same type as the
+ // result, and that the incoming blocks are really basic blocks.
+ for (Value *IncValue : PN.incoming_values()) {
+ Check(PN.getType() == IncValue->getType(),
+ "PHI node operands are not the same type as the result!", &PN);
+ }
+
+ // All other PHI node constraints are checked in the visitBasicBlock method.
+
+ visitInstruction(PN);
+}
+
+void Verifier::visitCallBase(CallBase &Call) {
+ Check(Call.getCalledOperand()->getType()->isPointerTy(),
+ "Called function must be a pointer!", Call);
+ PointerType *FPTy = cast<PointerType>(Call.getCalledOperand()->getType());
+
+ Check(FPTy->isOpaqueOrPointeeTypeMatches(Call.getFunctionType()),
+ "Called function is not the same type as the call!", Call);
+
+ FunctionType *FTy = Call.getFunctionType();
+
+ // Verify that the correct number of arguments are being passed
+ if (FTy->isVarArg())
+ Check(Call.arg_size() >= FTy->getNumParams(),
+ "Called function requires more parameters than were provided!", Call);
+ else
+ Check(Call.arg_size() == FTy->getNumParams(),
+ "Incorrect number of arguments passed to called function!", Call);
+
+ // Verify that all arguments to the call match the function type.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+ Check(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
+ "Call parameter type does not match function signature!",
+ Call.getArgOperand(i), FTy->getParamType(i), Call);
+
+ AttributeList Attrs = Call.getAttributes();
+
+ Check(verifyAttributeCount(Attrs, Call.arg_size()),
+ "Attribute after last parameter!", Call);
+
+ auto VerifyTypeAlign = [&](Type *Ty, const Twine &Message) {
+ if (!Ty->isSized())
+ return;
+ Align ABIAlign = DL.getABITypeAlign(Ty);
+ Align MaxAlign(ParamMaxAlignment);
+ Check(ABIAlign <= MaxAlign,
+ "Incorrect alignment of " + Message + " to called function!", Call);
+ };
+
+ VerifyTypeAlign(FTy->getReturnType(), "return type");
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+ Type *Ty = FTy->getParamType(i);
+ VerifyTypeAlign(Ty, "argument passed");
+ }
+
+ Function *Callee =
+ dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
+ bool IsIntrinsic = Callee && Callee->isIntrinsic();
+ if (IsIntrinsic)
+ Check(Callee->getValueType() == FTy,
+ "Intrinsic called with incompatible signature", Call);
+
+ if (Attrs.hasFnAttr(Attribute::Speculatable)) {
+ // Don't allow speculatable on call sites, unless the underlying function
+ // declaration is also speculatable.
+ Check(Callee && Callee->isSpeculatable(),
+ "speculatable attribute may not apply to call sites", Call);
+ }
+
+ if (Attrs.hasFnAttr(Attribute::Preallocated)) {
+ Check(Call.getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::call_preallocated_arg,
+ "preallocated as a call site attribute can only be on "
+ "llvm.call.preallocated.arg");
+ }
+
+ // Verify call attributes.
+ verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic, Call.isInlineAsm());
+
+ // Conservatively check the inalloca argument.
+ // We have a bug if we can find that there is an underlying alloca without
+ // inalloca.
+ if (Call.hasInAllocaArgument()) {
+ Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
+ if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
+ Check(AI->isUsedWithInAlloca(),
+ "inalloca argument for call has mismatched alloca", AI, Call);
+ }
+
+  // For each argument of the callsite, if it carries the swifterror
+  // attribute, make sure the underlying alloca or parameter it comes from has
+  // a swifterror attribute as well.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+ if (Call.paramHasAttr(i, Attribute::SwiftError)) {
+ Value *SwiftErrorArg = Call.getArgOperand(i);
+ if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
+ Check(AI->isSwiftError(),
+ "swifterror argument for call has mismatched alloca", AI, Call);
+ continue;
+ }
+ auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
+ Check(ArgI, "swifterror argument should come from an alloca or parameter",
+ SwiftErrorArg, Call);
+ Check(ArgI->hasSwiftErrorAttr(),
+ "swifterror argument for call has mismatched parameter", ArgI,
+ Call);
+ }
+
+ if (Attrs.hasParamAttr(i, Attribute::ImmArg)) {
+ // Don't allow immarg on call sites, unless the underlying declaration
+ // also has the matching immarg.
+ Check(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
+ "immarg may not apply only to call sites", Call.getArgOperand(i),
+ Call);
+ }
+
+ if (Call.paramHasAttr(i, Attribute::ImmArg)) {
+ Value *ArgVal = Call.getArgOperand(i);
+ Check(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
+ "immarg operand has non-immediate parameter", ArgVal, Call);
+ }
+
+ if (Call.paramHasAttr(i, Attribute::Preallocated)) {
+ Value *ArgVal = Call.getArgOperand(i);
+ bool hasOB =
+ Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
+ bool isMustTail = Call.isMustTailCall();
+ Check(hasOB != isMustTail,
+ "preallocated operand either requires a preallocated bundle or "
+ "the call to be musttail (but not both)",
+ ArgVal, Call);
+ }
+ }
+
+ if (FTy->isVarArg()) {
+ // FIXME? is 'nest' even legal here?
+ bool SawNest = false;
+ bool SawReturned = false;
+
+ for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
+ if (Attrs.hasParamAttr(Idx, Attribute::Nest))
+ SawNest = true;
+ if (Attrs.hasParamAttr(Idx, Attribute::Returned))
+ SawReturned = true;
+ }
+
+ // Check attributes on the varargs part.
+ for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
+ Type *Ty = Call.getArgOperand(Idx)->getType();
+ AttributeSet ArgAttrs = Attrs.getParamAttrs(Idx);
+ verifyParameterAttrs(ArgAttrs, Ty, &Call);
+
+ if (ArgAttrs.hasAttribute(Attribute::Nest)) {
+ Check(!SawNest, "More than one parameter has attribute nest!", Call);
+ SawNest = true;
+ }
+
+ if (ArgAttrs.hasAttribute(Attribute::Returned)) {
+ Check(!SawReturned, "More than one parameter has attribute returned!",
+ Call);
+ Check(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
+ "Incompatible argument and return types for 'returned' "
+ "attribute",
+ Call);
+ SawReturned = true;
+ }
+
+ // Statepoint intrinsic is vararg but the wrapped function may be not.
+ // Allow sret here and check the wrapped function in verifyStatepoint.
+ if (!Call.getCalledFunction() ||
+ Call.getCalledFunction()->getIntrinsicID() !=
+ Intrinsic::experimental_gc_statepoint)
+ Check(!ArgAttrs.hasAttribute(Attribute::StructRet),
+ "Attribute 'sret' cannot be used for vararg call arguments!",
+ Call);
+
+ if (ArgAttrs.hasAttribute(Attribute::InAlloca))
+ Check(Idx == Call.arg_size() - 1,
+ "inalloca isn't on the last argument!", Call);
+ }
+ }
+
+ // Verify that there's no metadata unless it's a direct call to an intrinsic.
+ if (!IsIntrinsic) {
+ for (Type *ParamTy : FTy->params()) {
+ Check(!ParamTy->isMetadataTy(),
+ "Function has metadata parameter but isn't an intrinsic", Call);
+ Check(!ParamTy->isTokenTy(),
+ "Function has token parameter but isn't an intrinsic", Call);
+ }
+ }
+
+ // Verify that indirect calls don't return tokens.
+ if (!Call.getCalledFunction()) {
+ Check(!FTy->getReturnType()->isTokenTy(),
+ "Return type cannot be token for indirect call!");
+ Check(!FTy->getReturnType()->isX86_AMXTy(),
+ "Return type cannot be x86_amx for indirect call!");
+ }
+
+ if (Function *F = Call.getCalledFunction())
+ if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
+ visitIntrinsicCall(ID, Call);
+
+  // Verify that a callsite has at most one operand bundle of each kind
+  // checked below: "deopt", "funclet", "gc-transition", "cfguardtarget",
+  // "preallocated", "gc-live", "ptrauth", and "clang.arc.attachedcall".
+ bool FoundDeoptBundle = false, FoundFuncletBundle = false,
+ FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
+ FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
+ FoundPtrauthBundle = false,
+ FoundAttachedCallBundle = false;
+ for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
+ OperandBundleUse BU = Call.getOperandBundleAt(i);
+ uint32_t Tag = BU.getTagID();
+ if (Tag == LLVMContext::OB_deopt) {
+ Check(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
+ FoundDeoptBundle = true;
+ } else if (Tag == LLVMContext::OB_gc_transition) {
+ Check(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
+ Call);
+ FoundGCTransitionBundle = true;
+ } else if (Tag == LLVMContext::OB_funclet) {
+ Check(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
+ FoundFuncletBundle = true;
+ Check(BU.Inputs.size() == 1,
+ "Expected exactly one funclet bundle operand", Call);
+ Check(isa<FuncletPadInst>(BU.Inputs.front()),
+ "Funclet bundle operands should correspond to a FuncletPadInst",
+ Call);
+ } else if (Tag == LLVMContext::OB_cfguardtarget) {
+ Check(!FoundCFGuardTargetBundle, "Multiple CFGuardTarget operand bundles",
+ Call);
+ FoundCFGuardTargetBundle = true;
+ Check(BU.Inputs.size() == 1,
+ "Expected exactly one cfguardtarget bundle operand", Call);
+ } else if (Tag == LLVMContext::OB_ptrauth) {
+ Check(!FoundPtrauthBundle, "Multiple ptrauth operand bundles", Call);
+ FoundPtrauthBundle = true;
+ Check(BU.Inputs.size() == 2,
+ "Expected exactly two ptrauth bundle operands", Call);
+ Check(isa<ConstantInt>(BU.Inputs[0]) &&
+ BU.Inputs[0]->getType()->isIntegerTy(32),
+ "Ptrauth bundle key operand must be an i32 constant", Call);
+ Check(BU.Inputs[1]->getType()->isIntegerTy(64),
+ "Ptrauth bundle discriminator operand must be an i64", Call);
+ } else if (Tag == LLVMContext::OB_preallocated) {
+ Check(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
+ Call);
+ FoundPreallocatedBundle = true;
+ Check(BU.Inputs.size() == 1,
+ "Expected exactly one preallocated bundle operand", Call);
+ auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
+ Check(Input &&
+ Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
+ "\"preallocated\" argument must be a token from "
+ "llvm.call.preallocated.setup",
+ Call);
+ } else if (Tag == LLVMContext::OB_gc_live) {
+ Check(!FoundGCLiveBundle, "Multiple gc-live operand bundles", Call);
+ FoundGCLiveBundle = true;
+ } else if (Tag == LLVMContext::OB_clang_arc_attachedcall) {
+ Check(!FoundAttachedCallBundle,
+ "Multiple \"clang.arc.attachedcall\" operand bundles", Call);
+ FoundAttachedCallBundle = true;
+ verifyAttachedCallBundle(Call, BU);
+ }
+ }
+
+ // Verify that callee and callsite agree on whether to use pointer auth.
+ Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
+ "Direct call cannot have a ptrauth bundle", Call);
+
+ // Verify that each inlinable callsite of a debug-info-bearing function in a
+ // debug-info-bearing function has a debug location attached to it. Failure to
+ // do so causes assertion failures when the inliner sets up inline scope info.
+ if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
+ Call.getCalledFunction()->getSubprogram())
+ CheckDI(Call.getDebugLoc(),
+ "inlinable function call in a function with "
+ "debug info must have a !dbg location",
+ Call);
+
+ if (Call.isInlineAsm())
+ verifyInlineAsmCall(Call);
+
+ visitInstruction(Call);
+}
+
+void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
+ StringRef Context) {
+ Check(!Attrs.contains(Attribute::InAlloca),
+ Twine("inalloca attribute not allowed in ") + Context);
+ Check(!Attrs.contains(Attribute::InReg),
+ Twine("inreg attribute not allowed in ") + Context);
+ Check(!Attrs.contains(Attribute::SwiftError),
+ Twine("swifterror attribute not allowed in ") + Context);
+ Check(!Attrs.contains(Attribute::Preallocated),
+ Twine("preallocated attribute not allowed in ") + Context);
+ Check(!Attrs.contains(Attribute::ByRef),
+ Twine("byref attribute not allowed in ") + Context);
+}
+
+/// Two types are "congruent" if they are identical, or if they are both pointer
+/// types with different pointee types and the same address space.
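+/// For example, i32* and i8* are congruent (same address space), while
+/// i8* and i8 addrspace(1)* are not, nor are i32 and i64.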
+static bool isTypeCongruent(Type *L, Type *R) {
+ if (L == R)
+ return true;
+ PointerType *PL = dyn_cast<PointerType>(L);
+ PointerType *PR = dyn_cast<PointerType>(R);
+ if (!PL || !PR)
+ return false;
+ return PL->getAddressSpace() == PR->getAddressSpace();
+}
+
+static AttrBuilder getParameterABIAttributes(LLVMContext &C, unsigned I,
+                                             AttributeList Attrs) {
+ static const Attribute::AttrKind ABIAttrs[] = {
+ Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
+ Attribute::InReg, Attribute::StackAlignment, Attribute::SwiftSelf,
+ Attribute::SwiftAsync, Attribute::SwiftError, Attribute::Preallocated,
+ Attribute::ByRef};
+ AttrBuilder Copy(C);
+ for (auto AK : ABIAttrs) {
+ Attribute Attr = Attrs.getParamAttrs(I).getAttribute(AK);
+ if (Attr.isValid())
+ Copy.addAttribute(Attr);
+ }
+
+ // `align` is ABI-affecting only in combination with `byval` or `byref`.
+ if (Attrs.hasParamAttr(I, Attribute::Alignment) &&
+ (Attrs.hasParamAttr(I, Attribute::ByVal) ||
+ Attrs.hasParamAttr(I, Attribute::ByRef)))
+ Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
+ return Copy;
+}
+
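+// A well-formed musttail call site looks like
+//   %r = musttail call i32 @callee(i32 %a)
+//   ret i32 %r
+// i.e. the call immediately precedes a ret (modulo an optional bitcast) and
+// the ret returns the call's result or void.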
+void Verifier::verifyMustTailCall(CallInst &CI) {
+ Check(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
+
+ Function *F = CI.getParent()->getParent();
+ FunctionType *CallerTy = F->getFunctionType();
+ FunctionType *CalleeTy = CI.getFunctionType();
+ Check(CallerTy->isVarArg() == CalleeTy->isVarArg(),
+ "cannot guarantee tail call due to mismatched varargs", &CI);
+ Check(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
+ "cannot guarantee tail call due to mismatched return types", &CI);
+
+ // - The calling conventions of the caller and callee must match.
+ Check(F->getCallingConv() == CI.getCallingConv(),
+ "cannot guarantee tail call due to mismatched calling conv", &CI);
+
+ // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
+ // or a pointer bitcast followed by a ret instruction.
+ // - The ret instruction must return the (possibly bitcasted) value
+ // produced by the call or void.
+ Value *RetVal = &CI;
+ Instruction *Next = CI.getNextNode();
+
+ // Handle the optional bitcast.
+ if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
+ Check(BI->getOperand(0) == RetVal,
+ "bitcast following musttail call must use the call", BI);
+ RetVal = BI;
+ Next = BI->getNextNode();
+ }
+
+ // Check the return.
+ ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
+ Check(Ret, "musttail call must precede a ret with an optional bitcast", &CI);
+ Check(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal ||
+ isa<UndefValue>(Ret->getReturnValue()),
+ "musttail call result must be returned", Ret);
+
+ AttributeList CallerAttrs = F->getAttributes();
+ AttributeList CalleeAttrs = CI.getAttributes();
+ if (CI.getCallingConv() == CallingConv::SwiftTail ||
+ CI.getCallingConv() == CallingConv::Tail) {
+ StringRef CCName =
+ CI.getCallingConv() == CallingConv::Tail ? "tailcc" : "swifttailcc";
+
+    // - Only sret, byval, swiftself, and swiftasync ABI-impacting attributes
+    //   are allowed in tailcc and swifttailcc musttail calls.
+ for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
+      AttrBuilder ABIAttrs =
+          getParameterABIAttributes(F->getContext(), I, CallerAttrs);
+ SmallString<32> Context{CCName, StringRef(" musttail caller")};
+ verifyTailCCMustTailAttrs(ABIAttrs, Context);
+ }
+ for (unsigned I = 0, E = CalleeTy->getNumParams(); I != E; ++I) {
+      AttrBuilder ABIAttrs =
+          getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
+ SmallString<32> Context{CCName, StringRef(" musttail callee")};
+ verifyTailCCMustTailAttrs(ABIAttrs, Context);
+ }
+ // - Varargs functions are not allowed
+ Check(!CallerTy->isVarArg(), Twine("cannot guarantee ") + CCName +
+ " tail call for varargs function");
+ return;
+ }
+
+ // - The caller and callee prototypes must match. Pointer types of
+ // parameters or return types may differ in pointee type, but not
+ // address space.
+ if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
+ Check(CallerTy->getNumParams() == CalleeTy->getNumParams(),
+ "cannot guarantee tail call due to mismatched parameter counts", &CI);
+ for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
+ Check(
+ isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
+ "cannot guarantee tail call due to mismatched parameter types", &CI);
+ }
+ }
+
+ // - All ABI-impacting function attributes, such as sret, byval, inreg,
+ // returned, preallocated, and inalloca, must match.
+ for (unsigned I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
+    AttrBuilder CallerABIAttrs =
+        getParameterABIAttributes(F->getContext(), I, CallerAttrs);
+    AttrBuilder CalleeABIAttrs =
+        getParameterABIAttributes(F->getContext(), I, CalleeAttrs);
+ Check(CallerABIAttrs == CalleeABIAttrs,
+ "cannot guarantee tail call due to mismatched ABI impacting "
+ "function attributes",
+ &CI, CI.getOperand(I));
+ }
+}
+
+void Verifier::visitCallInst(CallInst &CI) {
+ visitCallBase(CI);
+
+ if (CI.isMustTailCall())
+ verifyMustTailCall(CI);
+}
+
+void Verifier::visitInvokeInst(InvokeInst &II) {
+ visitCallBase(II);
+
+ // Verify that the first non-PHI instruction of the unwind destination is an
+ // exception handling instruction.
+ Check(
+ II.getUnwindDest()->isEHPad(),
+ "The unwind destination does not have an exception handling instruction!",
+ &II);
+
+ visitTerminator(II);
+}
+
+/// visitUnaryOperator - Check the argument to the unary operator.
+///
+void Verifier::visitUnaryOperator(UnaryOperator &U) {
+ Check(U.getType() == U.getOperand(0)->getType(),
+ "Unary operators must have same type for"
+ "operands and result!",
+ &U);
+
+ switch (U.getOpcode()) {
+ // Check that floating-point arithmetic operators are only used with
+ // floating-point operands.
+ case Instruction::FNeg:
+ Check(U.getType()->isFPOrFPVectorTy(),
+ "FNeg operator only works with float types!", &U);
+ break;
+ default:
+ llvm_unreachable("Unknown UnaryOperator opcode!");
+ }
+
+ visitInstruction(U);
+}
+
+/// visitBinaryOperator - Check that both arguments to the binary operator are
+/// of the same type!
+///
+void Verifier::visitBinaryOperator(BinaryOperator &B) {
+ Check(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
+ "Both operands to a binary operator are not of the same type!", &B);
+
+ switch (B.getOpcode()) {
+ // Check that integer arithmetic operators are only used with
+ // integral operands.
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::SRem:
+ case Instruction::URem:
+ Check(B.getType()->isIntOrIntVectorTy(),
+ "Integer arithmetic operators only work with integral types!", &B);
+ Check(B.getType() == B.getOperand(0)->getType(),
+ "Integer arithmetic operators must have same type "
+ "for operands and result!",
+ &B);
+ break;
+ // Check that floating-point arithmetic operators are only used with
+ // floating-point operands.
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ Check(B.getType()->isFPOrFPVectorTy(),
+ "Floating-point arithmetic operators only work with "
+ "floating-point types!",
+ &B);
+ Check(B.getType() == B.getOperand(0)->getType(),
+ "Floating-point arithmetic operators must have same type "
+ "for operands and result!",
+ &B);
+ break;
+ // Check that logical operators are only used with integral operands.
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ Check(B.getType()->isIntOrIntVectorTy(),
+ "Logical operators only work with integral types!", &B);
+ Check(B.getType() == B.getOperand(0)->getType(),
+ "Logical operators must have same type for operands and result!", &B);
+ break;
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ Check(B.getType()->isIntOrIntVectorTy(),
+ "Shifts only work with integral types!", &B);
+ Check(B.getType() == B.getOperand(0)->getType(),
+ "Shift return type must be same as operands!", &B);
+ break;
+ default:
+ llvm_unreachable("Unknown BinaryOperator opcode!");
+ }
+
+ visitInstruction(B);
+}
+
+void Verifier::visitICmpInst(ICmpInst &IC) {
+ // Check that the operands are the same type
+ Type *Op0Ty = IC.getOperand(0)->getType();
+ Type *Op1Ty = IC.getOperand(1)->getType();
+ Check(Op0Ty == Op1Ty,
+ "Both operands to ICmp instruction are not of the same type!", &IC);
+ // Check that the operands are the right type
+ Check(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
+ "Invalid operand types for ICmp instruction", &IC);
+ // Check that the predicate is valid.
+ Check(IC.isIntPredicate(), "Invalid predicate in ICmp instruction!", &IC);
+
+ visitInstruction(IC);
+}
+
+void Verifier::visitFCmpInst(FCmpInst &FC) {
+ // Check that the operands are the same type
+ Type *Op0Ty = FC.getOperand(0)->getType();
+ Type *Op1Ty = FC.getOperand(1)->getType();
+ Check(Op0Ty == Op1Ty,
+ "Both operands to FCmp instruction are not of the same type!", &FC);
+ // Check that the operands are the right type
+ Check(Op0Ty->isFPOrFPVectorTy(), "Invalid operand types for FCmp instruction",
+ &FC);
+ // Check that the predicate is valid.
+ Check(FC.isFPPredicate(), "Invalid predicate in FCmp instruction!", &FC);
+
+ visitInstruction(FC);
+}
+
+void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
+ Check(ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
+ "Invalid extractelement operands!", &EI);
+ visitInstruction(EI);
+}
+
+void Verifier::visitInsertElementInst(InsertElementInst &IE) {
+ Check(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
+ IE.getOperand(2)),
+ "Invalid insertelement operands!", &IE);
+ visitInstruction(IE);
+}
+
+void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
+ Check(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
+ SV.getShuffleMask()),
+ "Invalid shufflevector operands!", &SV);
+ visitInstruction(SV);
+}
+
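+// For vector GEPs, every vector operand must have the same element count,
+// e.g.
+//   %g = getelementptr i32, <4 x i32*> %ps, <4 x i64> %offs   ; well formed
+// while mixing <4 x ...> pointers with <2 x ...> indices is rejected.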
+void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
+ Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
+
+ Check(isa<PointerType>(TargetTy),
+ "GEP base pointer is not a vector or a vector of pointers", &GEP);
+ Check(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
+
+ SmallVector<Value *, 16> Idxs(GEP.indices());
+ Check(
+ all_of(Idxs, [](Value *V) { return V->getType()->isIntOrIntVectorTy(); }),
+ "GEP indexes must be integers", &GEP);
+ Type *ElTy =
+ GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
+ Check(ElTy, "Invalid indices for GEP pointer type!", &GEP);
+
+ Check(GEP.getType()->isPtrOrPtrVectorTy() &&
+ GEP.getResultElementType() == ElTy,
+ "GEP is not of right type for indices!", &GEP, ElTy);
+
+ if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
+ // Additional checks for vector GEPs.
+ ElementCount GEPWidth = GEPVTy->getElementCount();
+ if (GEP.getPointerOperandType()->isVectorTy())
+ Check(
+ GEPWidth ==
+ cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
+ "Vector GEP result width doesn't match operand's", &GEP);
+ for (Value *Idx : Idxs) {
+ Type *IndexTy = Idx->getType();
+ if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
+ ElementCount IndexWidth = IndexVTy->getElementCount();
+ Check(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
+ }
+ Check(IndexTy->isIntOrIntVectorTy(),
+ "All GEP indices should be of integer type");
+ }
+ }
+
+ if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
+ Check(GEP.getAddressSpace() == PTy->getAddressSpace(),
+ "GEP address space doesn't match type", &GEP);
+ }
+
+ visitInstruction(GEP);
+}
+
+static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
+ return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
+}
+
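+// !range metadata is a flat list of [Low, High) pairs over the value's type,
+// e.g.
+//   %v = load i8, i8* %p, !range !0
+//   !0 = !{i8 0, i8 2, i8 4, i8 6}   ; value is in [0,2) or [4,6)
+// Each pair must be non-empty, and the pairs must be disjoint,
+// non-contiguous, and in signed order.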
+void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
+ assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
+ "precondition violation");
+
+ unsigned NumOperands = Range->getNumOperands();
+ Check(NumOperands % 2 == 0, "Unfinished range!", Range);
+ unsigned NumRanges = NumOperands / 2;
+ Check(NumRanges >= 1, "It should have at least one range!", Range);
+
+ ConstantRange LastRange(1, true); // Dummy initial value
+ for (unsigned i = 0; i < NumRanges; ++i) {
+ ConstantInt *Low =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
+ Check(Low, "The lower limit must be an integer!", Low);
+ ConstantInt *High =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
+ Check(High, "The upper limit must be an integer!", High);
+ Check(High->getType() == Low->getType() && High->getType() == Ty,
+ "Range types must match instruction type!", &I);
+
+ APInt HighV = High->getValue();
+ APInt LowV = Low->getValue();
+ ConstantRange CurRange(LowV, HighV);
+ Check(!CurRange.isEmptySet() && !CurRange.isFullSet(),
+ "Range must not be empty!", Range);
+ if (i != 0) {
+ Check(CurRange.intersectWith(LastRange).isEmptySet(),
+ "Intervals are overlapping", Range);
+ Check(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
+ Range);
+ Check(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
+ Range);
+ }
+ LastRange = ConstantRange(LowV, HighV);
+ }
+ if (NumRanges > 2) {
+ APInt FirstLow =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
+ APInt FirstHigh =
+ mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
+ ConstantRange FirstRange(FirstLow, FirstHigh);
+ Check(FirstRange.intersectWith(LastRange).isEmptySet(),
+ "Intervals are overlapping", Range);
+ Check(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
+ Range);
+ }
+}
+
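+// Atomic accesses must be at least one byte and a power of two in size, so
+// i8/i16/i32/i64 (and similar) are fine while e.g. i17 or i48 are rejected.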
+void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
+ unsigned Size = DL.getTypeSizeInBits(Ty);
+ Check(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
+ Check(!(Size & (Size - 1)),
+ "atomic memory access' operand must have a power-of-two size", Ty, I);
+}
+
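+// Among other things, this rejects
+//   %v = load atomic i32, i32* %p release, align 4
+// since release and acq_rel orderings are meaningless for loads; unordered,
+// monotonic, acquire, and seq_cst are accepted.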
+void Verifier::visitLoadInst(LoadInst &LI) {
+ PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
+ Check(PTy, "Load operand must be a pointer.", &LI);
+ Type *ElTy = LI.getType();
+ if (MaybeAlign A = LI.getAlign()) {
+ Check(A->value() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &LI);
+ }
+ Check(ElTy->isSized(), "loading unsized types is not allowed", &LI);
+ if (LI.isAtomic()) {
+ Check(LI.getOrdering() != AtomicOrdering::Release &&
+ LI.getOrdering() != AtomicOrdering::AcquireRelease,
+ "Load cannot have Release ordering", &LI);
+ Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
+ "atomic load operand must have integer, pointer, or floating point "
+ "type!",
+ ElTy, &LI);
+ checkAtomicMemAccessSize(ElTy, &LI);
+ } else {
+ Check(LI.getSyncScopeID() == SyncScope::System,
+ "Non-atomic load cannot have SynchronizationScope specified", &LI);
+ }
+
+ visitInstruction(LI);
+}
+
+void Verifier::visitStoreInst(StoreInst &SI) {
+ PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
+ Check(PTy, "Store operand must be a pointer.", &SI);
+ Type *ElTy = SI.getOperand(0)->getType();
+ Check(PTy->isOpaqueOrPointeeTypeMatches(ElTy),
+ "Stored value type does not match pointer operand type!", &SI, ElTy);
+ if (MaybeAlign A = SI.getAlign()) {
+ Check(A->value() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &SI);
+ }
+ Check(ElTy->isSized(), "storing unsized types is not allowed", &SI);
+ if (SI.isAtomic()) {
+ Check(SI.getOrdering() != AtomicOrdering::Acquire &&
+ SI.getOrdering() != AtomicOrdering::AcquireRelease,
+ "Store cannot have Acquire ordering", &SI);
+ Check(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
+ "atomic store operand must have integer, pointer, or floating point "
+ "type!",
+ ElTy, &SI);
+ checkAtomicMemAccessSize(ElTy, &SI);
+ } else {
+ Check(SI.getSyncScopeID() == SyncScope::System,
+ "Non-atomic store cannot have SynchronizationScope specified", &SI);
+ }
+ visitInstruction(SI);
+}
+
+/// Check that SwiftErrorVal is used as a swifterror argument in CS.
+void Verifier::verifySwiftErrorCall(CallBase &Call,
+ const Value *SwiftErrorVal) {
+ for (const auto &I : llvm::enumerate(Call.args())) {
+ if (I.value() == SwiftErrorVal) {
+ Check(Call.paramHasAttr(I.index(), Attribute::SwiftError),
+ "swifterror value when used in a callsite should be marked "
+ "with swifterror attribute",
+ SwiftErrorVal, Call);
+ }
+ }
+}
+
+void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
+ // Check that swifterror value is only used by loads, stores, or as
+ // a swifterror argument.
+ for (const User *U : SwiftErrorVal->users()) {
+ Check(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
+ isa<InvokeInst>(U),
+ "swifterror value can only be loaded and stored from, or "
+ "as a swifterror argument!",
+ SwiftErrorVal, U);
+ // If it is used by a store, check it is the second operand.
+ if (auto StoreI = dyn_cast<StoreInst>(U))
+ Check(StoreI->getOperand(1) == SwiftErrorVal,
+ "swifterror value should be the second operand when used "
+ "by stores",
+ SwiftErrorVal, U);
+ if (auto *Call = dyn_cast<CallBase>(U))
+ verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
+ }
+}
+
+void Verifier::visitAllocaInst(AllocaInst &AI) {
+ SmallPtrSet<Type*, 4> Visited;
+ Check(AI.getAllocatedType()->isSized(&Visited),
+ "Cannot allocate unsized type", &AI);
+ Check(AI.getArraySize()->getType()->isIntegerTy(),
+ "Alloca array size must have integer type", &AI);
+ if (MaybeAlign A = AI.getAlign()) {
+ Check(A->value() <= Value::MaximumAlignment,
+ "huge alignment values are unsupported", &AI);
+ }
+
+ if (AI.isSwiftError()) {
+ Check(AI.getAllocatedType()->isPointerTy(),
+ "swifterror alloca must have pointer type", &AI);
+ Check(!AI.isArrayAllocation(),
+ "swifterror alloca must not be array allocation", &AI);
+ verifySwiftErrorValue(&AI);
+ }
+
+ visitInstruction(AI);
+}
+
+void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
+ Type *ElTy = CXI.getOperand(1)->getType();
+ Check(ElTy->isIntOrPtrTy(),
+ "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
+ checkAtomicMemAccessSize(ElTy, &CXI);
+ visitInstruction(CXI);
+}
+
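+// The accepted operand type depends on the operation, e.g.
+//   atomicrmw add  i32* %p, i32 1 seq_cst          ; integer op: well formed
+//   atomicrmw fadd float* %p, float 1.0 seq_cst    ; FP op: well formed
+//   atomicrmw add  float* %p, float 1.0 seq_cst    ; rejected
+// xchg additionally accepts pointer and floating-point operands.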
+void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
+ Check(RMWI.getOrdering() != AtomicOrdering::Unordered,
+ "atomicrmw instructions cannot be unordered.", &RMWI);
+ auto Op = RMWI.getOperation();
+ Type *ElTy = RMWI.getOperand(1)->getType();
+ if (Op == AtomicRMWInst::Xchg) {
+ Check(ElTy->isIntegerTy() || ElTy->isFloatingPointTy() ||
+ ElTy->isPointerTy(),
+ "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
+ " operand must have integer or floating point type!",
+ &RMWI, ElTy);
+ } else if (AtomicRMWInst::isFPOperation(Op)) {
+ Check(ElTy->isFloatingPointTy(),
+ "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
+ " operand must have floating point type!",
+ &RMWI, ElTy);
+ } else {
+ Check(ElTy->isIntegerTy(),
+ "atomicrmw " + AtomicRMWInst::getOperationName(Op) +
+ " operand must have integer type!",
+ &RMWI, ElTy);
+ }
+ checkAtomicMemAccessSize(ElTy, &RMWI);
+ Check(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
+ "Invalid binary operation!", &RMWI);
+ visitInstruction(RMWI);
+}
+
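+// For example, "fence seq_cst" and "fence acquire" are well formed, while
+// "fence monotonic" and "fence unordered" are rejected because they would
+// impose no inter-thread ordering.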
+void Verifier::visitFenceInst(FenceInst &FI) {
+ const AtomicOrdering Ordering = FI.getOrdering();
+ Check(Ordering == AtomicOrdering::Acquire ||
+ Ordering == AtomicOrdering::Release ||
+ Ordering == AtomicOrdering::AcquireRelease ||
+ Ordering == AtomicOrdering::SequentiallyConsistent,
+ "fence instructions may only have acquire, release, acq_rel, or "
+ "seq_cst ordering.",
+ &FI);
+ visitInstruction(FI);
+}
+
+void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
+ Check(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
+ EVI.getIndices()) == EVI.getType(),
+ "Invalid ExtractValueInst operands!", &EVI);
+
+ visitInstruction(EVI);
+}
+
+void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
+ Check(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
+ IVI.getIndices()) ==
+ IVI.getOperand(1)->getType(),
+ "Invalid InsertValueInst operands!", &IVI);
+
+ visitInstruction(IVI);
+}
+
+static Value *getParentPad(Value *EHPad) {
+ if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
+ return FPI->getParentPad();
+
+ return cast<CatchSwitchInst>(EHPad)->getParentPad();
+}
+
+void Verifier::visitEHPadPredecessors(Instruction &I) {
+ assert(I.isEHPad());
+
+ BasicBlock *BB = I.getParent();
+ Function *F = BB->getParent();
+
+ Check(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
+
+ if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
+ // The landingpad instruction defines its parent as a landing pad block. The
+ // landing pad block may be branched to only by the unwind edge of an
+ // invoke.
+ for (BasicBlock *PredBB : predecessors(BB)) {
+ const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
+ Check(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
+ "Block containing LandingPadInst must be jumped to "
+ "only by the unwind edge of an invoke.",
+ LPI);
+ }
+ return;
+ }
+ if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
+ if (!pred_empty(BB))
+ Check(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
+ "Block containg CatchPadInst must be jumped to "
+ "only by its catchswitch.",
+ CPI);
+ Check(BB != CPI->getCatchSwitch()->getUnwindDest(),
+ "Catchswitch cannot unwind to one of its catchpads",
+ CPI->getCatchSwitch(), CPI);
+ return;
+ }
+
+ // Verify that each pred has a legal terminator with a legal to/from EH
+ // pad relationship.
+ Instruction *ToPad = &I;
+ Value *ToPadParent = getParentPad(ToPad);
+ for (BasicBlock *PredBB : predecessors(BB)) {
+ Instruction *TI = PredBB->getTerminator();
+ Value *FromPad;
+ if (auto *II = dyn_cast<InvokeInst>(TI)) {
+ Check(II->getUnwindDest() == BB && II->getNormalDest() != BB,
+ "EH pad must be jumped to via an unwind edge", ToPad, II);
+ if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
+ FromPad = Bundle->Inputs[0];
+ else
+ FromPad = ConstantTokenNone::get(II->getContext());
+ } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
+ FromPad = CRI->getOperand(0);
+ Check(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
+ } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
+ FromPad = CSI;
+ } else {
+ Check(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
+ }
+
+ // The edge may exit from zero or more nested pads.
+ SmallSet<Value *, 8> Seen;
+ for (;; FromPad = getParentPad(FromPad)) {
+ Check(FromPad != ToPad,
+ "EH pad cannot handle exceptions raised within it", FromPad, TI);
+ if (FromPad == ToPadParent) {
+ // This is a legal unwind edge.
+ break;
+ }
+ Check(!isa<ConstantTokenNone>(FromPad),
+ "A single unwind edge may only enter one EH pad", TI);
+ Check(Seen.insert(FromPad).second, "EH pad jumps through a cycle of pads",
+ FromPad);
+
+ // This will be diagnosed on the corresponding instruction already. We
+ // need the extra check here to make sure getParentPad() works.
+ Check(isa<FuncletPadInst>(FromPad) || isa<CatchSwitchInst>(FromPad),
+ "Parent pad must be catchpad/cleanuppad/catchswitch", TI);
+ }
+ }
+}
+
+void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
+ // The landingpad instruction is ill-formed if it doesn't have any clauses and
+ // isn't a cleanup.
+ Check(LPI.getNumClauses() > 0 || LPI.isCleanup(),
+ "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
+
+ visitEHPadPredecessors(LPI);
+
+ if (!LandingPadResultTy)
+ LandingPadResultTy = LPI.getType();
+ else
+ Check(LandingPadResultTy == LPI.getType(),
+ "The landingpad instruction should have a consistent result type "
+ "inside a function.",
+ &LPI);
+
+ Function *F = LPI.getParent()->getParent();
+ Check(F->hasPersonalityFn(),
+ "LandingPadInst needs to be in a function with a personality.", &LPI);
+
+ // The landingpad instruction must be the first non-PHI instruction in the
+ // block.
+ Check(LPI.getParent()->getLandingPadInst() == &LPI,
+ "LandingPadInst not the first non-PHI instruction in the block.", &LPI);
+
+ for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
+ Constant *Clause = LPI.getClause(i);
+ if (LPI.isCatch(i)) {
+ Check(isa<PointerType>(Clause->getType()),
+ "Catch operand does not have pointer type!", &LPI);
+ } else {
+ Check(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
+ Check(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
+ "Filter operand is not an array of constants!", &LPI);
+ }
+ }
+
+ visitInstruction(LPI);
+}
+
+void Verifier::visitResumeInst(ResumeInst &RI) {
+ Check(RI.getFunction()->hasPersonalityFn(),
+ "ResumeInst needs to be in a function with a personality.", &RI);
+
+ if (!LandingPadResultTy)
+ LandingPadResultTy = RI.getValue()->getType();
+ else
+ Check(LandingPadResultTy == RI.getValue()->getType(),
+ "The resume instruction should have a consistent result type "
+ "inside a function.",
+ &RI);
+
+ visitTerminator(RI);
+}
+
+void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
+ BasicBlock *BB = CPI.getParent();
+
+ Function *F = BB->getParent();
+ Check(F->hasPersonalityFn(),
+ "CatchPadInst needs to be in a function with a personality.", &CPI);
+
+ Check(isa<CatchSwitchInst>(CPI.getParentPad()),
+ "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
+ CPI.getParentPad());
+
+ // The catchpad instruction must be the first non-PHI instruction in the
+ // block.
+ Check(BB->getFirstNonPHI() == &CPI,
+ "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
+
+ visitEHPadPredecessors(CPI);
+ visitFuncletPadInst(CPI);
+}
+
+void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
+ Check(isa<CatchPadInst>(CatchReturn.getOperand(0)),
+ "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
+ CatchReturn.getOperand(0));
+
+ visitTerminator(CatchReturn);
+}
+
+void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
+ BasicBlock *BB = CPI.getParent();
+
+ Function *F = BB->getParent();
+ Check(F->hasPersonalityFn(),
+ "CleanupPadInst needs to be in a function with a personality.", &CPI);
+
+ // The cleanuppad instruction must be the first non-PHI instruction in the
+ // block.
+ Check(BB->getFirstNonPHI() == &CPI,
+ "CleanupPadInst not the first non-PHI instruction in the block.", &CPI);
+
+ auto *ParentPad = CPI.getParentPad();
+ Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
+ "CleanupPadInst has an invalid parent.", &CPI);
+
+ visitEHPadPredecessors(CPI);
+ visitFuncletPadInst(CPI);
+}
+
+void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
+ User *FirstUser = nullptr;
+ Value *FirstUnwindPad = nullptr;
+ SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
+ SmallSet<FuncletPadInst *, 8> Seen;
+
+ while (!Worklist.empty()) {
+ FuncletPadInst *CurrentPad = Worklist.pop_back_val();
+ Check(Seen.insert(CurrentPad).second,
+ "FuncletPadInst must not be nested within itself", CurrentPad);
+ Value *UnresolvedAncestorPad = nullptr;
+ for (User *U : CurrentPad->users()) {
+ BasicBlock *UnwindDest;
+ if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
+ UnwindDest = CRI->getUnwindDest();
+ } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
+ // We allow catchswitch unwind to caller to nest
+ // within an outer pad that unwinds somewhere else,
+ // because catchswitch doesn't have a nounwind variant.
+ // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
+ if (CSI->unwindsToCaller())
+ continue;
+ UnwindDest = CSI->getUnwindDest();
+ } else if (auto *II = dyn_cast<InvokeInst>(U)) {
+ UnwindDest = II->getUnwindDest();
+ } else if (isa<CallInst>(U)) {
+ // Calls which don't unwind may be found inside funclet
+ // pads that unwind somewhere else. We don't *require*
+ // such calls to be annotated nounwind.
+ continue;
+ } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
+ // The unwind dest for a cleanup can only be found by
+ // recursive search. Add it to the worklist, and we'll
+ // search for its first use that determines where it unwinds.
+ Worklist.push_back(CPI);
+ continue;
+ } else {
+ Check(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
+ continue;
+ }
+
+ Value *UnwindPad;
+ bool ExitsFPI;
+ if (UnwindDest) {
+ UnwindPad = UnwindDest->getFirstNonPHI();
+ if (!cast<Instruction>(UnwindPad)->isEHPad())
+ continue;
+ Value *UnwindParent = getParentPad(UnwindPad);
+ // Ignore unwind edges that don't exit CurrentPad.
+ if (UnwindParent == CurrentPad)
+ continue;
+ // Determine whether the original funclet pad is exited,
+ // and if we are scanning nested pads determine how many
+ // of them are exited so we can stop searching their
+ // children.
+ Value *ExitedPad = CurrentPad;
+ ExitsFPI = false;
+ do {
+ if (ExitedPad == &FPI) {
+ ExitsFPI = true;
+ // Now we can resolve any ancestors of CurrentPad up to
+ // FPI, but not including FPI since we need to make sure
+ // to check all direct users of FPI for consistency.
+ UnresolvedAncestorPad = &FPI;
+ break;
+ }
+ Value *ExitedParent = getParentPad(ExitedPad);
+ if (ExitedParent == UnwindParent) {
+ // ExitedPad is the ancestor-most pad which this unwind
+ // edge exits, so we can resolve up to it, meaning that
+ // ExitedParent is the first ancestor still unresolved.
+ UnresolvedAncestorPad = ExitedParent;
+ break;
+ }
+ ExitedPad = ExitedParent;
+ } while (!isa<ConstantTokenNone>(ExitedPad));
+ } else {
+ // Unwinding to caller exits all pads.
+ UnwindPad = ConstantTokenNone::get(FPI.getContext());
+ ExitsFPI = true;
+ UnresolvedAncestorPad = &FPI;
+ }
+
+ if (ExitsFPI) {
+ // This unwind edge exits FPI. Make sure it agrees with other
+ // such edges.
+ if (FirstUser) {
+ Check(UnwindPad == FirstUnwindPad,
+ "Unwind edges out of a funclet "
+ "pad must have the same unwind "
+ "dest",
+ &FPI, U, FirstUser);
+ } else {
+ FirstUser = U;
+ FirstUnwindPad = UnwindPad;
+ // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
+ if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
+ getParentPad(UnwindPad) == getParentPad(&FPI))
+ SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
+ }
+ }
+ // Make sure we visit all uses of FPI, but for nested pads stop as
+ // soon as we know where they unwind to.
+ if (CurrentPad != &FPI)
+ break;
+ }
+ if (UnresolvedAncestorPad) {
+ if (CurrentPad == UnresolvedAncestorPad) {
+ // When CurrentPad is FPI itself, we don't mark it as resolved even if
+ // we've found an unwind edge that exits it, because we need to verify
+ // all direct uses of FPI.
+ assert(CurrentPad == &FPI);
+ continue;
+ }
+ // Pop off the worklist any nested pads that we've found an unwind
+ // destination for. The pads on the worklist are the uncles,
+ // great-uncles, etc. of CurrentPad. We've found an unwind destination
+ // for all ancestors of CurrentPad up to but not including
+ // UnresolvedAncestorPad.
+ Value *ResolvedPad = CurrentPad;
+ while (!Worklist.empty()) {
+ Value *UnclePad = Worklist.back();
+ Value *AncestorPad = getParentPad(UnclePad);
+ // Walk ResolvedPad up the ancestor list until we either find the
+ // uncle's parent or the last resolved ancestor.
+ while (ResolvedPad != AncestorPad) {
+ Value *ResolvedParent = getParentPad(ResolvedPad);
+ if (ResolvedParent == UnresolvedAncestorPad) {
+ break;
+ }
+ ResolvedPad = ResolvedParent;
+ }
+ // If the resolved ancestor search didn't find the uncle's parent,
+ // then the uncle is not yet resolved.
+ if (ResolvedPad != AncestorPad)
+ break;
+ // This uncle is resolved, so pop it from the worklist.
+ Worklist.pop_back();
+ }
+ }
+ }
+
+ if (FirstUnwindPad) {
+ if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
+ BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
+ Value *SwitchUnwindPad;
+ if (SwitchUnwindDest)
+ SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
+ else
+ SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
+ Check(SwitchUnwindPad == FirstUnwindPad,
+ "Unwind edges out of a catch must have the same unwind dest as "
+ "the parent catchswitch",
+ &FPI, FirstUser, CatchSwitch);
+ }
+ }
+
+ visitInstruction(FPI);
+}
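+
+// The agreement rule above, sketched in IR (hypothetical labels; assumes
+// both edges exit the pad): every unwind edge leaving %cp must name the
+// same destination, so this is rejected unless %unw1 == %unw2:
+//   %cp = cleanuppad within none []
+//   invoke void @f() [ "funclet"(token %cp) ]
+//           to label %cont unwind label %unw1
+//   ...
+//   cleanupret from %cp unwind label %unw2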
+
+void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
+ BasicBlock *BB = CatchSwitch.getParent();
+
+ Function *F = BB->getParent();
+ Check(F->hasPersonalityFn(),
+ "CatchSwitchInst needs to be in a function with a personality.",
+ &CatchSwitch);
+
+ // The catchswitch instruction must be the first non-PHI instruction in the
+ // block.
+ Check(BB->getFirstNonPHI() == &CatchSwitch,
+ "CatchSwitchInst not the first non-PHI instruction in the block.",
+ &CatchSwitch);
+
+ auto *ParentPad = CatchSwitch.getParentPad();
+ Check(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
+ "CatchSwitchInst has an invalid parent.", ParentPad);
+
+ if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
+ Instruction *I = UnwindDest->getFirstNonPHI();
+ Check(I->isEHPad() && !isa<LandingPadInst>(I),
+ "CatchSwitchInst must unwind to an EH block which is not a "
+ "landingpad.",
+ &CatchSwitch);
+
+ // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
+ if (getParentPad(I) == ParentPad)
+ SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
+ }
+
+ Check(CatchSwitch.getNumHandlers() != 0,
+ "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
+
+ for (BasicBlock *Handler : CatchSwitch.handlers()) {
+ Check(isa<CatchPadInst>(Handler->getFirstNonPHI()),
+ "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
+ }
+
+ visitEHPadPredecessors(CatchSwitch);
+ visitTerminator(CatchSwitch);
+}
+
+void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
+ Check(isa<CleanupPadInst>(CRI.getOperand(0)),
+ "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
+ CRI.getOperand(0));
+
+ if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
+ Instruction *I = UnwindDest->getFirstNonPHI();
+ Check(I->isEHPad() && !isa<LandingPadInst>(I),
+ "CleanupReturnInst must unwind to an EH block which is not a "
+ "landingpad.",
+ &CRI);
+ }
+
+ visitTerminator(CRI);
+}
+
+void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
+ Instruction *Op = cast<Instruction>(I.getOperand(i));
+ // If we have an invalid invoke, don't try to compute the dominance.
+ // We already reject it in the invoke-specific checks and the dominance
+ // computation doesn't handle multiple edges.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
+ if (II->getNormalDest() == II->getUnwindDest())
+ return;
+ }
+
+ // Quick check whether the def has already been encountered in the same block.
+ // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
+ // uses are defined to happen on the incoming edge, not at the instruction.
+ //
+ // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
+ // wrapping an SSA value, assert that we've already encountered it. See
+ // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
+ if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
+ return;
+
+ const Use &U = I.getOperandUse(i);
+ Check(DT.dominates(Op, U), "Instruction does not dominate all uses!", Op, &I);
+}
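+
+// An illustrative violation (hypothetical values): %d does not dominate
+// its use in %m, since control may reach %m directly from %entry:
+//   entry:
+//     br i1 %c, label %l, label %m
+//   l:
+//     %d = add i32 1, 1
+//     br label %m
+//   m:
+//     %u = add i32 %d, 1   ; rejected: def does not dominate use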
+
+void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
+ Check(I.getType()->isPointerTy(),
+ "dereferenceable, dereferenceable_or_null "
+ "apply only to pointer types",
+ &I);
+ Check((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
+ "dereferenceable, dereferenceable_or_null apply only to load"
+ " and inttoptr instructions, use attributes for calls or invokes",
+ &I);
+ Check(MD->getNumOperands() == 1,
+ "dereferenceable, dereferenceable_or_null "
+ "take one operand!",
+ &I);
+ ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
+ Check(CI && CI->getType()->isIntegerTy(64),
+ "dereferenceable, "
+ "dereferenceable_or_null metadata value must be an i64!",
+ &I);
+}
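+
+// Example attachment accepted by this check (illustrative): a load of a
+// pointer known to be dereferenceable for 8 bytes:
+//   %p = load i8*, i8** %q, !dereferenceable !0
+//   !0 = !{i64 8}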
+
+void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
+ Check(MD->getNumOperands() >= 2,
+ "!prof annotations should have no less than 2 operands", MD);
+
+ // Check first operand.
+ Check(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
+ Check(isa<MDString>(MD->getOperand(0)),
+ "expected string with name of the !prof annotation", MD);
+ MDString *MDS = cast<MDString>(MD->getOperand(0));
+ StringRef ProfName = MDS->getString();
+
+ // Check consistency of !prof branch_weights metadata.
+ if (ProfName.equals("branch_weights")) {
+ if (isa<InvokeInst>(&I)) {
+ Check(MD->getNumOperands() == 2 || MD->getNumOperands() == 3,
+ "Wrong number of InvokeInst branch_weights operands", MD);
+ } else {
+ unsigned ExpectedNumOperands = 0;
+ if (BranchInst *BI = dyn_cast<BranchInst>(&I))
+ ExpectedNumOperands = BI->getNumSuccessors();
+ else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
+ ExpectedNumOperands = SI->getNumSuccessors();
+ else if (isa<CallInst>(&I))
+ ExpectedNumOperands = 1;
+ else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
+ ExpectedNumOperands = IBI->getNumDestinations();
+ else if (isa<SelectInst>(&I))
+ ExpectedNumOperands = 2;
+ else
+ CheckFailed("!prof branch_weights are not allowed for this instruction",
+ MD);
+
+ Check(MD->getNumOperands() == 1 + ExpectedNumOperands,
+ "Wrong number of operands", MD);
+ }
+ for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
+ auto &MDO = MD->getOperand(i);
+ Check(MDO, "weight operands should not be null", MD);
+ Check(mdconst::dyn_extract<ConstantInt>(MDO),
+ "!prof branch_weights operand is not a const int");
+ }
+ }
+}
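+
+// Shape of well-formed branch_weights metadata (illustrative): one
+// weight per successor, e.g. for a conditional branch:
+//   br i1 %cond, label %then, label %else, !prof !0
+//   !0 = !{!"branch_weights", i32 64, i32 4}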
+
+void Verifier::visitAnnotationMetadata(MDNode *Annotation) {
+ Check(isa<MDTuple>(Annotation), "annotation must be a tuple");
+ Check(Annotation->getNumOperands() >= 1,
+ "annotation must have at least one operand");
+ for (const MDOperand &Op : Annotation->operands())
+ Check(isa<MDString>(Op.get()), "operands must be strings");
+}
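+
+// Example of annotation metadata accepted here (illustrative): a tuple
+// of strings attached to an instruction:
+//   store i32 0, i32* %p, !annotation !0
+//   !0 = !{!"auto-init"}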
+
+void Verifier::visitAliasScopeMetadata(const MDNode *MD) {
+ unsigned NumOps = MD->getNumOperands();
+ Check(NumOps >= 2 && NumOps <= 3, "scope must have two or three operands",
+ MD);
+ Check(MD->getOperand(0).get() == MD || isa<MDString>(MD->getOperand(0)),
+ "first scope operand must be self-referential or string", MD);
+ if (NumOps == 3)
+ Check(isa<MDString>(MD->getOperand(2)),
+ "third scope operand must be string (if used)", MD);
+
+ MDNode *Domain = dyn_cast<MDNode>(MD->getOperand(1));
+ Check(Domain != nullptr, "second scope operand must be MDNode", MD);
+
+ unsigned NumDomainOps = Domain->getNumOperands();
+ Check(NumDomainOps >= 1 && NumDomainOps <= 2,
+ "domain must have one or two operands", Domain);
+ Check(Domain->getOperand(0).get() == Domain ||
+ isa<MDString>(Domain->getOperand(0)),
+ "first domain operand must be self-referential or string", Domain);
+ if (NumDomainOps == 2)
+ Check(isa<MDString>(Domain->getOperand(1)),
+ "second domain operand must be string (if used)", Domain);
+}
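+
+// Shape of a scope and its domain accepted above (illustrative):
+//   !dom   = distinct !{!dom}          ; domain: self-referential
+//   !scope = distinct !{!scope, !dom}  ; scope: self-ref plus its domain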
+
+void Verifier::visitAliasScopeListMetadata(const MDNode *MD) {
+ for (const MDOperand &Op : MD->operands()) {
+ const MDNode *OpMD = dyn_cast<MDNode>(Op);
+ Check(OpMD != nullptr, "scope list must consist of MDNodes", MD);
+ visitAliasScopeMetadata(OpMD);
+ }
+}
+
+void Verifier::visitAccessGroupMetadata(const MDNode *MD) {
+ auto IsValidAccessScope = [](const MDNode *MD) {
+ return MD->getNumOperands() == 0 && MD->isDistinct();
+ };
+
+ // It must be either an access scope itself...
+ if (IsValidAccessScope(MD))
+ return;
+
+ // ...or a list of access scopes.
+ for (const MDOperand &Op : MD->operands()) {
+ const MDNode *OpMD = dyn_cast<MDNode>(Op);
+ Check(OpMD != nullptr, "Access scope list must consist of MDNodes", MD);
+ Check(IsValidAccessScope(OpMD),
+ "Access scope list contains invalid access scope", MD);
+ }
+}
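+
+// A valid access group is a distinct MDNode with no operands, used either
+// directly or as a member of a list (illustrative):
+//   !g = distinct !{}
+//   %v = load i32, i32* %p, !llvm.access.group !g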
+
+/// visitInstruction - Verify that an instruction is well formed.
+///
+void Verifier::visitInstruction(Instruction &I) {
+ BasicBlock *BB = I.getParent();
+ Check(BB, "Instruction not embedded in basic block!", &I);
+
+ if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
+ for (User *U : I.users()) {
+ Check(U != (User *)&I || !DT.isReachableFromEntry(BB),
+ "Only PHI nodes may reference their own value!", &I);
+ }
+ }
+
+ // Check that void typed values don't have names
+ Check(!I.getType()->isVoidTy() || !I.hasName(),
+ "Instruction has a name, but provides a void value!", &I);
+
+ // Check that the return value of the instruction is either void or a legal
+ // value type.
+ Check(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
+ "Instruction returns a non-scalar type!", &I);
+
+ // Check that the instruction doesn't produce metadata. Calls are already
+ // checked against the callee type.
+ Check(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
+ "Invalid use of metadata!", &I);
+
+ // Check that all uses of the instruction, if they are instructions
+ // themselves, actually have parent basic blocks. If the use is not an
+ // instruction, it is an error!
+ for (Use &U : I.uses()) {
+ if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
+ Check(Used->getParent() != nullptr,
+ "Instruction referencing"
+ " instruction not embedded in a basic block!",
+ &I, Used);
+ else {
+ CheckFailed("Use of instruction is not an instruction!", U);
+ return;
+ }
+ }
+
+ // Get a pointer to the call base of the instruction if it is some form of
+ // call.
+ const CallBase *CBI = dyn_cast<CallBase>(&I);
+
+ for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
+ Check(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
+
+ // Check to make sure that only first-class-values are operands to
+ // instructions.
+ if (!I.getOperand(i)->getType()->isFirstClassType()) {
+ Check(false, "Instruction operands must be first-class values!", &I);
+ }
+
+ if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
+ // This code checks whether the function is used as the operand of a
+ // clang_arc_attachedcall operand bundle.
+ auto IsAttachedCallOperand = [](Function *F, const CallBase *CBI,
+ int Idx) {
+ return CBI && CBI->isOperandBundleOfType(
+ LLVMContext::OB_clang_arc_attachedcall, Idx);
+ };
+
+ // Check to make sure that the "address of" an intrinsic function is never
+ // taken. Ignore cases where the address of the intrinsic function is used
+ // as the argument of operand bundle "clang.arc.attachedcall" as those
+ // cases are handled in verifyAttachedCallBundle.
+ Check((!F->isIntrinsic() ||
+ (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
+ IsAttachedCallOperand(F, CBI, i)),
+ "Cannot take the address of an intrinsic!", &I);
+ Check(!F->isIntrinsic() || isa<CallInst>(I) ||
+ F->getIntrinsicID() == Intrinsic::donothing ||
+ F->getIntrinsicID() == Intrinsic::seh_try_begin ||
+ F->getIntrinsicID() == Intrinsic::seh_try_end ||
+ F->getIntrinsicID() == Intrinsic::seh_scope_begin ||
+ F->getIntrinsicID() == Intrinsic::seh_scope_end ||
+ F->getIntrinsicID() == Intrinsic::coro_resume ||
+ F->getIntrinsicID() == Intrinsic::coro_destroy ||
+ F->getIntrinsicID() ==
+ Intrinsic::experimental_patchpoint_void ||
+ F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
+ F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
+ F->getIntrinsicID() == Intrinsic::wasm_rethrow ||
+ IsAttachedCallOperand(F, CBI, i),
+ "Cannot invoke an intrinsic other than donothing, patchpoint, "
+ "statepoint, coro_resume, coro_destroy or clang.arc.attachedcall",
+ &I);
+ Check(F->getParent() == &M, "Referencing function in another module!", &I,
+ &M, F, F->getParent());
+ } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
+ Check(OpBB->getParent() == BB->getParent(),
+ "Referring to a basic block in another function!", &I);
+ } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
+ Check(OpArg->getParent() == BB->getParent(),
+ "Referring to an argument in another function!", &I);
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
+ Check(GV->getParent() == &M, "Referencing global in another module!", &I,
+ &M, GV, GV->getParent());
+ } else if (isa<Instruction>(I.getOperand(i))) {
+ verifyDominatesUse(I, i);
+ } else if (isa<InlineAsm>(I.getOperand(i))) {
+ Check(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
+ "Cannot take the address of an inline asm!", &I);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
+ if (CE->getType()->isPtrOrPtrVectorTy()) {
+ // If we have a ConstantExpr pointer, we need to see if it came from an
+ // illegal bitcast.
+ visitConstantExprsRecursively(CE);
+ }
+ }
+ }
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
+ Check(I.getType()->isFPOrFPVectorTy(),
+ "fpmath requires a floating point result!", &I);
+ Check(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
+ if (ConstantFP *CFP0 =
+ mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
+ const APFloat &Accuracy = CFP0->getValueAPF();
+ Check(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
+ "fpmath accuracy must have float type", &I);
+ Check(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
+ "fpmath accuracy not a positive number!", &I);
+ } else {
+ Check(false, "invalid fpmath accuracy!", &I);
+ }
+ }
+
+ if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
+ Check(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
+ "Ranges are only for loads, calls and invokes!", &I);
+ visitRangeMetadata(I, Range, I.getType());
+ }
+
+ if (I.hasMetadata(LLVMContext::MD_invariant_group)) {
+ Check(isa<LoadInst>(I) || isa<StoreInst>(I),
+ "invariant.group metadata is only for loads and stores", &I);
+ }
+
+ if (I.getMetadata(LLVMContext::MD_nonnull)) {
+ Check(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
+ &I);
+ Check(isa<LoadInst>(I),
+ "nonnull applies only to load instructions, use attributes"
+ " for calls or invokes",
+ &I);
+ }
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
+ visitDereferenceableMetadata(I, MD);
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
+ visitDereferenceableMetadata(I, MD);
+
+ if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
+ TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_noalias))
+ visitAliasScopeListMetadata(MD);
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_alias_scope))
+ visitAliasScopeListMetadata(MD);
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_access_group))
+ visitAccessGroupMetadata(MD);
+
+ if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
+ Check(I.getType()->isPointerTy(), "align applies only to pointer types",
+ &I);
+ Check(isa<LoadInst>(I),
+ "align applies only to load instructions, "
+ "use attributes for calls or invokes",
+ &I);
+ Check(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
+ ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
+ Check(CI && CI->getType()->isIntegerTy(64),
+ "align metadata value must be an i64!", &I);
+ uint64_t Align = CI->getZExtValue();
+ Check(isPowerOf2_64(Align), "align metadata value must be a power of 2!",
+ &I);
+ Check(Align <= Value::MaximumAlignment,
+ "alignment is larger that implementation defined limit", &I);
+ }
+
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
+ visitProfMetadata(I, MD);
+
+ if (MDNode *Annotation = I.getMetadata(LLVMContext::MD_annotation))
+ visitAnnotationMetadata(Annotation);
+
+ if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
+ CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
+ visitMDNode(*N, AreDebugLocsAllowed::Yes);
+ }
+
+ if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
+ verifyFragmentExpression(*DII);
+ verifyNotEntryValue(*DII);
+ }
+
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ I.getAllMetadata(MDs);
+ for (auto Attachment : MDs) {
+ unsigned Kind = Attachment.first;
+ auto AllowLocs =
+ (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
+ ? AreDebugLocsAllowed::Yes
+ : AreDebugLocsAllowed::No;
+ visitMDNode(*Attachment.second, AllowLocs);
+ }
+
+ InstsInThisBlock.insert(&I);
+}
+
+/// Allow intrinsics to be verified in different ways.
+void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
+ Function *IF = Call.getCalledFunction();
+ Check(IF->isDeclaration(), "Intrinsic functions should never be defined!",
+ IF);
+
+ // Verify that the intrinsic prototype lines up with what the .td files
+ // describe.
+ FunctionType *IFTy = IF->getFunctionType();
+ bool IsVarArg = IFTy->isVarArg();
+
+ SmallVector<Intrinsic::IITDescriptor, 8> Table;
+ getIntrinsicInfoTableEntries(ID, Table);
+ ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
+
+ // Walk the descriptors to extract overloaded types.
+ SmallVector<Type *, 4> ArgTys;
+ Intrinsic::MatchIntrinsicTypesResult Res =
+ Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
+ Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
+ "Intrinsic has incorrect return type!", IF);
+ Check(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
+ "Intrinsic has incorrect argument type!", IF);
+
+ // Verify if the intrinsic call matches the vararg property.
+ if (IsVarArg)
+ Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
+ "Intrinsic was not defined with variable arguments!", IF);
+ else
+ Check(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
+ "Callsite was not defined with variable arguments!", IF);
+
+ // All descriptors should be absorbed by now.
+ Check(TableRef.empty(), "Intrinsic has too few arguments!", IF);
+
+ // Now that we have the intrinsic ID and the actual argument types (and we
+ // know they are legal for the intrinsic!) get the intrinsic name through the
+ // usual means. This allows us to verify the mangling of argument types into
+ // the name.
+ const std::string ExpectedName =
+ Intrinsic::getName(ID, ArgTys, IF->getParent(), IFTy);
+ Check(ExpectedName == IF->getName(),
+ "Intrinsic name not mangled correctly for type arguments! "
+ "Should be: " +
+ ExpectedName,
+ IF);
+
+ // If the intrinsic takes MDNode arguments, verify that they are either global
+ // or are local to *this* function.
+ for (Value *V : Call.args()) {
+ if (auto *MD = dyn_cast<MetadataAsValue>(V))
+ visitMetadataAsValue(*MD, Call.getCaller());
+ if (auto *Const = dyn_cast<Constant>(V))
+ Check(!Const->getType()->isX86_AMXTy(),
+ "const x86_amx is not allowed in argument!");
+ }
+
+ switch (ID) {
+ default:
+ break;
+ case Intrinsic::assume: {
+ for (auto &Elem : Call.bundle_op_infos()) {
+ Check(Elem.Tag->getKey() == "ignore" ||
+ Attribute::isExistingAttribute(Elem.Tag->getKey()),
+ "tags must be valid attribute names", Call);
+ Attribute::AttrKind Kind =
+ Attribute::getAttrKindFromName(Elem.Tag->getKey());
+ unsigned ArgCount = Elem.End - Elem.Begin;
+ if (Kind == Attribute::Alignment) {
+ Check(ArgCount <= 3 && ArgCount >= 2,
+ "alignment assumptions should have 2 or 3 arguments", Call);
+ Check(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),
+ "first argument should be a pointer", Call);
+ Check(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),
+ "second argument should be an integer", Call);
+ if (ArgCount == 3)
+ Check(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),
+ "third argument should be an integer if present", Call);
+ return;
+ }
+ Check(ArgCount <= 2, "too many arguments", Call);
+ if (Kind == Attribute::None)
+ break;
+ if (Attribute::isIntAttrKind(Kind)) {
+ Check(ArgCount == 2, "this attribute should have 2 arguments", Call);
+ Check(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
+ "the second argument should be a constant integral value", Call);
+ } else if (Attribute::canUseAsParamAttr(Kind)) {
+ Check(ArgCount == 1, "this attribute should have one argument", Call);
+ } else if (Attribute::canUseAsFnAttr(Kind)) {
+ Check(ArgCount == 0, "this attribute takes no arguments", Call);
+ }
+ }
+ break;
+ }
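+ // Illustrative assume with an alignment bundle that satisfies the
+ // checks above: a pointer first, then an integer alignment:
+ //   call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 16) ]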
+ case Intrinsic::coro_id: {
+ auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
+ if (isa<ConstantPointerNull>(InfoArg))
+ break;
+ auto *GV = dyn_cast<GlobalVariable>(InfoArg);
+ Check(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
+ "info argument of llvm.coro.id must refer to an initialized "
+ "constant");
+ Constant *Init = GV->getInitializer();
+ Check(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
+ "info argument of llvm.coro.id must refer to either a struct or "
+ "an array");
+ break;
+ }
+ case Intrinsic::fptrunc_round: {
+ // Check the rounding mode
+ Metadata *MD = nullptr;
+ auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
+ if (MAV)
+ MD = MAV->getMetadata();
+
+ Check(MD != nullptr, "missing rounding mode argument", Call);
+
+ Check(isa<MDString>(MD),
+ ("invalid value for llvm.fptrunc.round metadata operand"
+ " (the operand should be a string)"),
+ MD);
+
+ Optional<RoundingMode> RoundMode =
+ convertStrToRoundingMode(cast<MDString>(MD)->getString());
+ Check(RoundMode && *RoundMode != RoundingMode::Dynamic,
+ "unsupported rounding mode argument", Call);
+ break;
+ }
+#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
+#include "llvm/IR/VPIntrinsics.def"
+ visitVPIntrinsic(cast<VPIntrinsic>(Call));
+ break;
+#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC:
+#include "llvm/IR/ConstrainedOps.def"
+ visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
+ break;
+ case Intrinsic::dbg_declare: // llvm.dbg.declare
+ Check(isa<MetadataAsValue>(Call.getArgOperand(0)),
+ "invalid llvm.dbg.declare intrinsic call 1", Call);
+ visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
+ break;
+ case Intrinsic::dbg_addr: // llvm.dbg.addr
+ visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
+ break;
+ case Intrinsic::dbg_value: // llvm.dbg.value
+ visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
+ break;
+ case Intrinsic::dbg_label: // llvm.dbg.label
+ visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
+ break;
+ case Intrinsic::memcpy:
+ case Intrinsic::memcpy_inline:
+ case Intrinsic::memmove:
+ case Intrinsic::memset:
+ case Intrinsic::memset_inline: {
+ const auto *MI = cast<MemIntrinsic>(&Call);
+ auto IsValidAlignment = [&](unsigned Alignment) -> bool {
+ return Alignment == 0 || isPowerOf2_32(Alignment);
+ };
+ Check(IsValidAlignment(MI->getDestAlignment()),
+ "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
+ Call);
+ if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
+ Check(IsValidAlignment(MTI->getSourceAlignment()),
+ "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
+ Call);
+ }
+
+ break;
+ }
+ case Intrinsic::memcpy_element_unordered_atomic:
+ case Intrinsic::memmove_element_unordered_atomic:
+ case Intrinsic::memset_element_unordered_atomic: {
+ const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
+
+ ConstantInt *ElementSizeCI =
+ cast<ConstantInt>(AMI->getRawElementSizeInBytes());
+ const APInt &ElementSizeVal = ElementSizeCI->getValue();
+ Check(ElementSizeVal.isPowerOf2(),
+ "element size of the element-wise atomic memory intrinsic "
+ "must be a power of 2",
+ Call);
+
+ auto IsValidAlignment = [&](uint64_t Alignment) {
+ return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
+ };
+ uint64_t DstAlignment = AMI->getDestAlignment();
+ Check(IsValidAlignment(DstAlignment),
+ "incorrect alignment of the destination argument", Call);
+ if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
+ uint64_t SrcAlignment = AMT->getSourceAlignment();
+ Check(IsValidAlignment(SrcAlignment),
+ "incorrect alignment of the source argument", Call);
+ }
+ break;
+ }
+ case Intrinsic::call_preallocated_setup: {
+ auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
+ Check(NumArgs != nullptr,
+ "llvm.call.preallocated.setup argument must be a constant");
+ bool FoundCall = false;
+ for (User *U : Call.users()) {
+ auto *UseCall = dyn_cast<CallBase>(U);
+ Check(UseCall != nullptr,
+ "Uses of llvm.call.preallocated.setup must be calls");
+ const Function *Fn = UseCall->getCalledFunction();
+ if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
+ auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
+ Check(AllocArgIndex != nullptr,
+ "llvm.call.preallocated.alloc arg index must be a constant");
+ auto AllocArgIndexInt = AllocArgIndex->getValue();
+ Check(AllocArgIndexInt.sge(0) &&
+ AllocArgIndexInt.slt(NumArgs->getValue()),
+ "llvm.call.preallocated.alloc arg index must be between 0 and "
+ "corresponding "
+ "llvm.call.preallocated.setup's argument count");
+ } else if (Fn && Fn->getIntrinsicID() ==
+ Intrinsic::call_preallocated_teardown) {
+ // nothing to do
+ } else {
+ Check(!FoundCall, "Can have at most one call corresponding to a "
+ "llvm.call.preallocated.setup");
+ FoundCall = true;
+ size_t NumPreallocatedArgs = 0;
+ for (unsigned i = 0; i < UseCall->arg_size(); i++) {
+ if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
+ ++NumPreallocatedArgs;
+ }
+ }
+ Check(NumPreallocatedArgs != 0,
+ "cannot use preallocated intrinsics on a call without "
+ "preallocated arguments");
+ Check(NumArgs->equalsInt(NumPreallocatedArgs),
+ "llvm.call.preallocated.setup arg size must be equal to number "
+ "of preallocated arguments "
+ "at call site",
+ Call, *UseCall);
+ // getOperandBundle() cannot be called if more than one bundle of the
+ // same kind exists. There is already a check elsewhere for this, so
+ // skip here if we see more than one.
+ if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
+ 1) {
+ return;
+ }
+ auto PreallocatedBundle =
+ UseCall->getOperandBundle(LLVMContext::OB_preallocated);
+ Check(PreallocatedBundle,
+ "Use of llvm.call.preallocated.setup outside intrinsics "
+ "must be in \"preallocated\" operand bundle");
+ Check(PreallocatedBundle->Inputs.front().get() == &Call,
+ "preallocated bundle must have token from corresponding "
+ "llvm.call.preallocated.setup");
+ }
+ }
+ break;
+ }
+ case Intrinsic::call_preallocated_arg: {
+ auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
+ Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::call_preallocated_setup,
+ "llvm.call.preallocated.arg token argument must be a "
+ "llvm.call.preallocated.setup");
+ Check(Call.hasFnAttr(Attribute::Preallocated),
+ "llvm.call.preallocated.arg must be called with a \"preallocated\" "
+ "call site attribute");
+ break;
+ }
+ case Intrinsic::call_preallocated_teardown: {
+ auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
+ Check(Token && Token->getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::call_preallocated_setup,
+ "llvm.call.preallocated.teardown token argument must be a "
+ "llvm.call.preallocated.setup");
+ break;
+ }
+ case Intrinsic::gcroot:
+ case Intrinsic::gcwrite:
+ case Intrinsic::gcread:
+ if (ID == Intrinsic::gcroot) {
+ AllocaInst *AI =
+ dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
+ Check(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
+ Check(isa<Constant>(Call.getArgOperand(1)),
+ "llvm.gcroot parameter #2 must be a constant.", Call);
+ if (!AI->getAllocatedType()->isPointerTy()) {
+ Check(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
+ "llvm.gcroot parameter #1 must either be a pointer alloca, "
+ "or argument #2 must be a non-null constant.",
+ Call);
+ }
+ }
+
+ Check(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
+ break;
+ case Intrinsic::init_trampoline:
+ Check(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
+ "llvm.init_trampoline parameter #2 must resolve to a function.",
+ Call);
+ break;
+ case Intrinsic::prefetch:
+ Check(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
+ cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
+ "invalid arguments to llvm.prefetch", Call);
+ break;
+ case Intrinsic::stackprotector:
+ Check(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
+ "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
+ break;
+ case Intrinsic::localescape: {
+ BasicBlock *BB = Call.getParent();
+ Check(BB == &BB->getParent()->front(),
+ "llvm.localescape used outside of entry block", Call);
+ Check(!SawFrameEscape, "multiple calls to llvm.localescape in one function",
+ Call);
+ for (Value *Arg : Call.args()) {
+ if (isa<ConstantPointerNull>(Arg))
+ continue; // Null values are allowed as placeholders.
+ auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
+ Check(AI && AI->isStaticAlloca(),
+ "llvm.localescape only accepts static allocas", Call);
+ }
+ FrameEscapeInfo[BB->getParent()].first = Call.arg_size();
+ SawFrameEscape = true;
+ break;
+ }
+ case Intrinsic::localrecover: {
+ Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
+ Function *Fn = dyn_cast<Function>(FnArg);
+ Check(Fn && !Fn->isDeclaration(),
+ "llvm.localrecover first "
+ "argument must be function defined in this module",
+ Call);
+ auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
+ auto &Entry = FrameEscapeInfo[Fn];
+ Entry.second = unsigned(
+ std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
+ break;
+ }
+
+ case Intrinsic::experimental_gc_statepoint:
+ if (auto *CI = dyn_cast<CallInst>(&Call))
+ Check(!CI->isInlineAsm(),
+ "gc.statepoint support for inline assembly unimplemented", CI);
+ Check(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
+
+ verifyStatepoint(Call);
+ break;
+ case Intrinsic::experimental_gc_result: {
+ Check(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
+ // Are we tied to a statepoint properly?
+ const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
+ const Function *StatepointFn =
+ StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
+ Check(StatepointFn && StatepointFn->isDeclaration() &&
+ StatepointFn->getIntrinsicID() ==
+ Intrinsic::experimental_gc_statepoint,
+ "gc.result operand #1 must be from a statepoint", Call,
+ Call.getArgOperand(0));
+
+ // Check that result type matches wrapped callee.
+ auto *TargetFuncType =
+ cast<FunctionType>(StatepointCall->getParamElementType(2));
+ Check(Call.getType() == TargetFuncType->getReturnType(),
+ "gc.result result type does not match wrapped callee", Call);
+ break;
+ }
+ case Intrinsic::experimental_gc_relocate: {
+ Check(Call.arg_size() == 3, "wrong number of arguments", Call);
+
+ Check(isa<PointerType>(Call.getType()->getScalarType()),
+ "gc.relocate must return a pointer or a vector of pointers", Call);
+
+ // Check that this relocate is correctly tied to the statepoint
+
+ // This is case for relocate on the unwinding path of an invoke statepoint
+ if (LandingPadInst *LandingPad =
+ dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
+
+ const BasicBlock *InvokeBB =
+ LandingPad->getParent()->getUniquePredecessor();
+
+ // Landingpad relocates should have only one predecessor with invoke
+ // statepoint terminator
+ Check(InvokeBB, "safepoints should have unique landingpads",
+ LandingPad->getParent());
+ Check(InvokeBB->getTerminator(), "safepoint block should be well formed",
+ InvokeBB);
+ Check(isa<GCStatepointInst>(InvokeBB->getTerminator()),
+ "gc relocate should be linked to a statepoint", InvokeBB);
+ } else {
+ // In all other cases relocate should be tied to the statepoint directly.
+ // This covers relocates on a normal return path of invoke statepoint and
+ // relocates of a call statepoint.
+ auto Token = Call.getArgOperand(0);
+ Check(isa<GCStatepointInst>(Token),
+ "gc relocate is incorrectly tied to the statepoint", Call, Token);
+ }
+
+ // Verify rest of the relocate arguments.
+ const CallBase &StatepointCall =
+ *cast<GCRelocateInst>(Call).getStatepoint();
+
+ // Both the base and derived must be piped through the safepoint.
+ Value *Base = Call.getArgOperand(1);
+ Check(isa<ConstantInt>(Base),
+ "gc.relocate operand #2 must be integer offset", Call);
+
+ Value *Derived = Call.getArgOperand(2);
+ Check(isa<ConstantInt>(Derived),
+ "gc.relocate operand #3 must be integer offset", Call);
+
+ const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
+ const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
+
+ // Check the bounds
+ if (auto Opt = StatepointCall.getOperandBundle(LLVMContext::OB_gc_live)) {
+ Check(BaseIndex < Opt->Inputs.size(),
+ "gc.relocate: statepoint base index out of bounds", Call);
+ Check(DerivedIndex < Opt->Inputs.size(),
+ "gc.relocate: statepoint derived index out of bounds", Call);
+ }
+
+ // The relocated value must be a pointer type or a vector-of-pointer type,
+ // but gc_relocate does not need to return the same pointer type as the
+ // relocated pointer. It can be cast to the correct type later if desired.
+ // However, the two must have the same address space and 'vectorness'.
+ GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
+ Check(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
+ "gc.relocate: relocated value must be a gc pointer", Call);
+
+ auto ResultType = Call.getType();
+ auto DerivedType = Relocate.getDerivedPtr()->getType();
+ Check(ResultType->isVectorTy() == DerivedType->isVectorTy(),
+ "gc.relocate: vector relocates to vector and pointer to pointer",
+ Call);
+ Check(
+ ResultType->getPointerAddressSpace() ==
+ DerivedType->getPointerAddressSpace(),
+ "gc.relocate: relocating a pointer shouldn't change its address space",
+ Call);
+ break;
+ }
+ case Intrinsic::eh_exceptioncode:
+ case Intrinsic::eh_exceptionpointer: {
+ Check(isa<CatchPadInst>(Call.getArgOperand(0)),
+ "eh.exceptionpointer argument must be a catchpad", Call);
+ break;
+ }
+ case Intrinsic::get_active_lane_mask: {
+ Check(Call.getType()->isVectorTy(),
+ "get_active_lane_mask: must return a "
+ "vector",
+ Call);
+ auto *ElemTy = Call.getType()->getScalarType();
+ Check(ElemTy->isIntegerTy(1),
+ "get_active_lane_mask: element type is not "
+ "i1",
+ Call);
+ break;
+ }
+ case Intrinsic::masked_load: {
+ Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
+ Call);
+
+ Value *Ptr = Call.getArgOperand(0);
+ ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
+ Value *Mask = Call.getArgOperand(2);
+ Value *PassThru = Call.getArgOperand(3);
+ Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
+ Call);
+ Check(Alignment->getValue().isPowerOf2(),
+ "masked_load: alignment must be a power of 2", Call);
+
+ PointerType *PtrTy = cast<PointerType>(Ptr->getType());
+ Check(PtrTy->isOpaqueOrPointeeTypeMatches(Call.getType()),
+ "masked_load: return must match pointer type", Call);
+ Check(PassThru->getType() == Call.getType(),
+ "masked_load: pass through and return type must match", Call);
+ Check(cast<VectorType>(Mask->getType())->getElementCount() ==
+ cast<VectorType>(Call.getType())->getElementCount(),
+ "masked_load: vector mask must be same length as return", Call);
+ break;
+ }
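+ // Illustrative call that satisfies these checks: power-of-2 alignment
+ // and a mask whose length matches the returned vector:
+ //   %v = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(
+ //            <4 x float>* %p, i32 16, <4 x i1> %m, <4 x float> %pt)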
+ case Intrinsic::masked_store: {
+ Value *Val = Call.getArgOperand(0);
+ Value *Ptr = Call.getArgOperand(1);
+ ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
+ Value *Mask = Call.getArgOperand(3);
+ Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
+ Call);
+ Check(Alignment->getValue().isPowerOf2(),
+ "masked_store: alignment must be a power of 2", Call);
+
+ PointerType *PtrTy = cast<PointerType>(Ptr->getType());
+ Check(PtrTy->isOpaqueOrPointeeTypeMatches(Val->getType()),
+ "masked_store: storee must match pointer type", Call);
+ Check(cast<VectorType>(Mask->getType())->getElementCount() ==
+ cast<VectorType>(Val->getType())->getElementCount(),
+ "masked_store: vector mask must be same length as value", Call);
+ break;
+ }
+
+ case Intrinsic::masked_gather: {
+ const APInt &Alignment =
+ cast<ConstantInt>(Call.getArgOperand(1))->getValue();
+ Check(Alignment.isZero() || Alignment.isPowerOf2(),
+ "masked_gather: alignment must be 0 or a power of 2", Call);
+ break;
+ }
+ case Intrinsic::masked_scatter: {
+ const APInt &Alignment =
+ cast<ConstantInt>(Call.getArgOperand(2))->getValue();
+ Check(Alignment.isZero() || Alignment.isPowerOf2(),
+ "masked_scatter: alignment must be 0 or a power of 2", Call);
+ break;
+ }
+
+ case Intrinsic::experimental_guard: {
+ Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
+ Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+ "experimental_guard must have exactly one "
+ "\"deopt\" operand bundle");
+ break;
+ }
+
+ case Intrinsic::experimental_deoptimize: {
+ Check(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
+ Call);
+ Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+ "experimental_deoptimize must have exactly one "
+ "\"deopt\" operand bundle");
+ Check(Call.getType() == Call.getFunction()->getReturnType(),
+ "experimental_deoptimize return type must match caller return type");
+
+ if (isa<CallInst>(Call)) {
+ auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
+ Check(RI,
+ "calls to experimental_deoptimize must be followed by a return");
+
+ if (!Call.getType()->isVoidTy() && RI)
+ Check(RI->getReturnValue() == &Call,
+ "calls to experimental_deoptimize must be followed by a return "
+ "of the value computed by experimental_deoptimize");
+ }
+
+ break;
+ }
+ case Intrinsic::vector_reduce_and:
+ case Intrinsic::vector_reduce_or:
+ case Intrinsic::vector_reduce_xor:
+ case Intrinsic::vector_reduce_add:
+ case Intrinsic::vector_reduce_mul:
+ case Intrinsic::vector_reduce_smax:
+ case Intrinsic::vector_reduce_smin:
+ case Intrinsic::vector_reduce_umax:
+ case Intrinsic::vector_reduce_umin: {
+ Type *ArgTy = Call.getArgOperand(0)->getType();
+ Check(ArgTy->isIntOrIntVectorTy() && ArgTy->isVectorTy(),
+ "Intrinsic has incorrect argument type!");
+ break;
+ }
+ case Intrinsic::vector_reduce_fmax:
+ case Intrinsic::vector_reduce_fmin: {
+ Type *ArgTy = Call.getArgOperand(0)->getType();
+ Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
+ "Intrinsic has incorrect argument type!");
+ break;
+ }
+ case Intrinsic::vector_reduce_fadd:
+ case Intrinsic::vector_reduce_fmul: {
+ // Unlike the other reductions, the first argument is a start value. The
+ // second argument is the vector to be reduced.
+ Type *ArgTy = Call.getArgOperand(1)->getType();
+ Check(ArgTy->isFPOrFPVectorTy() && ArgTy->isVectorTy(),
+ "Intrinsic has incorrect argument type!");
+ break;
+ }
+ case Intrinsic::smul_fix:
+ case Intrinsic::smul_fix_sat:
+ case Intrinsic::umul_fix:
+ case Intrinsic::umul_fix_sat:
+ case Intrinsic::sdiv_fix:
+ case Intrinsic::sdiv_fix_sat:
+ case Intrinsic::udiv_fix:
+ case Intrinsic::udiv_fix_sat: {
+ Value *Op1 = Call.getArgOperand(0);
+ Value *Op2 = Call.getArgOperand(1);
+ Check(Op1->getType()->isIntOrIntVectorTy(),
+ "first operand of [us][mul|div]_fix[_sat] must be an int type or "
+ "vector of ints");
+ Check(Op2->getType()->isIntOrIntVectorTy(),
+ "second operand of [us][mul|div]_fix[_sat] must be an int type or "
+ "vector of ints");
+
+ auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
+ Check(Op3->getType()->getBitWidth() <= 32,
+ "third argument of [us][mul|div]_fix[_sat] must fit within 32 bits");
+
+ if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
+ ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
+ Check(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
+ "the scale of s[mul|div]_fix[_sat] must be less than the width of "
+ "the operands");
+ } else {
+ Check(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
+ "the scale of u[mul|div]_fix[_sat] must be less than or equal "
+ "to the width of the operands");
+ }
+ break;
+ }
+ case Intrinsic::lround:
+ case Intrinsic::llround:
+ case Intrinsic::lrint:
+ case Intrinsic::llrint: {
+ Type *ValTy = Call.getArgOperand(0)->getType();
+ Type *ResultTy = Call.getType();
+ Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
+ "Intrinsic does not support vectors", &Call);
+ break;
+ }
+ case Intrinsic::bswap: {
+ Type *Ty = Call.getType();
+ unsigned Size = Ty->getScalarSizeInBits();
+ Check(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
+ break;
+ }
+ case Intrinsic::invariant_start: {
+ ConstantInt *InvariantSize = dyn_cast<ConstantInt>(Call.getArgOperand(0));
+ Check(InvariantSize &&
+ (!InvariantSize->isNegative() || InvariantSize->isMinusOne()),
+ "invariant_start parameter must be -1, 0 or a positive number",
+ &Call);
+ break;
+ }
+ case Intrinsic::matrix_multiply:
+ case Intrinsic::matrix_transpose:
+ case Intrinsic::matrix_column_major_load:
+ case Intrinsic::matrix_column_major_store: {
+ Function *IF = Call.getCalledFunction();
+ ConstantInt *Stride = nullptr;
+ ConstantInt *NumRows;
+ ConstantInt *NumColumns;
+ VectorType *ResultTy;
+ Type *Op0ElemTy = nullptr;
+ Type *Op1ElemTy = nullptr;
+ switch (ID) {
+ case Intrinsic::matrix_multiply:
+ NumRows = cast<ConstantInt>(Call.getArgOperand(2));
+ NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
+ ResultTy = cast<VectorType>(Call.getType());
+ Op0ElemTy =
+ cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
+ Op1ElemTy =
+ cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
+ break;
+ case Intrinsic::matrix_transpose:
+ NumRows = cast<ConstantInt>(Call.getArgOperand(1));
+ NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
+ ResultTy = cast<VectorType>(Call.getType());
+ Op0ElemTy =
+ cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
+ break;
+ case Intrinsic::matrix_column_major_load: {
+ Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
+ NumRows = cast<ConstantInt>(Call.getArgOperand(3));
+ NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
+ ResultTy = cast<VectorType>(Call.getType());
+
+ PointerType *Op0PtrTy =
+ cast<PointerType>(Call.getArgOperand(0)->getType());
+ if (!Op0PtrTy->isOpaque())
+ Op0ElemTy = Op0PtrTy->getNonOpaquePointerElementType();
+ break;
+ }
+ case Intrinsic::matrix_column_major_store: {
+ Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
+ NumRows = cast<ConstantInt>(Call.getArgOperand(4));
+ NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
+ ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
+ Op0ElemTy =
+ cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
+
+ PointerType *Op1PtrTy =
+ cast<PointerType>(Call.getArgOperand(1)->getType());
+ if (!Op1PtrTy->isOpaque())
+ Op1ElemTy = Op1PtrTy->getNonOpaquePointerElementType();
+ break;
+ }
+ default:
+ llvm_unreachable("unexpected intrinsic");
+ }
+
+ Check(ResultTy->getElementType()->isIntegerTy() ||
+ ResultTy->getElementType()->isFloatingPointTy(),
+ "Result type must be an integer or floating-point type!", IF);
+
+ if (Op0ElemTy)
+ Check(ResultTy->getElementType() == Op0ElemTy,
+ "Vector element type mismatch of the result and first operand "
+ "vector!",
+ IF);
+
+ if (Op1ElemTy)
+ Check(ResultTy->getElementType() == Op1ElemTy,
+ "Vector element type mismatch of the result and second operand "
+ "vector!",
+ IF);
+
+ Check(cast<FixedVectorType>(ResultTy)->getNumElements() ==
+ NumRows->getZExtValue() * NumColumns->getZExtValue(),
+ "Result of a matrix operation does not fit in the returned vector!");
+
+ if (Stride)
+ Check(Stride->getZExtValue() >= NumRows->getZExtValue(),
+ "Stride must be greater or equal than the number of rows!", IF);
+
+ break;
+ }
+ case Intrinsic::experimental_vector_splice: {
+ VectorType *VecTy = cast<VectorType>(Call.getType());
+ int64_t Idx = cast<ConstantInt>(Call.getArgOperand(2))->getSExtValue();
+ int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
+ if (Call.getParent() && Call.getParent()->getParent()) {
+ AttributeList Attrs = Call.getParent()->getParent()->getAttributes();
+ if (Attrs.hasFnAttr(Attribute::VScaleRange))
+ KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
+ }
+ Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
+ (Idx >= 0 && Idx < KnownMinNumElements),
+ "The splice index exceeds the range [-VL, VL-1] where VL is the "
+ "known minimum number of elements in the vector. For scalable "
+ "vectors the minimum number of elements is determined from "
+ "vscale_range.",
+ &Call);
+ break;
+ }
+ case Intrinsic::experimental_stepvector: {
+ VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
+ Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
+ VecTy->getScalarSizeInBits() >= 8,
+ "experimental_stepvector only supported for vectors of integers "
+ "with a bitwidth of at least 8.",
+ &Call);
+ break;
+ }
+ case Intrinsic::vector_insert: {
+ Value *Vec = Call.getArgOperand(0);
+ Value *SubVec = Call.getArgOperand(1);
+ Value *Idx = Call.getArgOperand(2);
+ unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
+
+ VectorType *VecTy = cast<VectorType>(Vec->getType());
+ VectorType *SubVecTy = cast<VectorType>(SubVec->getType());
+
+ ElementCount VecEC = VecTy->getElementCount();
+ ElementCount SubVecEC = SubVecTy->getElementCount();
+ Check(VecTy->getElementType() == SubVecTy->getElementType(),
+ "vector_insert parameters must have the same element "
+ "type.",
+ &Call);
+ Check(IdxN % SubVecEC.getKnownMinValue() == 0,
+ "vector_insert index must be a constant multiple of "
+ "the subvector's known minimum vector length.");
+
+ // If this insertion is not the 'mixed' case where a fixed vector is
+ // inserted into a scalable vector, ensure that the insertion of the
+ // subvector does not overrun the parent vector.
+ if (VecEC.isScalable() == SubVecEC.isScalable()) {
+ Check(IdxN < VecEC.getKnownMinValue() &&
+ IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
+ "subvector operand of vector_insert would overrun the "
+ "vector being inserted into.");
+ }
+ break;
+ }
+ case Intrinsic::vector_extract: {
+ Value *Vec = Call.getArgOperand(0);
+ Value *Idx = Call.getArgOperand(1);
+ unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
+
+ VectorType *ResultTy = cast<VectorType>(Call.getType());
+ VectorType *VecTy = cast<VectorType>(Vec->getType());
+
+ ElementCount VecEC = VecTy->getElementCount();
+ ElementCount ResultEC = ResultTy->getElementCount();
+
+ Check(ResultTy->getElementType() == VecTy->getElementType(),
+ "vector_extract result must have the same element "
+ "type as the input vector.",
+ &Call);
+ Check(IdxN % ResultEC.getKnownMinValue() == 0,
+ "vector_extract index must be a constant multiple of "
+ "the result type's known minimum vector length.");
+
+ // If this extraction is not the 'mixed' case where a fixed vector is
+ // extracted from a scalable vector, ensure that the extraction does not
+ // overrun the parent vector.
+ if (VecEC.isScalable() == ResultEC.isScalable()) {
+ Check(IdxN < VecEC.getKnownMinValue() &&
+ IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(),
+ "vector_extract would overrun.");
+ }
+ break;
+ }
+ case Intrinsic::experimental_noalias_scope_decl: {
+ NoAliasScopeDecls.push_back(cast<IntrinsicInst>(&Call));
+ break;
+ }
+ case Intrinsic::preserve_array_access_index:
+ case Intrinsic::preserve_struct_access_index:
+ case Intrinsic::aarch64_ldaxr:
+ case Intrinsic::aarch64_ldxr:
+ case Intrinsic::arm_ldaex:
+ case Intrinsic::arm_ldrex: {
+ Type *ElemTy = Call.getParamElementType(0);
+ Check(ElemTy, "Intrinsic requires elementtype attribute on first argument.",
+ &Call);
+ break;
+ }
+ case Intrinsic::aarch64_stlxr:
+ case Intrinsic::aarch64_stxr:
+ case Intrinsic::arm_stlex:
+ case Intrinsic::arm_strex: {
+ Type *ElemTy = Call.getAttributes().getParamElementType(1);
+ Check(ElemTy,
+ "Intrinsic requires elementtype attribute on second argument.",
+ &Call);
+ break;
+ }
+ };
+}
+
+/// Carefully grab the subprogram from a local scope.
+///
+/// This carefully grabs the subprogram from a local scope, avoiding the
+/// built-in assertions that would typically fire.
+static DISubprogram *getSubprogram(Metadata *LocalScope) {
+ if (!LocalScope)
+ return nullptr;
+
+ if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
+ return SP;
+
+ if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
+ return getSubprogram(LB->getRawScope());
+
+ // Just return null; broken scope chains are checked elsewhere.
+ assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
+ return nullptr;
+}
+
+void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
+ if (auto *VPCast = dyn_cast<VPCastIntrinsic>(&VPI)) {
+ auto *RetTy = cast<VectorType>(VPCast->getType());
+ auto *ValTy = cast<VectorType>(VPCast->getOperand(0)->getType());
+ Check(RetTy->getElementCount() == ValTy->getElementCount(),
+ "VP cast intrinsic first argument and result vector lengths must be "
+ "equal",
+ *VPCast);
+
+ switch (VPCast->getIntrinsicID()) {
+ default:
+ llvm_unreachable("Unknown VP cast intrinsic");
+ case Intrinsic::vp_trunc:
+ Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
+ "llvm.vp.trunc intrinsic first argument and result element type "
+ "must be integer",
+ *VPCast);
+ Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
+ "llvm.vp.trunc intrinsic the bit size of first argument must be "
+ "larger than the bit size of the return type",
+ *VPCast);
+ break;
+ case Intrinsic::vp_zext:
+ case Intrinsic::vp_sext:
+ Check(RetTy->isIntOrIntVectorTy() && ValTy->isIntOrIntVectorTy(),
+ "llvm.vp.zext or llvm.vp.sext intrinsic first argument and result "
+ "element type must be integer",
+ *VPCast);
+ Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
+ "llvm.vp.zext or llvm.vp.sext intrinsic the bit size of first "
+ "argument must be smaller than the bit size of the return type",
+ *VPCast);
+ break;
+ case Intrinsic::vp_fptoui:
+ case Intrinsic::vp_fptosi:
+ Check(
+ RetTy->isIntOrIntVectorTy() && ValTy->isFPOrFPVectorTy(),
+ "llvm.vp.fptoui or llvm.vp.fptosi intrinsic first argument element "
+ "type must be floating-point and result element type must be integer",
+ *VPCast);
+ break;
+ case Intrinsic::vp_uitofp:
+ case Intrinsic::vp_sitofp:
+ Check(
+ RetTy->isFPOrFPVectorTy() && ValTy->isIntOrIntVectorTy(),
+ "llvm.vp.uitofp or llvm.vp.sitofp intrinsic first argument element "
+ "type must be integer and result element type must be floating-point",
+ *VPCast);
+ break;
+ case Intrinsic::vp_fptrunc:
+ Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
+ "llvm.vp.fptrunc intrinsic first argument and result element type "
+ "must be floating-point",
+ *VPCast);
+ Check(RetTy->getScalarSizeInBits() < ValTy->getScalarSizeInBits(),
+ "llvm.vp.fptrunc intrinsic the bit size of first argument must be "
+ "larger than the bit size of the return type",
+ *VPCast);
+ break;
+ case Intrinsic::vp_fpext:
+ Check(RetTy->isFPOrFPVectorTy() && ValTy->isFPOrFPVectorTy(),
+ "llvm.vp.fpext intrinsic first argument and result element type "
+ "must be floating-point",
+ *VPCast);
+ Check(RetTy->getScalarSizeInBits() > ValTy->getScalarSizeInBits(),
+ "llvm.vp.fpext intrinsic the bit size of first argument must be "
+ "smaller than the bit size of the return type",
+ *VPCast);
+ break;
+ case Intrinsic::vp_ptrtoint:
+ Check(RetTy->isIntOrIntVectorTy() && ValTy->isPtrOrPtrVectorTy(),
+ "llvm.vp.ptrtoint intrinsic first argument element type must be "
+ "pointer and result element type must be integer",
+ *VPCast);
+ break;
+ case Intrinsic::vp_inttoptr:
+ Check(RetTy->isPtrOrPtrVectorTy() && ValTy->isIntOrIntVectorTy(),
+ "llvm.vp.inttoptr intrinsic first argument element type must be "
+ "integer and result element type must be pointer",
+ *VPCast);
+ break;
+ }
+ }
+ if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
+ auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
+ Check(CmpInst::isFPPredicate(Pred),
+ "invalid predicate for VP FP comparison intrinsic", &VPI);
+ }
+ if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
+ auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
+ Check(CmpInst::isIntPredicate(Pred),
+ "invalid predicate for VP integer comparison intrinsic", &VPI);
+ }
+}
+
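+// Constrained FP intrinsics append their FP-environment operands after the
+// normal arguments, e.g. (illustrative):
+//   %s = call float @llvm.experimental.constrained.fadd.f32(
+//            float %a, float %b,
+//            metadata !"round.dynamic", metadata !"fpexcept.strict")
+// Here NARG == 2, plus one rounding-mode and one exception-behavior operand,
+// giving the four arguments checked below.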
+void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
+ unsigned NumOperands;
+ bool HasRoundingMD;
+ switch (FPI.getIntrinsicID()) {
+#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
+ case Intrinsic::INTRINSIC: \
+ NumOperands = NARG; \
+ HasRoundingMD = ROUND_MODE; \
+ break;
+#include "llvm/IR/ConstrainedOps.def"
+ default:
+ llvm_unreachable("Invalid constrained FP intrinsic!");
+ }
+ NumOperands += (1 + HasRoundingMD);
+ // Compare intrinsics carry an extra predicate metadata operand.
+ if (isa<ConstrainedFPCmpIntrinsic>(FPI))
+ NumOperands += 1;
+ Check((FPI.arg_size() == NumOperands),
+ "invalid arguments for constrained FP intrinsic", &FPI);
+
+ switch (FPI.getIntrinsicID()) {
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint: {
+ Type *ValTy = FPI.getArgOperand(0)->getType();
+ Type *ResultTy = FPI.getType();
+ Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
+ "Intrinsic does not support vectors", &FPI);
+    break;
+  }
+
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround: {
+ Type *ValTy = FPI.getArgOperand(0)->getType();
+ Type *ResultTy = FPI.getType();
+ Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
+ "Intrinsic does not support vectors", &FPI);
+ break;
+ }
+
+ case Intrinsic::experimental_constrained_fcmp:
+ case Intrinsic::experimental_constrained_fcmps: {
+ auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
+ Check(CmpInst::isFPPredicate(Pred),
+ "invalid predicate for constrained FP comparison intrinsic", &FPI);
+ break;
+ }
+
+ case Intrinsic::experimental_constrained_fptosi:
+ case Intrinsic::experimental_constrained_fptoui: {
+ Value *Operand = FPI.getArgOperand(0);
+ uint64_t NumSrcElem = 0;
+ Check(Operand->getType()->isFPOrFPVectorTy(),
+ "Intrinsic first argument must be floating point", &FPI);
+ if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
+ NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
+ }
+
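+    // From here on, verify the result type; Operand is reused to refer to
+    // the intrinsic call itself.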
+ Operand = &FPI;
+ Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+ "Intrinsic first argument and result disagree on vector use", &FPI);
+ Check(Operand->getType()->isIntOrIntVectorTy(),
+ "Intrinsic result must be an integer", &FPI);
+ if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
+ Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
+ "Intrinsic first argument and result vector lengths must be equal",
+ &FPI);
+ }
+    break;
+  }
+
+ case Intrinsic::experimental_constrained_sitofp:
+ case Intrinsic::experimental_constrained_uitofp: {
+ Value *Operand = FPI.getArgOperand(0);
+ uint64_t NumSrcElem = 0;
+ Check(Operand->getType()->isIntOrIntVectorTy(),
+ "Intrinsic first argument must be integer", &FPI);
+ if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
+ NumSrcElem = cast<FixedVectorType>(OperandT)->getNumElements();
+ }
+
+ Operand = &FPI;
+ Check((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+ "Intrinsic first argument and result disagree on vector use", &FPI);
+ Check(Operand->getType()->isFPOrFPVectorTy(),
+ "Intrinsic result must be a floating point", &FPI);
+ if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
+ Check(NumSrcElem == cast<FixedVectorType>(OperandT)->getNumElements(),
+ "Intrinsic first argument and result vector lengths must be equal",
+ &FPI);
+ }
+    break;
+  }
+
+ case Intrinsic::experimental_constrained_fptrunc:
+ case Intrinsic::experimental_constrained_fpext: {
+ Value *Operand = FPI.getArgOperand(0);
+ Type *OperandTy = Operand->getType();
+ Value *Result = &FPI;
+ Type *ResultTy = Result->getType();
+ Check(OperandTy->isFPOrFPVectorTy(),
+ "Intrinsic first argument must be FP or FP vector", &FPI);
+ Check(ResultTy->isFPOrFPVectorTy(),
+ "Intrinsic result must be FP or FP vector", &FPI);
+ Check(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
+ "Intrinsic first argument and result disagree on vector use", &FPI);
+ if (OperandTy->isVectorTy()) {
+ Check(cast<FixedVectorType>(OperandTy)->getNumElements() ==
+ cast<FixedVectorType>(ResultTy)->getNumElements(),
+ "Intrinsic first argument and result vector lengths must be equal",
+ &FPI);
+ }
+ if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
+ Check(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
+ "Intrinsic first argument's type must be larger than result type",
+ &FPI);
+ } else {
+ Check(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
+ "Intrinsic first argument's type must be smaller than result type",
+ &FPI);
+ }
+    break;
+  }
+
+ default:
+ break;
+ }
+
+ // If a non-metadata argument is passed in a metadata slot then the
+ // error will be caught earlier when the incorrect argument doesn't
+ // match the specification in the intrinsic call table. Thus, no
+ // argument type check is needed here.
+
+ Check(FPI.getExceptionBehavior().has_value(),
+ "invalid exception behavior argument", &FPI);
+ if (HasRoundingMD) {
+ Check(FPI.getRoundingMode().has_value(), "invalid rounding mode argument",
+ &FPI);
+ }
+}
+
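+// A debug variable intrinsic ties a value to a variable and an expression,
+// e.g. (illustrative metadata numbers):
+//   call void @llvm.dbg.value(metadata i32 %x, metadata !10,
+//                             metadata !DIExpression()), !dbg !11
+// where !10 is a DILocalVariable and !11 is a DILocation; their scopes must
+// resolve to the same DISubprogram.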
+void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
+ auto *MD = DII.getRawLocation();
+ CheckDI(isa<ValueAsMetadata>(MD) || isa<DIArgList>(MD) ||
+ (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
+ "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
+ CheckDI(isa<DILocalVariable>(DII.getRawVariable()),
+ "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
+ DII.getRawVariable());
+ CheckDI(isa<DIExpression>(DII.getRawExpression()),
+ "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
+ DII.getRawExpression());
+
+ // Ignore broken !dbg attachments; they're checked elsewhere.
+ if (MDNode *N = DII.getDebugLoc().getAsMDNode())
+ if (!isa<DILocation>(N))
+ return;
+
+ BasicBlock *BB = DII.getParent();
+ Function *F = BB ? BB->getParent() : nullptr;
+
+ // The scopes for variables and !dbg attachments must agree.
+ DILocalVariable *Var = DII.getVariable();
+ DILocation *Loc = DII.getDebugLoc();
+ CheckDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
+ &DII, BB, F);
+
+ DISubprogram *VarSP = getSubprogram(Var->getRawScope());
+ DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
+ if (!VarSP || !LocSP)
+ return; // Broken scope chains are checked elsewhere.
+
+ CheckDI(VarSP == LocSP,
+ "mismatched subprogram between llvm.dbg." + Kind +
+ " variable and !dbg attachment",
+ &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
+ Loc->getScope()->getSubprogram());
+
+ // This check is redundant with one in visitLocalVariable().
+ CheckDI(isType(Var->getRawType()), "invalid type ref", Var,
+ Var->getRawType());
+ verifyFnArgs(DII);
+}
+
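+// For example (illustrative metadata numbers):
+//   call void @llvm.dbg.label(metadata !12), !dbg !13
+// where !12 is a DILabel and !13 is a DILocation within the same subprogram.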
+void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
+ CheckDI(isa<DILabel>(DLI.getRawLabel()),
+ "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
+ DLI.getRawLabel());
+
+ // Ignore broken !dbg attachments; they're checked elsewhere.
+ if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
+ if (!isa<DILocation>(N))
+ return;
+
+ BasicBlock *BB = DLI.getParent();
+ Function *F = BB ? BB->getParent() : nullptr;
+
+ // The scopes for variables and !dbg attachments must agree.
+ DILabel *Label = DLI.getLabel();
+ DILocation *Loc = DLI.getDebugLoc();
+ Check(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment", &DLI,
+ BB, F);
+
+ DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
+ DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
+ if (!LabelSP || !LocSP)
+ return;
+
+ CheckDI(LabelSP == LocSP,
+ "mismatched subprogram between llvm.dbg." + Kind +
+ " label and !dbg attachment",
+ &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
+ Loc->getScope()->getSubprogram());
+}
+
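+// A fragment expression describes a piece of a variable, e.g. (illustrative):
+//   !DIExpression(DW_OP_LLVM_fragment, 0, 32)
+// covers bits [0, 32) of the variable. The checks below reject fragments that
+// overhang the variable or cover it entirely.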
+void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
+ DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
+ DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
+
+ // We don't know whether this intrinsic verified correctly.
+ if (!V || !E || !E->isValid())
+ return;
+
+ // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
+ auto Fragment = E->getFragmentInfo();
+ if (!Fragment)
+ return;
+
+ // The frontend helps out GDB by emitting the members of local anonymous
+ // unions as artificial local variables with shared storage. When SROA splits
+ // the storage for artificial local variables that are smaller than the entire
+ // union, the overhang piece will be outside of the allotted space for the
+ // variable and this check fails.
+ // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
+ if (V->isArtificial())
+ return;
+
+ verifyFragmentExpression(*V, *Fragment, &I);
+}
+
+template <typename ValueOrMetadata>
+void Verifier::verifyFragmentExpression(const DIVariable &V,
+ DIExpression::FragmentInfo Fragment,
+ ValueOrMetadata *Desc) {
+ // If there's no size, the type is broken, but that should be checked
+ // elsewhere.
+ auto VarSize = V.getSizeInBits();
+ if (!VarSize)
+ return;
+
+ unsigned FragSize = Fragment.SizeInBits;
+ unsigned FragOffset = Fragment.OffsetInBits;
+ CheckDI(FragSize + FragOffset <= *VarSize,
+ "fragment is larger than or outside of variable", Desc, &V);
+ CheckDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
+}
+
+void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
+ // This function does not take the scope of noninlined function arguments into
+  // account. Don't run it if the current function is nodebug, because it may
+ // contain inlined debug intrinsics.
+ if (!HasDebugInfo)
+ return;
+
+ // For performance reasons only check non-inlined ones.
+ if (I.getDebugLoc()->getInlinedAt())
+ return;
+
+ DILocalVariable *Var = I.getVariable();
+ CheckDI(Var, "dbg intrinsic without variable");
+
+ unsigned ArgNo = Var->getArg();
+ if (!ArgNo)
+ return;
+
+ // Verify there are no duplicate function argument debug info entries.
+ // These will cause hard-to-debug assertions in the DWARF backend.
+ if (DebugFnArgs.size() < ArgNo)
+ DebugFnArgs.resize(ArgNo, nullptr);
+
+ auto *Prev = DebugFnArgs[ArgNo - 1];
+ DebugFnArgs[ArgNo - 1] = Var;
+ CheckDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
+ Prev, Var);
+}
+
+void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
+ DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
+
+ // We don't know whether this intrinsic verified correctly.
+ if (!E || !E->isValid())
+ return;
+
+ CheckDI(!E->isEntryValue(), "Entry values are only allowed in MIR", &I);
+}
+
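+// Every visited DICompileUnit must also appear in the module-level named
+// metadata, e.g. (illustrative operands):
+//   !llvm.dbg.cu = !{!0, !1}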
+void Verifier::verifyCompileUnits() {
+ // When more than one Module is imported into the same context, such as during
+ // an LTO build before linking the modules, ODR type uniquing may cause types
+ // to point to a different CU. This check does not make sense in this case.
+ if (M.getContext().isODRUniquingDebugTypes())
+ return;
+ auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
+ SmallPtrSet<const Metadata *, 2> Listed;
+ if (CUs)
+ Listed.insert(CUs->op_begin(), CUs->op_end());
+ for (auto *CU : CUVisited)
+ CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
+ CUVisited.clear();
+}
+
+void Verifier::verifyDeoptimizeCallingConvs() {
+ if (DeoptimizeDeclarations.empty())
+ return;
+
+ const Function *First = DeoptimizeDeclarations[0];
+ for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
+ Check(First->getCallingConv() == F->getCallingConv(),
+ "All llvm.experimental.deoptimize declarations must have the same "
+ "calling convention",
+ First, F);
+ }
+}
+
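+// For example, a well-formed use of the bundle looks like (illustrative;
+// @foo is a placeholder):
+//   %p = call ptr @foo() [ "clang.arc.attachedcall"(
+//            ptr @llvm.objc.retainAutoreleasedReturnValue) ]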
+void Verifier::verifyAttachedCallBundle(const CallBase &Call,
+ const OperandBundleUse &BU) {
+ FunctionType *FTy = Call.getFunctionType();
+
+ Check((FTy->getReturnType()->isPointerTy() ||
+ (Call.doesNotReturn() && FTy->getReturnType()->isVoidTy())),
+ "a call with operand bundle \"clang.arc.attachedcall\" must call a "
+ "function returning a pointer or a non-returning function that has a "
+ "void return type",
+ Call);
+
+ Check(BU.Inputs.size() == 1 && isa<Function>(BU.Inputs.front()),
+ "operand bundle \"clang.arc.attachedcall\" requires one function as "
+ "an argument",
+ Call);
+
+ auto *Fn = cast<Function>(BU.Inputs.front());
+ Intrinsic::ID IID = Fn->getIntrinsicID();
+
+ if (IID) {
+ Check((IID == Intrinsic::objc_retainAutoreleasedReturnValue ||
+ IID == Intrinsic::objc_unsafeClaimAutoreleasedReturnValue),
+ "invalid function argument", Call);
+ } else {
+ StringRef FnName = Fn->getName();
+ Check((FnName == "objc_retainAutoreleasedReturnValue" ||
+ FnName == "objc_unsafeClaimAutoreleasedReturnValue"),
+ "invalid function argument", Call);
+ }
+}
+
+void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
+ bool HasSource = F.getSource().has_value();
+ if (!HasSourceDebugInfo.count(&U))
+ HasSourceDebugInfo[&U] = HasSource;
+ CheckDI(HasSource == HasSourceDebugInfo[&U],
+ "inconsistent use of embedded source");
+}
+
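+// For example (illustrative metadata numbers):
+//   call void @llvm.experimental.noalias.scope.decl(metadata !2)
+//   !2 = !{!3}      ; scope list holding exactly one scope
+//   !3 = !{!3, !4}  ; the scope: a self-reference, then its domain
+//   !4 = !{!4}      ; the scope domain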
+void Verifier::verifyNoAliasScopeDecl() {
+ if (NoAliasScopeDecls.empty())
+ return;
+
+  // Only a single scope may be declared at a time.
+ for (auto *II : NoAliasScopeDecls) {
+ assert(II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl &&
+ "Not a llvm.experimental.noalias.scope.decl ?");
+ const auto *ScopeListMV = dyn_cast<MetadataAsValue>(
+ II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
+ Check(ScopeListMV != nullptr,
+ "llvm.experimental.noalias.scope.decl must have a MetadataAsValue "
+ "argument",
+ II);
+
+ const auto *ScopeListMD = dyn_cast<MDNode>(ScopeListMV->getMetadata());
+ Check(ScopeListMD != nullptr, "!id.scope.list must point to an MDNode", II);
+ Check(ScopeListMD->getNumOperands() == 1,
+ "!id.scope.list must point to a list with a single scope", II);
+ visitAliasScopeListMetadata(ScopeListMD);
+ }
+
+ // Only check the domination rule when requested. Once all passes have been
+ // adapted this option can go away.
+ if (!VerifyNoAliasScopeDomination)
+ return;
+
+ // Now sort the intrinsics based on the scope MDNode so that declarations of
+ // the same scopes are next to each other.
+ auto GetScope = [](IntrinsicInst *II) {
+ const auto *ScopeListMV = cast<MetadataAsValue>(
+ II->getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
+ return &cast<MDNode>(ScopeListMV->getMetadata())->getOperand(0);
+ };
+
+  // We are sorting on MDNode pointers here. For valid input IR this is OK.
+ // TODO: Sort on Metadata ID to avoid non-deterministic error messages.
+ auto Compare = [GetScope](IntrinsicInst *Lhs, IntrinsicInst *Rhs) {
+ return GetScope(Lhs) < GetScope(Rhs);
+ };
+
+ llvm::sort(NoAliasScopeDecls, Compare);
+
+ // Go over the intrinsics and check that for the same scope, they are not
+ // dominating each other.
+ auto ItCurrent = NoAliasScopeDecls.begin();
+ while (ItCurrent != NoAliasScopeDecls.end()) {
+ auto CurScope = GetScope(*ItCurrent);
+ auto ItNext = ItCurrent;
+ do {
+ ++ItNext;
+ } while (ItNext != NoAliasScopeDecls.end() &&
+ GetScope(*ItNext) == CurScope);
+
+ // [ItCurrent, ItNext) represents the declarations for the same scope.
+    // Ensure they are not dominating each other, but only if it is not too
+ // expensive.
+ if (ItNext - ItCurrent < 32)
+ for (auto *I : llvm::make_range(ItCurrent, ItNext))
+ for (auto *J : llvm::make_range(ItCurrent, ItNext))
+ if (I != J)
+ Check(!DT.dominates(I, J),
+ "llvm.experimental.noalias.scope.decl dominates another one "
+ "with the same scope",
+ I);
+ ItCurrent = ItNext;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Implement the public interfaces to this file...
+//===----------------------------------------------------------------------===//
+
+bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
+ Function &F = const_cast<Function &>(f);
+
+ // Don't use a raw_null_ostream. Printing IR is expensive.
+ Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
+
+ // Note that this function's return value is inverted from what you would
+ // expect of a function called "verify".
+ return !V.verify(F);
+}
+
+bool llvm::verifyModule(const Module &M, raw_ostream *OS,
+ bool *BrokenDebugInfo) {
+ // Don't use a raw_null_ostream. Printing IR is expensive.
+ Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
+
+ bool Broken = false;
+ for (const Function &F : M)
+ Broken |= !V.verify(F);
+
+ Broken |= !V.verify();
+ if (BrokenDebugInfo)
+ *BrokenDebugInfo = V.hasBrokenDebugInfo();
+ // Note that this function's return value is inverted from what you would
+ // expect of a function called "verify".
+ return Broken;
+}
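+
+// Typical usage (illustrative); note the inverted sense of the result:
+//   if (llvm::verifyModule(M, &llvm::errs()))
+//     report_fatal_error("broken module");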
+
+namespace {
+
+struct VerifierLegacyPass : public FunctionPass {
+ static char ID;
+
+ std::unique_ptr<Verifier> V;
+ bool FatalErrors = true;
+
+ VerifierLegacyPass() : FunctionPass(ID) {
+ initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+ explicit VerifierLegacyPass(bool FatalErrors)
+ : FunctionPass(ID),
+ FatalErrors(FatalErrors) {
+ initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool doInitialization(Module &M) override {
+ V = std::make_unique<Verifier>(
+ &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
+ return false;
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (!V->verify(F) && FatalErrors) {
+ errs() << "in function " << F.getName() << '\n';
+ report_fatal_error("Broken function found, compilation aborted!");
+ }
+ return false;
+ }
+
+ bool doFinalization(Module &M) override {
+ bool HasErrors = false;
+ for (Function &F : M)
+ if (F.isDeclaration())
+ HasErrors |= !V->verify(F);
+
+ HasErrors |= !V->verify();
+ if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
+ report_fatal_error("Broken module found, compilation aborted!");
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+} // end anonymous namespace
+
+/// Helper to issue a failure from the TBAA verification.
+template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
+ if (Diagnostic)
+ return Diagnostic->CheckFailed(Args...);
+}
+
+#define CheckTBAA(C, ...) \
+ do { \
+ if (!(C)) { \
+ CheckFailed(__VA_ARGS__); \
+ return false; \
+ } \
+ } while (false)
+
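+// For reference, struct-path TBAA metadata in the old format looks like
+// (illustrative):
+//   !0 = !{!"Simple C/C++ TBAA"}           ; root
+//   !1 = !{!"omnipotent char", !0, i64 0}  ; scalar type node
+//   !2 = !{!"int", !1, i64 0}              ; scalar type node
+//   !3 = !{!"S", !2, i64 0, !2, i64 4}     ; struct-type (base) node
+//   !4 = !{!3, !2, i64 4}                  ; access tag: the int at offset 4
+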
+/// Verify that \p BaseNode can be used as the "base type" in the struct-path
+/// TBAA scheme. This means \p BaseNode is either a scalar node, or a
+/// struct-type node describing an aggregate data structure (like a struct).
+TBAAVerifier::TBAABaseNodeSummary
+TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
+ bool IsNewFormat) {
+ if (BaseNode->getNumOperands() < 2) {
+ CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
+ return {true, ~0u};
+ }
+
+ auto Itr = TBAABaseNodes.find(BaseNode);
+ if (Itr != TBAABaseNodes.end())
+ return Itr->second;
+
+ auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
+ auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
+ (void)InsertResult;
+ assert(InsertResult.second && "We just checked!");
+ return Result;
+}
+
+TBAAVerifier::TBAABaseNodeSummary
+TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
+ bool IsNewFormat) {
+ const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
+
+ if (BaseNode->getNumOperands() == 2) {
+ // Scalar nodes can only be accessed at offset 0.
+ return isValidScalarTBAANode(BaseNode)
+ ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
+ : InvalidNode;
+ }
+
+ if (IsNewFormat) {
+ if (BaseNode->getNumOperands() % 3 != 0) {
+ CheckFailed("Access tag nodes must have the number of operands that is a "
+ "multiple of 3!", BaseNode);
+ return InvalidNode;
+ }
+ } else {
+ if (BaseNode->getNumOperands() % 2 != 1) {
+ CheckFailed("Struct tag nodes must have an odd number of operands!",
+ BaseNode);
+ return InvalidNode;
+ }
+ }
+
+ // Check the type size field.
+ if (IsNewFormat) {
+ auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
+ BaseNode->getOperand(1));
+ if (!TypeSizeNode) {
+ CheckFailed("Type size nodes must be constants!", &I, BaseNode);
+ return InvalidNode;
+ }
+ }
+
+ // Check the type name field. In the new format it can be anything.
+ if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
+ CheckFailed("Struct tag nodes have a string as their first operand",
+ BaseNode);
+ return InvalidNode;
+ }
+
+ bool Failed = false;
+
+ Optional<APInt> PrevOffset;
+ unsigned BitWidth = ~0u;
+
+ // We've already checked that BaseNode is not a degenerate root node with one
+ // operand in \c verifyTBAABaseNode, so this loop should run at least once.
+ unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
+ unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
+ for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
+ Idx += NumOpsPerField) {
+ const MDOperand &FieldTy = BaseNode->getOperand(Idx);
+ const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
+ if (!isa<MDNode>(FieldTy)) {
+ CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
+ Failed = true;
+ continue;
+ }
+
+ auto *OffsetEntryCI =
+ mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
+ if (!OffsetEntryCI) {
+ CheckFailed("Offset entries must be constants!", &I, BaseNode);
+ Failed = true;
+ continue;
+ }
+
+ if (BitWidth == ~0u)
+ BitWidth = OffsetEntryCI->getBitWidth();
+
+ if (OffsetEntryCI->getBitWidth() != BitWidth) {
+ CheckFailed(
+ "Bitwidth between the offsets and struct type entries must match", &I,
+ BaseNode);
+ Failed = true;
+ continue;
+ }
+
+ // NB! As far as I can tell, we generate a non-strictly increasing offset
+    // sequence only from structs that have zero-size bit fields. When
+    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
+    // pick the lexically last matching field in the struct type metadata
+    // node. This mirrors the actual behavior of the alias analysis
+    // implementation.
+ bool IsAscending =
+ !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
+
+ if (!IsAscending) {
+ CheckFailed("Offsets must be increasing!", &I, BaseNode);
+ Failed = true;
+ }
+
+ PrevOffset = OffsetEntryCI->getValue();
+
+ if (IsNewFormat) {
+ auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
+ BaseNode->getOperand(Idx + 2));
+ if (!MemberSizeNode) {
+ CheckFailed("Member size entries must be constants!", &I, BaseNode);
+ Failed = true;
+ continue;
+ }
+ }
+ }
+
+ return Failed ? InvalidNode
+ : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
+}
+
+static bool IsRootTBAANode(const MDNode *MD) {
+ return MD->getNumOperands() < 2;
+}
+
+static bool IsScalarTBAANodeImpl(const MDNode *MD,
+ SmallPtrSetImpl<const MDNode *> &Visited) {
+ if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
+ return false;
+
+ if (!isa<MDString>(MD->getOperand(0)))
+ return false;
+
+ if (MD->getNumOperands() == 3) {
+ auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
+ if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
+ return false;
+ }
+
+ auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
+ return Parent && Visited.insert(Parent).second &&
+ (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
+}
+
+bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
+ auto ResultIt = TBAAScalarNodes.find(MD);
+ if (ResultIt != TBAAScalarNodes.end())
+ return ResultIt->second;
+
+ SmallPtrSet<const MDNode *, 4> Visited;
+ bool Result = IsScalarTBAANodeImpl(MD, Visited);
+ auto InsertResult = TBAAScalarNodes.insert({MD, Result});
+ (void)InsertResult;
+ assert(InsertResult.second && "Just checked!");
+
+ return Result;
+}
+
+/// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
+/// Offset in place to be the offset within the field node returned.
+///
+/// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
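+///
+/// For example (illustrative): with BaseNode == !{!"S", !int, i64 0, !float,
+/// i64 8} and Offset == 10, the last field (offset 8) is selected, Offset is
+/// updated to 2, and the !float type node is returned.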
+MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
+ const MDNode *BaseNode,
+ APInt &Offset,
+ bool IsNewFormat) {
+ assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
+
+ // Scalar nodes have only one possible "field" -- their parent in the access
+ // hierarchy. Offset must be zero at this point, but our caller is supposed
+ // to check that.
+ if (BaseNode->getNumOperands() == 2)
+ return cast<MDNode>(BaseNode->getOperand(1));
+
+ unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
+ unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
+ for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
+ Idx += NumOpsPerField) {
+ auto *OffsetEntryCI =
+ mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
+ if (OffsetEntryCI->getValue().ugt(Offset)) {
+ if (Idx == FirstFieldOpNo) {
+ CheckFailed("Could not find TBAA parent in struct type node", &I,
+ BaseNode, &Offset);
+ return nullptr;
+ }
+
+ unsigned PrevIdx = Idx - NumOpsPerField;
+ auto *PrevOffsetEntryCI =
+ mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
+ Offset -= PrevOffsetEntryCI->getValue();
+ return cast<MDNode>(BaseNode->getOperand(PrevIdx));
+ }
+ }
+
+ unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
+ auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
+ BaseNode->getOperand(LastIdx + 1));
+ Offset -= LastOffsetEntryCI->getValue();
+ return cast<MDNode>(BaseNode->getOperand(LastIdx));
+}
+
+static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
+ if (!Type || Type->getNumOperands() < 3)
+ return false;
+
+  // In the new format, type nodes shall have a reference to the parent type
+  // as their first operand.
+ return isa_and_nonnull<MDNode>(Type->getOperand(0));
+}
+
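+// Access tags are !{Base, Access, Offset} with an optional immutability flag
+// in the old format, and !{Base, Access, Offset, Size} plus the optional flag
+// in the new format. For example (illustrative): !5 = !{!3, !2, i64 4}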
+bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
+ CheckTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
+ isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
+ isa<AtomicCmpXchgInst>(I),
+ "This instruction shall not have a TBAA access tag!", &I);
+
+ bool IsStructPathTBAA =
+ isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
+
+ CheckTBAA(IsStructPathTBAA,
+ "Old-style TBAA is no longer allowed, use struct-path TBAA instead",
+ &I);
+
+ MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
+ MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
+
+ bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
+
+ if (IsNewFormat) {
+ CheckTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
+ "Access tag metadata must have either 4 or 5 operands", &I, MD);
+ } else {
+ CheckTBAA(MD->getNumOperands() < 5,
+ "Struct tag metadata must have either 3 or 4 operands", &I, MD);
+ }
+
+ // Check the access size field.
+ if (IsNewFormat) {
+ auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
+ MD->getOperand(3));
+ CheckTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
+ }
+
+ // Check the immutability flag.
+ unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
+ if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
+ auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
+ MD->getOperand(ImmutabilityFlagOpNo));
+ CheckTBAA(IsImmutableCI,
+ "Immutability tag on struct tag metadata must be a constant", &I,
+ MD);
+ CheckTBAA(
+ IsImmutableCI->isZero() || IsImmutableCI->isOne(),
+ "Immutability part of the struct tag metadata must be either 0 or 1",
+ &I, MD);
+ }
+
+ CheckTBAA(BaseNode && AccessType,
+ "Malformed struct tag metadata: base and access-type "
+ "should be non-null and point to Metadata nodes",
+ &I, MD, BaseNode, AccessType);
+
+ if (!IsNewFormat) {
+ CheckTBAA(isValidScalarTBAANode(AccessType),
+ "Access type node must be a valid scalar type", &I, MD,
+ AccessType);
+ }
+
+ auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
+ CheckTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
+
+ APInt Offset = OffsetCI->getValue();
+ bool SeenAccessTypeInPath = false;
+
+ SmallPtrSet<MDNode *, 4> StructPath;
+
+ for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
+ BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
+ IsNewFormat)) {
+ if (!StructPath.insert(BaseNode).second) {
+ CheckFailed("Cycle detected in struct path", &I, MD);
+ return false;
+ }
+
+ bool Invalid;
+ unsigned BaseNodeBitWidth;
+ std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
+ IsNewFormat);
+
+ // If the base node is invalid in itself, then we've already printed all the
+ // errors we wanted to print.
+ if (Invalid)
+ return false;
+
+ SeenAccessTypeInPath |= BaseNode == AccessType;
+
+ if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
+ CheckTBAA(Offset == 0, "Offset not zero at the point of scalar access",
+ &I, MD, &Offset);
+
+ CheckTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
+ (BaseNodeBitWidth == 0 && Offset == 0) ||
+ (IsNewFormat && BaseNodeBitWidth == ~0u),
+ "Access bit-width not the same as description bit-width", &I, MD,
+ BaseNodeBitWidth, Offset.getBitWidth());
+
+ if (IsNewFormat && SeenAccessTypeInPath)
+ break;
+ }
+
+ CheckTBAA(SeenAccessTypeInPath, "Did not see access type in access path!", &I,
+ MD);
+ return true;
+}
+
+char VerifierLegacyPass::ID = 0;
+INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
+
+FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
+ return new VerifierLegacyPass(FatalErrors);
+}
+
+AnalysisKey VerifierAnalysis::Key;
+VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
+ ModuleAnalysisManager &) {
+ Result Res;
+ Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
+ return Res;
+}
+
+VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
+ FunctionAnalysisManager &) {
+ return { llvm::verifyFunction(F, &dbgs()), false };
+}
+
+PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
+ auto Res = AM.getResult<VerifierAnalysis>(M);
+ if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
+ report_fatal_error("Broken module found, compilation aborted!");
+
+ return PreservedAnalyses::all();
+}
+
+PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
+ auto res = AM.getResult<VerifierAnalysis>(F);
+ if (res.IRBroken && FatalErrors)
+ report_fatal_error("Broken function found, compilation aborted!");
+
+ return PreservedAnalyses::all();
+}