Diffstat (limited to 'contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h')
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h  1247
1 file changed, 918 insertions, 329 deletions
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index 95c0b7b4d7c0..60e6841e1b3d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -26,6 +26,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
@@ -38,29 +39,29 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <optional>
namespace llvm {
class BasicBlock;
class LLVMContext;
class MDNode;
-class Module;
class SwitchInst;
class Twine;
class Value;
+class CanonicalLoopInfo;
}
namespace clang {
class ASTContext;
-class BlockDecl;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
class Decl;
class LabelDecl;
-class EnumConstantDecl;
class FunctionDecl;
class FunctionProtoType;
class LabelStmt;
@@ -79,7 +80,6 @@ class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
class OMPUseDevicePtrClause;
class OMPUseDeviceAddrClause;
-class ReturnsNonNullAttr;
class SVETypeFlags;
class OMPExecutableDirective;
@@ -91,12 +91,10 @@ namespace CodeGen {
class CodeGenTypes;
class CGCallee;
class CGFunctionInfo;
-class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
class BlockByrefHelpers;
class BlockByrefInfo;
-class BlockFlags;
class BlockFieldFlags;
class RegionCodeGenTy;
class TargetCodeGenInfo;
@@ -121,7 +119,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
- SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \
+ SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
@@ -139,7 +137,8 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
- SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
+ SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) \
+ SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)
enum SanitizerHandler {
#define SANITIZER_CHECK(Enum, Name, Version) Enum,
@@ -154,6 +153,9 @@ struct DominatingLLVMValue {
/// Answer whether the given value needs extra work to be saved.
static bool needsSaving(llvm::Value *value) {
+ if (!value)
+ return false;
+
// If it's not an instruction, we don't need to save.
if (!isa<llvm::Instruction>(value)) return false;
@@ -180,20 +182,28 @@ template <> struct DominatingValue<Address> {
typedef Address type;
struct saved_type {
- DominatingLLVMValue::saved_type SavedValue;
+ DominatingLLVMValue::saved_type BasePtr;
+ llvm::Type *ElementType;
CharUnits Alignment;
+ DominatingLLVMValue::saved_type Offset;
+ llvm::PointerType *EffectiveType;
};
static bool needsSaving(type value) {
- return DominatingLLVMValue::needsSaving(value.getPointer());
+ if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
+ DominatingLLVMValue::needsSaving(value.getOffset()))
+ return true;
+ return false;
}
static saved_type save(CodeGenFunction &CGF, type value) {
- return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getAlignment() };
+ return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
+ value.getElementType(), value.getAlignment(),
+ DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
}
static type restore(CodeGenFunction &CGF, saved_type value) {
- return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.Alignment);
+ return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
+ value.ElementType, value.Alignment, CGPointerAuthInfo(),
+ DominatingLLVMValue::restore(CGF, value.Offset));
}
};
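
The traits above are what conditional cleanups use to rebuild an Address at a point the defining instructions may not dominate. A minimal sketch, assuming a CodeGenFunction &CGF and an Address Addr are in scope (the variable names are illustrative):

    // Save while the block defining Addr is still current.
    DominatingValue<Address>::saved_type Saved =
        DominatingValue<Address>::save(CGF, Addr);
    // Later, e.g. inside the cleanup, rebuild an equivalent Address even though
    // the original base-pointer and offset instructions may not dominate here.
    Address Restored = DominatingValue<Address>::restore(CGF, Saved);
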
@@ -203,12 +213,24 @@ template <> struct DominatingValue<RValue> {
class saved_type {
enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
AggregateAddress, ComplexAddress };
-
- llvm::Value *Value;
+ union {
+ struct {
+ DominatingLLVMValue::saved_type first, second;
+ } Vals;
+ DominatingValue<Address>::saved_type AggregateAddr;
+ };
+ LLVM_PREFERRED_TYPE(Kind)
unsigned K : 3;
- unsigned Align : 29;
- saved_type(llvm::Value *v, Kind k, unsigned a = 0)
- : Value(v), K(k), Align(a) {}
+
+ saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
+ : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}
+
+ saved_type(DominatingLLVMValue::saved_type Val1,
+ DominatingLLVMValue::saved_type Val2)
+ : Vals{Val1, Val2}, K(ComplexAddress) {}
+
+ saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
+ : AggregateAddr(AggregateAddr), K(K) {}
public:
static bool needsSaving(RValue value);
@@ -240,11 +262,10 @@ public:
/// A jump destination is an abstract label, branching to which may
/// require a jump out through normal cleanups.
struct JumpDest {
- JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
- JumpDest(llvm::BasicBlock *Block,
- EHScopeStack::stable_iterator Depth,
+ JumpDest() : Block(nullptr), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
unsigned Index)
- : Block(Block), ScopeDepth(Depth), Index(Index) {}
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
bool isValid() const { return Block != nullptr; }
llvm::BasicBlock *getBlock() const { return Block; }
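
A short sketch of how a JumpDest is typically used, assuming a CodeGenFunction &CGF is in scope; EmitBranchThroughCleanup emits whatever cleanup code is needed on the way to the destination:

    // Create a destination bound to the current cleanup depth.
    CodeGenFunction::JumpDest Done = CGF.getJumpDestInCurrentScope("done");
    // Branching here may first run (or thread through) pending cleanups.
    CGF.EmitBranchThroughCleanup(Done);
    // ... other code paths ...
    CGF.EmitBlock(Done.getBlock());
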
@@ -276,6 +297,30 @@ public:
// because of jumps.
VarBypassDetector Bypasses;
+ /// List of recently emitted OMPCanonicalLoops.
+ ///
+ /// Since OMPCanonicalLoops are nested inside other statements (in particular
+ /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
+  /// loops), we cannot directly call EmitOMPCanonicalLoop and receive its
+  /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
+  /// EmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
+  /// this stack when done. Entering a new loop requires clearing this list; it
+  /// either means we start parsing a new loop nest (in which case the previous
+  /// loop nest goes out of scope) or a second loop at the same level, in which
+  /// case it would be ambiguous into which of the two (or more) loops the loop
+  /// nest would extend.
+ SmallVector<llvm::CanonicalLoopInfo *, 4> OMPLoopNestStack;
+
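
A hedged sketch of the hand-off protocol described above, as it might look when emitting a loop-associated directive from inside CodeGenFunction (variable names are illustrative):

    // Start a fresh loop nest: anything left over belongs to a previous nest.
    OMPLoopNestStack.clear();
    // Emitting the associated statement lets EmitOMPCanonicalLoop push one
    // llvm::CanonicalLoopInfo per loop level onto OMPLoopNestStack.
    EmitStmt(AssociatedLoopNest);
    llvm::ArrayRef<llvm::CanonicalLoopInfo *> Nest = OMPLoopNestStack;
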
+ /// Stack to track the Logical Operator recursion nest for MC/DC.
+ SmallVector<const BinaryOperator *, 16> MCDCLogOpStack;
+
+ /// Stack to track the controlled convergence tokens.
+ SmallVector<llvm::IntrinsicInst *, 4> ConvergenceTokenStack;
+
+  /// Number of nested loops to be consumed by the last surrounding
+ /// loop-associated directive.
+ int ExpectedOMPLoopDepth = 0;
+
// CodeGen lambda for loops and support for ordered clause
typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
JumpDest)>
@@ -298,23 +343,26 @@ public:
/// CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
- llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
/// CurFuncDecl - Holds the Decl for the current outermost
/// non-closure context.
- const Decl *CurFuncDecl;
+ const Decl *CurFuncDecl = nullptr;
/// CurCodeDecl - This is the inner-most code context, which includes blocks.
- const Decl *CurCodeDecl;
- const CGFunctionInfo *CurFnInfo;
+ const Decl *CurCodeDecl = nullptr;
+ const CGFunctionInfo *CurFnInfo = nullptr;
QualType FnRetTy;
llvm::Function *CurFn = nullptr;
+ /// Save Parameter Decl for coroutine.
+ llvm::SmallVector<const ParmVarDecl *, 4> FnArgs;
+
// Holds coroutine data if the current function is a coroutine. We use a
// wrapper to manage its lifetime, so that we don't have to define CGCoroData
// in this header.
struct CGCoroInfo {
std::unique_ptr<CGCoroData> Data;
+ bool InSuspendBlock = false;
CGCoroInfo();
~CGCoroInfo();
};
@@ -324,6 +372,29 @@ public:
return CurCoro.Data != nullptr;
}
+ bool inSuspendBlock() const {
+ return isCoroutine() && CurCoro.InSuspendBlock;
+ }
+
+  // Holds FramePtr for await_suspend wrapper generation,
+  // so that a __builtin_coro_frame call can be lowered
+  // directly to the value of its second argument.
+ struct AwaitSuspendWrapperInfo {
+ llvm::Value *FramePtr = nullptr;
+ };
+ AwaitSuspendWrapperInfo CurAwaitSuspendWrapper;
+
+  // Generates a wrapper function for the `llvm.coro.await.suspend.*` intrinsics.
+  // It encapsulates SuspendExpr in a function, to separate its body
+  // from the main coroutine to avoid miscompilations. The intrinsic
+  // is lowered to this function call in the CoroSplit pass.
+ // Function signature is:
+ // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
+ // where type is one of (void, i1, ptr)
+ llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
+ Twine const &SuspendPointName,
+ CoroutineSuspendExpr const &S);
+
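
To illustrate the wrapper's shape, here is a hypothetical C++ rendering of what generateAwaitSuspendWrapper produces for a bool-returning await_suspend (the real wrapper is emitted directly as IR; ExampleAwaiter and the function name are made up):

    #include <coroutine>
    struct ExampleAwaiter {
      bool await_suspend(std::coroutine_handle<> h) noexcept { return !h.done(); }
    };
    // Forwards the type-erased awaiter and coroutine frame, mirroring
    // <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl).
    static bool await_suspend_wrapper_example(void *awaiter, void *frame) {
      return static_cast<ExampleAwaiter *>(awaiter)->await_suspend(
          std::coroutine_handle<>::from_address(frame));
    }
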
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
@@ -357,6 +428,34 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+private:
+ /// PostAllocaInsertPt - This is a place in the prologue where code can be
+ /// inserted that will be dominated by all the static allocas. This helps
+ /// achieve two things:
+ /// 1. Contiguity of all static allocas (within the prologue) is maintained.
+  /// 2. All other prologue code (which is dominated by the static allocas)
+  ///    appears in source order immediately after all static allocas.
+ ///
+ /// PostAllocaInsertPt will be lazily created when it is *really* required.
+ llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
+
+public:
+ /// Return PostAllocaInsertPt. If it is not yet created, then insert it
+ /// immediately after AllocaInsertPt.
+ llvm::Instruction *getPostAllocaInsertPoint() {
+ if (!PostAllocaInsertPt) {
+ assert(AllocaInsertPt &&
+ "Expected static alloca insertion point at function prologue");
+ assert(AllocaInsertPt->getParent()->isEntryBlock() &&
+ "EBB should be entry block of the current code gen function");
+ PostAllocaInsertPt = AllocaInsertPt->clone();
+ PostAllocaInsertPt->setName("postallocapt");
+ PostAllocaInsertPt->insertAfter(AllocaInsertPt);
+ }
+
+ return PostAllocaInsertPt;
+ }
+
/// API for captured statement code generation.
class CGCapturedStmtInfo {
public:
@@ -409,6 +508,11 @@ public:
/// Get the name of the capture helper.
virtual StringRef getHelperName() const { return "__captured_stmt"; }
+ /// Get the CaptureFields
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
+ return CaptureFields;
+ }
+
private:
/// The kind of captured statement being generated.
CapturedRegionKind Kind;
@@ -449,7 +553,7 @@ public:
AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
bool hasFunctionDecl() const {
- return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
+ return isa_and_nonnull<FunctionDecl>(CalleeDecl);
}
const Decl *getDecl() const { return CalleeDecl; }
unsigned getNumParams() const {
@@ -489,7 +593,7 @@ public:
/// potentially set the return value.
bool SawAsmBlock = false;
- const NamedDecl *CurSEHParent = nullptr;
+ GlobalDecl CurSEHParent;
/// True if the current function is an outlined SEH helper. This can be a
/// finally block or filter expression.
@@ -502,41 +606,43 @@ public:
/// True if the current statement has nomerge attribute.
bool InNoMergeAttributedStmt = false;
- /// True if the current function should be marked mustprogress.
- bool FnIsMustProgress = false;
+ /// True if the current statement has noinline attribute.
+ bool InNoInlineAttributedStmt = false;
- /// True if the C++ Standard Requires Progress.
- bool CPlusPlusWithProgress() {
- if (CGM.getCodeGenOpts().getFiniteLoops() ==
- CodeGenOptions::FiniteLoopsKind::Never)
- return false;
+ /// True if the current statement has always_inline attribute.
+ bool InAlwaysInlineAttributedStmt = false;
- return getLangOpts().CPlusPlus11 || getLangOpts().CPlusPlus14 ||
- getLangOpts().CPlusPlus17 || getLangOpts().CPlusPlus20;
- }
+ // The CallExpr within the current statement that the musttail attribute
+ // applies to. nullptr if there is no 'musttail' on the current statement.
+ const CallExpr *MustTailCall = nullptr;
- /// True if the C Standard Requires Progress.
- bool CWithProgress() {
- if (CGM.getCodeGenOpts().getFiniteLoops() ==
- CodeGenOptions::FiniteLoopsKind::Always)
- return true;
+ /// Returns true if a function must make progress, which means the
+ /// mustprogress attribute can be added.
+ bool checkIfFunctionMustProgress() {
if (CGM.getCodeGenOpts().getFiniteLoops() ==
CodeGenOptions::FiniteLoopsKind::Never)
return false;
- return getLangOpts().C11 || getLangOpts().C17 || getLangOpts().C2x;
+ // C++11 and later guarantees that a thread eventually will do one of the
+ // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
+ // - terminate,
+ // - make a call to a library I/O function,
+ // - perform an access through a volatile glvalue, or
+ // - perform a synchronization operation or an atomic operation.
+ //
+ // Hence each function is 'mustprogress' in C++11 or later.
+ return getLangOpts().CPlusPlus11;
}
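
As a concrete illustration of the rule above, a sketch of a function that may receive the mustprogress attribute when compiled as C++11 or later, which in turn lets the optimizer assume the loop eventually terminates:

    // No I/O, no volatile access, no atomics or synchronization: C++11's
    // forward-progress guarantee makes 'mustprogress' valid for this function.
    int spin() {
      while (true) {
      }
      return 0; // unreachable
    }
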
- /// True if the language standard requires progress in functions or
- /// in infinite loops with non-constant conditionals.
- bool LanguageRequiresProgress() {
- return CWithProgress() || CPlusPlusWithProgress();
- }
+ /// Returns true if a loop must make progress, which means the mustprogress
+  /// attribute can be added. \p HasEmptyBody indicates whether the loop body
+  /// is known to be empty.
+ bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
llvm::Value *BlockPointer = nullptr;
- llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField = nullptr;
/// A mapping from NRVO variables to the flags used to indicate
@@ -545,16 +651,63 @@ public:
EHScopeStack EHStack;
llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
+
+ // A stack of cleanups which were added to EHStack but have to be deactivated
+ // later before being popped or emitted. These are usually deactivated on
+ // exiting a `CleanupDeactivationScope` scope. For instance, after a
+ // full-expr.
+ //
+  // These are especially useful for correctly emitting cleanups while
+  // encountering branches out of an expression (through stmt-exprs or coroutine
+  // suspensions).
+ struct DeferredDeactivateCleanup {
+ EHScopeStack::stable_iterator Cleanup;
+ llvm::Instruction *DominatingIP;
+ };
+ llvm::SmallVector<DeferredDeactivateCleanup> DeferredDeactivationCleanupStack;
+
+ // Enters a new scope for capturing cleanups which are deferred to be
+ // deactivated, all of which will be deactivated once the scope is exited.
+ struct CleanupDeactivationScope {
+ CodeGenFunction &CGF;
+ size_t OldDeactivateCleanupStackSize;
+ bool Deactivated;
+ CleanupDeactivationScope(CodeGenFunction &CGF)
+ : CGF(CGF), OldDeactivateCleanupStackSize(
+ CGF.DeferredDeactivationCleanupStack.size()),
+ Deactivated(false) {}
+
+ void ForceDeactivate() {
+ assert(!Deactivated && "Deactivating already deactivated scope");
+ auto &Stack = CGF.DeferredDeactivationCleanupStack;
+ for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
+ CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
+ Stack[I - 1].DominatingIP);
+ Stack[I - 1].DominatingIP->eraseFromParent();
+ }
+ Stack.resize(OldDeactivateCleanupStackSize);
+ Deactivated = true;
+ }
+
+ ~CleanupDeactivationScope() {
+ if (Deactivated)
+ return;
+ ForceDeactivate();
+ }
+ };
+
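
A minimal usage sketch, assuming a CodeGenFunction &CGF, a RawAddress Addr, and an llvm::Value *Size are in scope; CallLifetimeEnd is the cleanup class declared just below, and pushCleanupAndDeferDeactivation is declared later in this header:

    {
      CodeGenFunction::CleanupDeactivationScope Scope(CGF);
      // Pushed onto EHStack now, but only deactivated when Scope is destroyed,
      // e.g. at the end of the enclosing full-expression.
      CGF.pushCleanupAndDeferDeactivation<CodeGenFunction::CallLifetimeEnd>(
          NormalAndEHCleanup, Addr, Size);
      // ... emit the rest of the full-expression ...
    } // ~CleanupDeactivationScope() calls ForceDeactivate()
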
llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
llvm::Instruction *CurrentFuncletPad = nullptr;
class CallLifetimeEnd final : public EHScopeStack::Cleanup {
+ bool isRedundantBeforeReturn() override { return true; }
+
llvm::Value *Addr;
llvm::Value *Size;
public:
- CallLifetimeEnd(Address addr, llvm::Value *size)
+ CallLifetimeEnd(RawAddress addr, llvm::Value *size)
: Addr(addr.getPointer()), Size(size) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
@@ -566,9 +719,11 @@ public:
struct LifetimeExtendedCleanupHeader {
/// The size of the following cleanup object.
unsigned Size;
- /// The kind of cleanup to push: a value from the CleanupKind enumeration.
+ /// The kind of cleanup to push.
+ LLVM_PREFERRED_TYPE(CleanupKind)
unsigned Kind : 31;
/// Whether this is a conditional cleanup.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsConditional : 1;
size_t getSize() const { return Size; }
@@ -577,7 +732,7 @@ public:
};
/// i32s containing the indexes of the cleanup destinations.
- Address NormalCleanupDest = Address::invalid();
+ RawAddress NormalCleanupDest = RawAddress::invalid();
unsigned NextCleanupDestIndex = 1;
@@ -648,7 +803,7 @@ public:
FPOptions OldFPFeatures;
llvm::fp::ExceptionBehavior OldExcept;
llvm::RoundingMode OldRounding;
- Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
+ std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
};
FPOptions CurFPFeatures;
@@ -667,11 +822,11 @@ public:
/// An i1 variable indicating whether or not the @finally is
/// running for an exception.
- llvm::AllocaInst *ForEHVar;
+ llvm::AllocaInst *ForEHVar = nullptr;
/// An i8* variable into which the exception pointer to rethrow
/// has been saved.
- llvm::AllocaInst *SavedExnVar;
+ llvm::AllocaInst *SavedExnVar = nullptr;
public:
void enter(CodeGenFunction &CGF, const Stmt *Finally,
@@ -712,10 +867,10 @@ public:
template <class T, class... As>
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
if (!isInConditionalBranch())
- return pushCleanupAfterFullExprWithActiveFlag<T>(Kind, Address::invalid(),
- A...);
+ return pushCleanupAfterFullExprWithActiveFlag<T>(
+ Kind, RawAddress::invalid(), A...);
- Address ActiveFlag = createCleanupActiveFlag();
+ RawAddress ActiveFlag = createCleanupActiveFlag();
assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
"cleanup active flag should never need saving");
@@ -728,7 +883,7 @@ public:
template <class T, class... As>
void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
- Address ActiveFlag, As... A) {
+ RawAddress ActiveFlag, As... A) {
LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
ActiveFlag.isValid()};
@@ -743,7 +898,20 @@ public:
new (Buffer) LifetimeExtendedCleanupHeader(Header);
new (Buffer + sizeof(Header)) T(A...);
if (Header.IsConditional)
- new (Buffer + sizeof(Header) + sizeof(T)) Address(ActiveFlag);
+ new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
+ }
+
+ // Push a cleanup onto EHStack and deactivate it later. It is usually
+ // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
+ // full expression).
+ template <class T, class... As>
+ void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) {
+ // Placeholder dominating IP for this cleanup.
+ llvm::Instruction *DominatingIP =
+ Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
+ EHStack.pushCleanup<T>(Kind, A...);
+ DeferredDeactivationCleanupStack.push_back(
+ {EHStack.stable_begin(), DominatingIP});
}
/// Set up the last cleanup that was pushed as a conditional
@@ -752,8 +920,8 @@ public:
initFullExprCleanupWithFlag(createCleanupActiveFlag());
}
- void initFullExprCleanupWithFlag(Address ActiveFlag);
- Address createCleanupActiveFlag();
+ void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
+ RawAddress createCleanupActiveFlag();
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
@@ -769,7 +937,8 @@ public:
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
- void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
+ bool ForDeactivation = false);
/// DeactivateCleanupBlock - Deactivates the given cleanup block.
/// The block cannot be reactivated. Pops it if it's the top of the
@@ -797,6 +966,7 @@ public:
class RunCleanupsScope {
EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
size_t LifetimeExtendedCleanupStackSize;
+ CleanupDeactivationScope DeactivateCleanups;
bool OldDidCallStackSave;
protected:
bool PerformCleanup;
@@ -811,8 +981,7 @@ public:
public:
/// Enter a new cleanup scope.
explicit RunCleanupsScope(CodeGenFunction &CGF)
- : PerformCleanup(true), CGF(CGF)
- {
+ : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
CleanupStackDepth = CGF.EHStack.stable_begin();
LifetimeExtendedCleanupStackSize =
CGF.LifetimeExtendedCleanupStack.size();
@@ -842,6 +1011,7 @@ public:
void ForceCleanup(std::initializer_list<llvm::Value**> ValuesToReload = {}) {
assert(PerformCleanup && "Already forced cleanup");
CGF.DidCallStackSave = OldDidCallStackSave;
+ DeactivateCleanups.ForceDeactivate();
CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
ValuesToReload);
PerformCleanup = false;
@@ -941,7 +1111,7 @@ public:
QualType VarTy = LocalVD->getType();
if (VarTy->isReferenceType()) {
Address Temp = CGF.CreateMemTemp(VarTy);
- CGF.Builder.CreateStore(TempAddr.getPointer(), Temp);
+ CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
TempAddr = Temp;
}
SavedTempAddresses.try_emplace(LocalVD, TempAddr);
@@ -997,15 +1167,14 @@ public:
/// Enter a new OpenMP private scope.
explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
- /// Registers \p LocalVD variable as a private and apply \p PrivateGen
- /// function for it to generate corresponding private variable. \p
- /// PrivateGen returns an address of the generated private variable.
+    /// Registers the \p LocalVD variable as a private, with \p Addr as the
+    /// address of the corresponding private variable.
/// \return true if the variable is registered as private, false if it has
/// been privatized already.
- bool addPrivate(const VarDecl *LocalVD,
- const llvm::function_ref<Address()> PrivateGen) {
+ bool addPrivate(const VarDecl *LocalVD, Address Addr) {
assert(PerformCleanup && "adding private to dead scope");
- return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
+ return MappedVars.setVarAddr(CGF, LocalVD, Addr);
}
/// Privatizes local variables previously registered as private.
@@ -1020,7 +1189,7 @@ public:
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
- MappedVars.restore(CGF);
+ restoreMap();
}
/// Exit scope - all the mapped variables are restored.
@@ -1034,6 +1203,11 @@ public:
VD = VD->getCanonicalDecl();
return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
}
+
+    /// Restore all mapped variables w/o cleanup. This is useful when we want
+    /// to reference the original variables but don't want the cleanup, because
+    /// that could emit lifetime end too early, causing backend issue #56913.
+ void restoreMap() { MappedVars.restore(CGF); }
};
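
A short sketch of the new addPrivate() form, assuming a CodeGenFunction &CGF and a const VarDecl *VD are in scope (Privatize() is the existing member that activates the registered mappings):

    CodeGenFunction::OMPPrivateScope PrivScope(CGF);
    // Allocate storage for the private copy and register it for VD.
    Address PrivAddr = CGF.CreateMemTemp(VD->getType(), "omp.private");
    (void)PrivScope.addPrivate(VD, PrivAddr);
    // Make the private mapping visible while emitting the region body.
    (void)PrivScope.Privatize();
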
/// Save/restore original map of previously emitted local vars in case when we
@@ -1132,10 +1306,12 @@ public:
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
- void setBeforeOutermostConditional(llvm::Value *value, Address addr) {
+ void setBeforeOutermostConditional(llvm::Value *value, Address addr,
+ CodeGenFunction &CGF) {
assert(isInConditionalBranch());
llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
- auto store = new llvm::StoreInst(value, addr.getPointer(), &block->back());
+ auto store =
+ new llvm::StoreInst(value, addr.emitRawPointer(CGF), &block->back());
store->setAlignment(addr.getAlignment().getAsAlign());
}
@@ -1165,11 +1341,11 @@ public:
/// destroyed by aggressive peephole optimizations that assume that
/// all uses of a value have been realized in the IR.
class PeepholeProtection {
- llvm::Instruction *Inst;
+ llvm::Instruction *Inst = nullptr;
friend class CodeGenFunction;
public:
- PeepholeProtection() : Inst(nullptr) {}
+ PeepholeProtection() = default;
};
/// A non-RAII class containing all the information about a bound
@@ -1430,11 +1606,15 @@ private:
};
OpenMPCancelExitStack OMPCancelStack;
- /// Calculate branch weights for the likelihood attribute
- llvm::MDNode *createBranchWeights(Stmt::Likelihood LH) const;
+ /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
+ llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
+ Stmt::Likelihood LH);
CodeGenPGO PGO;
+ /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
+ Address MCDCCondBitmapAddr = Address::invalid();
+
/// Calculate branch weights appropriate for PGO data
llvm::MDNode *createProfileWeights(uint64_t TrueCount,
uint64_t FalseCount) const;
@@ -1442,29 +1622,68 @@ private:
llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
uint64_t LoopCount) const;
- /// Calculate the branch weight for PGO data or the likelihood attribute.
- /// The function tries to get the weight of \ref createProfileWeightsForLoop.
- /// If that fails it gets the weight of \ref createBranchWeights.
- llvm::MDNode *createProfileOrBranchWeightsForLoop(const Stmt *Cond,
- uint64_t LoopCount,
- const Stmt *Body) const;
-
public:
/// Increment the profiler's counter for the given statement by \p StepV.
/// If \p StepV is null, the default increment is 1.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
- !CurFn->hasFnAttribute(llvm::Attribute::NoProfile))
- PGO.emitCounterIncrement(Builder, S, StepV);
+ !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
+ !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile)) {
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
+ PGO.emitCounterSetOrIncrement(Builder, S, StepV);
+ }
PGO.setCurrentStmt(S);
}
+ bool isMCDCCoverageEnabled() const {
+ return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ CGM.getCodeGenOpts().MCDCCoverage &&
+ !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
+ }
+
+ /// Allocate a temp value on the stack that MCDC can use to track condition
+ /// results.
+ void maybeCreateMCDCCondBitmap() {
+ if (isMCDCCoverageEnabled()) {
+ PGO.emitMCDCParameters(Builder);
+ MCDCCondBitmapAddr =
+ CreateIRTemp(getContext().UnsignedIntTy, "mcdc.addr");
+ }
+ }
+
+ bool isBinaryLogicalOp(const Expr *E) const {
+ const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
+ return (BOp && BOp->isLogicalOp());
+ }
+
+ /// Zero-init the MCDC temp value.
+ void maybeResetMCDCCondBitmap(const Expr *E) {
+ if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E)) {
+ PGO.emitMCDCCondBitmapReset(Builder, E, MCDCCondBitmapAddr);
+ PGO.setCurrentStmt(E);
+ }
+ }
+
+  /// Update the MC/DC test vector bitmap for the boolean expression \p E once
+  /// all of its conditions have been evaluated.
+ void maybeUpdateMCDCTestVectorBitmap(const Expr *E) {
+ if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E)) {
+ PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr, *this);
+ PGO.setCurrentStmt(E);
+ }
+ }
+
+ /// Update the MCDC temp value with the condition's evaluated result.
+ void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
+ if (isMCDCCoverageEnabled()) {
+ PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val, *this);
+ PGO.setCurrentStmt(E);
+ }
+ }
+
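
Taken together, these hooks are invoked roughly in the following order when emitting a decision such as `a && b` (a sketch of the call sequence from inside CodeGenFunction, not verbatim emission code; E is the whole decision, C a leaf condition, CondV its evaluated i1 value):

    maybeCreateMCDCCondBitmap();          // at function entry: reserve the temp
    maybeResetMCDCCondBitmap(E);          // before evaluating the decision E
    maybeUpdateMCDCCondBitmap(C, CondV);  // as each leaf condition is evaluated
    maybeUpdateMCDCTestVectorBitmap(E);   // after E: fold the recorded outcomes
                                          // into the MC/DC test vector bitmap
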
/// Get the profiler's count for the given statement.
uint64_t getProfileCount(const Stmt *S) {
- Optional<uint64_t> Count = PGO.getStmtCount(S);
- if (!Count.hasValue())
- return 0;
- return *Count;
+ return PGO.getStmtCount(S).value_or(0);
}
/// Set the profiler's current count.
@@ -1552,7 +1771,7 @@ public:
: CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
OldCXXThisAlignment(CGF.CXXThisAlignment),
SourceLocScope(E, CGF.CurSourceLocExprScope) {
- CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getPointer();
+ CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
}
~CXXDefaultInitExprScope() {
@@ -1723,33 +1942,55 @@ public:
}
/// Emit the body of an OMP region
- /// \param CGF The Codegen function this belongs to
- /// \param RegionBodyStmt The body statement for the OpenMP region being
- /// generated
- /// \param CodeGenIP Insertion point for generating the body code.
- /// \param FiniBB The finalization basic block
- static void EmitOMPRegionBody(CodeGenFunction &CGF,
- const Stmt *RegionBodyStmt,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param AllocaIP Where to insert alloca instructions
+ /// \param CodeGenIP Where to insert the region code
+ /// \param RegionName Name to be used for new blocks
+ static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ Twine RegionName);
+
+ static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB, llvm::Function *Fn,
+ ArrayRef<llvm::Value *> Args) {
llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
CodeGenIPBBTI->eraseFromParent();
CGF.Builder.SetInsertPoint(CodeGenIPBB);
- CGF.EmitStmt(RegionBodyStmt);
+ if (Fn->doesNotThrow())
+ CGF.EmitNounwindRuntimeCall(Fn, Args);
+ else
+ CGF.EmitRuntimeCall(Fn, Args);
if (CGF.Builder.saveIP().isSet())
CGF.Builder.CreateBr(&FiniBB);
}
+ /// Emit the body of an OMP region that will be outlined in
+ /// OpenMPIRBuilder::finalize().
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param AllocaIP Where to insert alloca instructions
+ /// \param CodeGenIP Where to insert the region code
+ /// \param RegionName Name to be used for new blocks
+ static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ Twine RegionName);
+
/// RAII for preserving necessary info during Outlined region body codegen.
class OutlinedRegionBodyRAII {
llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
CodeGenFunction::JumpDest OldReturnBlock;
- CGBuilderTy::InsertPoint IP;
CodeGenFunction &CGF;
public:
@@ -1760,7 +2001,6 @@ public:
"Must specify Insertion point for allocas of outlined function");
OldAllocaIP = CGF.AllocaInsertPt;
CGF.AllocaInsertPt = &*AllocaIP.getPoint();
- IP = CGF.Builder.saveIP();
OldReturnBlock = CGF.ReturnBlock;
CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
@@ -1769,7 +2009,6 @@ public:
~OutlinedRegionBodyRAII() {
CGF.AllocaInsertPt = OldAllocaIP;
CGF.ReturnBlock = OldReturnBlock;
- CGF.Builder.restoreIP(IP);
}
};
@@ -1866,6 +2105,9 @@ private:
/// Check if the return value of this function requires sanitization.
bool requiresReturnValueCheck() const;
+ bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
+ bool hasInAllocaArg(const CXXMethodDecl *MD);
+
llvm::BasicBlock *TerminateLandingPad = nullptr;
llvm::BasicBlock *TerminateHandler = nullptr;
llvm::SmallVector<llvm::BasicBlock *, 2> TrapBBs;
@@ -1877,13 +2119,13 @@ private:
/// function attribute.
unsigned LargestVectorWidth = 0;
- /// True if we need emit the life-time markers.
- const bool ShouldEmitLifetimeMarkers;
+  /// True if we need to emit lifetime markers. This is initially set in the
+  /// constructor, but can be overridden to true if this is a coroutine.
+ bool ShouldEmitLifetimeMarkers;
/// Add OpenCL kernel arg metadata and the kernel attribute metadata to
/// the function metadata.
- void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
- llvm::Function *Fn);
+ void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
public:
CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
@@ -1915,7 +2157,7 @@ public:
llvm::Value *getExceptionFromSlot();
llvm::Value *getSelectorFromSlot();
- Address getNormalCleanupDestSlot();
+ RawAddress getNormalCleanupDestSlot();
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
@@ -1930,7 +2172,7 @@ public:
return getInvokeDestImpl();
}
- bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
+ bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
const TargetInfo &getTarget() const { return Target; }
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
@@ -1961,6 +2203,11 @@ public:
Address addr, QualType type);
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray);
+ void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
+ Address addr, QualType type);
+ void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
+ QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray);
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
@@ -1968,6 +2215,8 @@ public:
llvm::Value *CompletePtr,
QualType ElementType);
void pushStackRestore(CleanupKind kind, Address SPMem);
+ void pushKmpcAllocFree(CleanupKind Kind,
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
llvm::Function *generateDestroyHelper(Address addr, QualType type,
@@ -2128,10 +2377,17 @@ public:
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
- CallArgList &CallArgs);
+ CallArgList &CallArgs,
+ const CGFunctionInfo *CallOpFnInfo = nullptr,
+ llvm::Constant *CallOpFn = nullptr);
void EmitLambdaBlockInvokeBody();
- void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
+ CallArgList &CallArgs);
+ void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
+ const CGFunctionInfo **ImplFnInfo,
+ llvm::Function **ImplFn);
+ void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
}
@@ -2140,7 +2396,7 @@ public:
/// Emit the unified return block, trying to avoid its emission when
/// possible.
/// \return The debug location of the user written return statement if the
- /// return block is is avoided.
+ /// return block is avoided.
llvm::DebugLoc EmitReturnBlock();
/// FinishFunction - Complete IR generation of the current function. It is
@@ -2197,10 +2453,20 @@ public:
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+  // VTableAuthMode - whether loading the vtable is guaranteed to trap on
+  // authentication failure, even if the resulting vtable pointer is unused.
+ enum class VTableAuthMode {
+ Authenticate,
+ MustTrap,
+ UnsafeUbsanStrip // Should only be used for Vptr UBSan check
+ };
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
- llvm::Value *GetVTablePtr(Address This, llvm::Type *VTableTy,
- const CXXRecordDecl *VTableClass);
+ llvm::Value *
+ GetVTablePtr(Address This, llvm::Type *VTableTy,
+ const CXXRecordDecl *VTableClass,
+ VTableAuthMode AuthMode = VTableAuthMode::Authenticate);
enum CFITypeCheckKind {
CFITCK_VCall,
@@ -2215,9 +2481,8 @@ public:
/// Derived is the presumed address of an object of type T after a
/// cast. If T is a polymorphic class type, emit a check that the virtual
/// table for Derived belongs to a class derived from T.
- void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
- bool MayBeNull, CFITypeCheckKind TCK,
- SourceLocation Loc);
+ void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
+ CFITypeCheckKind TCK, SourceLocation Loc);
/// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
/// If vptr CFI is enabled, emit a check that VTable is valid.
@@ -2241,7 +2506,9 @@ public:
bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
/// Emit a type checked load from the given vtable.
- llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
+ llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
+ llvm::Value *VTable,
+ llvm::Type *VTableTy,
uint64_t VTableByteOffset);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
@@ -2254,6 +2521,10 @@ public:
/// instrumented with __cyg_profile_func_* calls
bool ShouldInstrumentFunction();
+ /// ShouldSkipSanitizerInstrumentation - Return true if the current function
+ /// should not be instrumented with sanitizers.
+ bool ShouldSkipSanitizerInstrumentation();
+
/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool ShouldXRayInstrumentFunction() const;
@@ -2266,14 +2537,9 @@ public:
/// XRay typed event handling calls.
bool AlwaysEmitXRayTypedEvents() const;
- /// Encode an address into a form suitable for use in a function prologue.
- llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
- llvm::Constant *Addr);
-
- /// Decode an address used in a function prologue, encoded by \c
- /// EncodeAddrForUseInPrologue.
- llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
- llvm::Value *EncodedAddr);
+ /// Return a type hash constant for a function instrumented by
+ /// -fsanitize=function.
+ llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
@@ -2310,6 +2576,8 @@ public:
llvm::Type *ConvertTypeForMem(QualType T);
llvm::Type *ConvertType(QualType T);
+ llvm::Type *convertTypeForLoadStore(QualType ASTTy,
+ llvm::Type *LLVMTy = nullptr);
llvm::Type *ConvertType(const TypeDecl *T) {
return ConvertType(getContext().getTypeDeclType(T));
}
@@ -2395,10 +2663,41 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
+ Address mergeAddressesInConditionalExpr(Address LHS, Address RHS,
+ llvm::BasicBlock *LHSBlock,
+ llvm::BasicBlock *RHSBlock,
+ llvm::BasicBlock *MergeBlock,
+ QualType MergedType) {
+ Builder.SetInsertPoint(MergeBlock);
+ llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
+ PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
+ PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
+ LHS.replaceBasePointer(PtrPhi);
+ LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
+ return LHS;
+ }
+
+ /// Construct an address with the natural alignment of T. If a pointer to T
+ /// is expected to be signed, the pointer passed to this function must have
+ /// been signed, and the returned Address will have the pointer authentication
+ /// information needed to authenticate the signed pointer.
+ Address makeNaturalAddressForPointer(
+ llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
+ bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
+ if (Alignment.isZero())
+ Alignment =
+ CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
+ return Address(Ptr, ConvertTypeForMem(T), Alignment,
+ CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
+ IsKnownNonNull);
+ }
+
LValue MakeAddrLValue(Address Addr, QualType T,
AlignmentSource Source = AlignmentSource::Type) {
- return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
- CGM.getTBAAAccessInfo(T));
+ return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
+ CGM.getTBAAAccessInfo(T));
}
LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
@@ -2408,18 +2707,39 @@ public:
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
AlignmentSource Source = AlignmentSource::Type) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
+ return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
+ LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
}
- LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
- LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- BaseInfo, TBAAInfo);
+ /// Same as MakeAddrLValue above except that the pointer is known to be
+ /// unsigned.
+ LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
+ AlignmentSource Source = AlignmentSource::Type) {
+ Address Addr(V, ConvertTypeForMem(T), Alignment);
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ CGM.getTBAAAccessInfo(T));
+ }
+
+ LValue
+ MakeAddrLValueWithoutTBAA(Address Addr, QualType T,
+ AlignmentSource Source = AlignmentSource::Type) {
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ TBAAAccessInfo());
}
+  /// Given a value of type T* that may not point to a complete object, construct
+ /// an l-value with the natural pointee alignment of T.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
- LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
+
+ LValue
+ MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
+
+ /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is known
+ /// to be unsigned.
+ LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);
+
+ LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);
Address EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo = nullptr,
@@ -2433,11 +2753,41 @@ public:
return EmitLoadOfReferenceLValue(RefLVal);
}
+ /// Load a pointer with type \p PtrTy stored at address \p Ptr.
+  /// Note that \p PtrTy is the type of the loaded pointer, not the address
+  /// it is loaded from.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
LValueBaseInfo *BaseInfo = nullptr,
TBAAAccessInfo *TBAAInfo = nullptr);
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
+private:
+ struct AllocaTracker {
+ void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
+ llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }
+
+ private:
+ llvm::SmallVector<llvm::AllocaInst *> Allocas;
+ };
+ AllocaTracker *Allocas = nullptr;
+
+public:
+ // Captures all the allocas created during the scope of its RAII object.
+ struct AllocaTrackerRAII {
+ AllocaTrackerRAII(CodeGenFunction &CGF)
+ : CGF(CGF), OldTracker(CGF.Allocas) {
+ CGF.Allocas = &Tracker;
+ }
+ ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }
+
+ llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }
+
+ private:
+ CodeGenFunction &CGF;
+ AllocaTracker *OldTracker;
+ AllocaTracker Tracker;
+ };
+
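
A usage sketch, assuming a CodeGenFunction &CGF and a const Stmt *S are in scope:

    llvm::SmallVector<llvm::AllocaInst *> Captured;
    {
      CodeGenFunction::AllocaTrackerRAII Track(CGF);
      CGF.EmitStmt(S);          // allocas created during emission are recorded
      Captured = Track.Take();  // take ownership before the tracker is popped
    }
    // 'Captured' can now be inspected or re-homed by the caller.
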
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder. The caller is responsible for setting an
@@ -2466,13 +2816,13 @@ public:
/// more efficient if the caller knows that the address will not be exposed.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
llvm::Value *ArraySize = nullptr);
- Address CreateTempAlloca(llvm::Type *Ty, CharUnits align,
- const Twine &Name = "tmp",
- llvm::Value *ArraySize = nullptr,
- Address *Alloca = nullptr);
- Address CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
- const Twine &Name = "tmp",
- llvm::Value *ArraySize = nullptr);
+ RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr,
+ RawAddress *Alloca = nullptr);
+ RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
+ const Twine &Name = "tmp",
+ llvm::Value *ArraySize = nullptr);
/// CreateDefaultAlignedTempAlloca - This creates an alloca with the
/// default ABI alignment of the given LLVM type.
@@ -2484,17 +2834,8 @@ public:
/// not hand this address off to arbitrary IRGen routines, and especially
/// do not pass it as an argument to a function that might expect a
/// properly ABI-aligned value.
- Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
- const Twine &Name = "tmp");
-
- /// InitTempAlloca - Provide an initial value for the given alloca which
- /// will be observable at all locations in the function.
- ///
- /// The address should be something that was returned from one of
- /// the CreateTempAlloca or CreateMemTemp routines, and the
- /// initializer must be valid in the entry block (i.e. it must
- /// either be a constant or an argument value).
- void InitTempAlloca(Address Alloca, llvm::Value *Value);
+ RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when an temporary
@@ -2504,41 +2845,52 @@ public:
///
/// That is, this is exactly equivalent to CreateMemTemp, but calling
/// ConvertType instead of ConvertTypeForMem.
- Address CreateIRTemp(QualType T, const Twine &Name = "tmp");
+ RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
   /// appropriate alignment and cast it to the default address space. Returns
/// the original alloca instruction by \p Alloca if it is not nullptr.
- Address CreateMemTemp(QualType T, const Twine &Name = "tmp",
- Address *Alloca = nullptr);
- Address CreateMemTemp(QualType T, CharUnits Align, const Twine &Name = "tmp",
- Address *Alloca = nullptr);
+ RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
+ RawAddress *Alloca = nullptr);
+ RawAddress CreateMemTemp(QualType T, CharUnits Align,
+ const Twine &Name = "tmp",
+ RawAddress *Alloca = nullptr);
/// CreateMemTemp - Create a temporary memory object of the given type, with
   /// appropriate alignment without casting it to the default address space.
- Address CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
- Address CreateMemTempWithoutCast(QualType T, CharUnits Align,
- const Twine &Name = "tmp");
+ RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
+ RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
+ const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
- Address *Alloca = nullptr) {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca),
- T.getQualifiers(),
- AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased,
- AggValueSlot::DoesNotOverlap);
+ RawAddress *Alloca = nullptr) {
+ return AggValueSlot::forAddr(
+ CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
+ AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
}
- /// Emit a cast to void* in the appropriate address space.
- llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
-
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
+ /// Retrieve the implicit cast expression of the rhs in a binary operator
+  /// expression by passing pointers to Value and QualType.
+ /// This is used for implicit bitfield conversion checks, which
+ /// must compare with the value before potential truncation.
+ llvm::Value *EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E,
+ llvm::Value **Previous,
+ QualType *SrcType);
+
+  /// Emit a check for an [implicit] conversion of a bitfield. It is not UB,
+ /// so we use the value after conversion.
+ void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType,
+ llvm::Value *Dst, QualType DstType,
+ const CGBitFieldInfo &Info,
+ SourceLocation Loc);
+
/// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
void EmitIgnoredExpr(const Expr *E);
@@ -2676,7 +3028,8 @@ public:
/// \returns A pointer to the argument.
// FIXME: We should be able to get rid of this method and use the va_arg
// instruction in LLVM instead once it works well enough.
- Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr);
+ RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr,
+ AggValueSlot Slot = AggValueSlot::ignored());
/// emitArrayLength - Compute the length of an array, even if it's a
/// VLA, and drill down to the base element type.
@@ -2796,7 +3149,7 @@ public:
AggValueSlot::Overlap_t Overlap,
SourceLocation Loc, bool NewPointerIsChecked);
- /// Emit assumption load for all bases. Requires to be be called only on
+ /// Emit assumption load for all bases. Requires to be called only on
/// most-derived class and not under construction of the object.
void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
@@ -2835,7 +3188,12 @@ public:
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
Address Ptr);
- llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
+ void EmitSehCppScopeBegin();
+ void EmitSehCppScopeEnd();
+ void EmitSehTryScopeBegin();
+ void EmitSehTryScopeEnd();
+
+ llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
@@ -2901,6 +3259,25 @@ public:
/// calls to EmitTypeCheck can be skipped.
bool sanitizePerformTypeCheck() const;
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
+ QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
+ llvm::Value *ArraySize = nullptr) {
+ if (!sanitizePerformTypeCheck())
+ return;
+ EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
+ SkippedChecks, ArraySize);
+ }
+
+ void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
+ QualType Type, CharUnits Alignment = CharUnits::Zero(),
+ SanitizerSet SkippedChecks = SanitizerSet(),
+ llvm::Value *ArraySize = nullptr) {
+ if (!sanitizePerformTypeCheck())
+ return;
+ EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
+ SkippedChecks, ArraySize);
+ }
+
/// Emit a check that \p V is the address of storage of the
/// appropriate size and alignment for an object of type \p Type
/// (or if ArraySize is provided, for an array of that bound).
@@ -2914,6 +3291,25 @@ public:
/// this expression is used as an lvalue, for instance in "&Arr[Idx]".
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
QualType IndexType, bool Accessed);
+ void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
+ llvm::Value *Index, QualType IndexType,
+ QualType IndexedType, bool Accessed);
+
+ // Find a struct's flexible array member and get its offset. It may be
+ // embedded inside multiple sub-structs, but must still be the last field.
+ const FieldDecl *
+ FindFlexibleArrayMemberFieldAndOffset(ASTContext &Ctx, const RecordDecl *RD,
+ const FieldDecl *FAMDecl,
+ uint64_t &Offset);
+
+ /// Find the FieldDecl specified in a FAM's "counted_by" attribute. Returns
+ /// \p nullptr if either the attribute or the field doesn't exist.
+ const FieldDecl *FindCountedByField(const FieldDecl *FD);
+
+ /// Build an expression accessing the "counted_by" field.
+ llvm::Value *EmitCountedByFieldExpr(const Expr *Base,
+ const FieldDecl *FAMDecl,
+ const FieldDecl *CountDecl);
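
For reference, the source-level pattern these helpers analyze looks roughly like this hypothetical struct (`counted_by` is the Clang attribute that FindCountedByField searches for):

    struct Packet {
      unsigned count;                                    // the counting field
      int payload[] __attribute__((counted_by(count)));  // flexible array member
    };
    // FindCountedByField(<FieldDecl of payload>) would return the FieldDecl for
    // 'count'; EmitCountedByFieldExpr then builds the load of that field.
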
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
@@ -2982,17 +3378,17 @@ public:
/// Address with original alloca instruction. Invalid if the variable was
/// emitted as a global constant.
- Address AllocaAddr;
+ RawAddress AllocaAddr;
struct Invalid {};
AutoVarEmission(Invalid)
: Variable(nullptr), Addr(Address::invalid()),
- AllocaAddr(Address::invalid()) {}
+ AllocaAddr(RawAddress::invalid()) {}
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsEscapingByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
+ SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
@@ -3015,7 +3411,7 @@ public:
}
/// Returns the address for the original alloca instruction.
- Address getOriginalAllocatedAddress() const { return AllocaAddr; }
+ RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
@@ -3045,20 +3441,32 @@ public:
llvm::GlobalValue::LinkageTypes Linkage);
class ParamValue {
- llvm::Value *Value;
- unsigned Alignment;
- ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
+ union {
+ Address Addr;
+ llvm::Value *Value;
+ };
+
+ bool IsIndirect;
+
+ ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
+ ParamValue(Address A) : Addr(A), IsIndirect(true) {}
+
public:
static ParamValue forDirect(llvm::Value *value) {
- return ParamValue(value, 0);
+ return ParamValue(value);
}
static ParamValue forIndirect(Address addr) {
assert(!addr.getAlignment().isZero());
- return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
+ return ParamValue(addr);
}
- bool isIndirect() const { return Alignment != 0; }
- llvm::Value *getAnyValue() const { return Value; }
+ bool isIndirect() const { return IsIndirect; }
+ llvm::Value *getAnyValue() const {
+ if (!isIndirect())
+ return Value;
+ assert(!Addr.hasOffset() && "unexpected offset");
+ return Addr.getBasePointer();
+ }
llvm::Value *getDirectValue() const {
assert(!isIndirect());
@@ -3067,7 +3475,7 @@ public:
Address getIndirectAddress() const {
assert(isIndirect());
- return Address(Value, CharUnits::fromQuantity(Alignment));
+ return Addr;
}
};
@@ -3117,7 +3525,7 @@ public:
/// This function may clear the current insertion point; callers should use
/// EnsureInsertPoint if they wish to subsequently generate code without first
/// calling EmitBlock, EmitBranch, or EmitStmt.
- void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
+ void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = std::nullopt);
/// EmitSimpleStmt - Try to emit a "simple" statement which does not
/// necessarily require an insertion point or debug information; typically
@@ -3145,10 +3553,10 @@ public:
void EmitIfStmt(const IfStmt &S);
void EmitWhileStmt(const WhileStmt &S,
- ArrayRef<const Attr *> Attrs = None);
- void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
+ ArrayRef<const Attr *> Attrs = std::nullopt);
+ void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = std::nullopt);
void EmitForStmt(const ForStmt &S,
- ArrayRef<const Attr *> Attrs = None);
+ ArrayRef<const Attr *> Attrs = std::nullopt);
void EmitReturnStmt(const ReturnStmt &S);
void EmitDeclStmt(const DeclStmt &S);
void EmitBreakStmt(const BreakStmt &S);
@@ -3185,6 +3593,8 @@ public:
void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
void EnterSEHTryStmt(const SEHTryStmt &S);
void ExitSEHTryStmt(const SEHTryStmt &S);
+ void VolatilizeTryBlocks(llvm::BasicBlock *BB,
+ llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V);
void pushSEHCleanup(CleanupKind kind,
llvm::Function *FinallyFunc);
@@ -3223,7 +3633,7 @@ public:
llvm::Value *ParentFP);
void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
- ArrayRef<const Attr *> Attrs = None);
+ ArrayRef<const Attr *> Attrs = std::nullopt);
/// Controls insertion of cancellation exit blocks in worksharing constructs.
class OMPCancelStackRAII {
@@ -3300,10 +3710,12 @@ public:
OMPPrivateScope &PrivateScope);
void EmitOMPUseDevicePtrClause(
const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap);
void EmitOMPUseDeviceAddrClause(
const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap);
/// Emit code for copyin clause in \a D directive. The next code is
/// generated at the start of outlined functions for directives:
/// \code
@@ -3398,15 +3810,25 @@ public:
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
const RegionCodeGenTy &BodyGen,
OMPTargetDataInfo &InputInfo);
-
+ void processInReduction(const OMPExecutableDirective &S,
+ OMPTaskDataTy &Data,
+ CodeGenFunction &CGF,
+ const CapturedStmt *CS,
+ OMPPrivateScope &Scope);
+ void EmitOMPMetaDirective(const OMPMetaDirective &S);
void EmitOMPParallelDirective(const OMPParallelDirective &S);
void EmitOMPSimdDirective(const OMPSimdDirective &S);
+ void EmitOMPTileDirective(const OMPTileDirective &S);
+ void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
+ void EmitOMPReverseDirective(const OMPReverseDirective &S);
+ void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
void EmitOMPForDirective(const OMPForDirective &S);
void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
void EmitOMPSectionDirective(const OMPSectionDirective &S);
void EmitOMPSingleDirective(const OMPSingleDirective &S);
void EmitOMPMasterDirective(const OMPMasterDirective &S);
+ void EmitOMPMaskedDirective(const OMPMaskedDirective &S);
void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
@@ -3414,6 +3836,7 @@ public:
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
void EmitOMPTaskDirective(const OMPTaskDirective &S);
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
+ void EmitOMPErrorDirective(const OMPErrorDirective &S);
void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
@@ -3469,6 +3892,15 @@ public:
const OMPTargetTeamsDistributeParallelForSimdDirective &S);
void EmitOMPTargetTeamsDistributeSimdDirective(
const OMPTargetTeamsDistributeSimdDirective &S);
+ void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
+ void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
+ void EmitOMPTargetParallelGenericLoopDirective(
+ const OMPTargetParallelGenericLoopDirective &S);
+ void EmitOMPTargetTeamsGenericLoopDirective(
+ const OMPTargetTeamsGenericLoopDirective &S);
+ void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
+ void EmitOMPInteropDirective(const OMPInteropDirective &S);
+ void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
/// Emit device code for the target directive.
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
@@ -3507,9 +3939,31 @@ public:
CodeGenModule &CGM, StringRef ParentName,
const OMPTargetTeamsDistributeParallelForSimdDirective &S);
+ /// Emit device code for the target teams loop directive.
+ static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsGenericLoopDirective &S);
+
+ /// Emit device code for the target parallel loop directive.
+ static void EmitOMPTargetParallelGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetParallelGenericLoopDirective &S);
+
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
CodeGenModule &CGM, StringRef ParentName,
const OMPTargetTeamsDistributeParallelForDirective &S);
+
+ /// Emit the Stmt \p S and return its topmost canonical loop, if any.
+ /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
+ /// future it is meant to be the number of loops expected in the loop nests
+ /// (usually specified by the "collapse" clause) that are collapsed to a
+ /// single loop by this function.
+ llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
+ int Depth);
+
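For context, a hedged source-level example of the loop nest that a future Depth > 1 implementation (driven by the 'collapse' clause) would merge into one canonical loop; today only Depth == 1 is accepted. The variables n, m and a are assumed:

    // Hypothetical user code: with collapse(2) both loops would form a single
    // logical canonical loop of n*m iterations for the OpenMPIRBuilder.
    #pragma omp for collapse(2)
    for (int i = 0; i < n; ++i)
      for (int j = 0; j < m; ++j)
        a[i][j] = 0;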
+ /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
+ void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S);
+
/// Emit inner loop of the worksharing/simd construct.
///
/// \param S Directive, for which the inner loop must be emitted.
@@ -3546,7 +4000,7 @@ public:
const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
/// Helpers for the OpenMP loop directives.
- void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
+ void EmitOMPSimdInit(const OMPLoopDirective &D);
void EmitOMPSimdFinal(
const OMPLoopDirective &D,
const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
@@ -3582,6 +4036,8 @@ private:
Expr *NextLB = nullptr;
/// Update of UB after a whole chunk has been executed
Expr *NextUB = nullptr;
+ /// Distinguishes between the 'for', 'distribute' and 'sections' directives.
+ OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
OMPLoopArguments() = default;
OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
@@ -3611,6 +4067,22 @@ private:
void EmitSections(const OMPExecutableDirective &S);
public:
+ //===--------------------------------------------------------------------===//
+ // OpenACC Emission
+ //===--------------------------------------------------------------------===//
+ void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S) {
+ // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
+ // simply emitting its structured block, but in the future it will emit
+ // real IR for the construct.
+ EmitStmt(S.getStructuredBlock());
+ }
+
+ void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S) {
+ // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
+ // simply emitting its loop, but in the future it will emit real IR for
+ // the construct.
+ EmitStmt(S.getLoop());
+ }
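A hedged source-level illustration of the no-op lowering described in the TODOs above: the pragma currently contributes no IR of its own, so this function is emitted exactly like the bare loop inside it.

    // Hypothetical user code: only the structured block (the loop) is emitted.
    void scale(float *x, int n) {
    #pragma acc parallel loop
      for (int i = 0; i < n; ++i)
        x[i] *= 2.0f;
    }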
//===--------------------------------------------------------------------===//
// LValue Expression Emission
@@ -3649,8 +4121,13 @@ public:
/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
/// variable length type, this is not possible.
///
- LValue EmitLValue(const Expr *E);
+ LValue EmitLValue(const Expr *E,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
+private:
+ LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
+
+public:
/// Same as EmitLValue but additionally we generate checking code to
/// guard against undefined behavior. This is only suitable when we know
/// that the address will be used to access the object.
@@ -3755,6 +4232,11 @@ public:
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
RValue EmitLoadOfGlobalRegLValue(LValue LV);
+ /// Like EmitLoadOfLValue but also handles complex and aggregate types.
+ RValue EmitLoadOfAnyValue(LValue V,
+ AggValueSlot Slot = AggValueSlot::ignored(),
+ SourceLocation Loc = {});
+
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
@@ -3792,13 +4274,14 @@ public:
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed = false);
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
- LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
- bool IsLowerBound = true);
+ LValue EmitArraySectionExpr(const ArraySectionExpr *E,
+ bool IsLowerBound = true);
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
LValue EmitInitListLValue(const InitListExpr *E);
+ void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
@@ -3852,8 +4335,12 @@ public:
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
+ llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
LValue EmitLValueForLambdaField(const FieldDecl *Field);
+ LValue EmitLValueForLambdaField(const FieldDecl *Field,
+ llvm::Value *ThisValue);
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
/// if the Field is a reference, this will return the address of the reference
@@ -3886,12 +4373,15 @@ public:
/// LLVM arguments and the types they were derived from.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- llvm::CallBase **callOrInvoke, SourceLocation Loc);
+ llvm::CallBase **callOrInvoke, bool IsMustTail,
+ SourceLocation Loc,
+ bool IsVirtualFunctionPointerThunk = false);
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- llvm::CallBase **callOrInvoke = nullptr) {
+ llvm::CallBase **callOrInvoke = nullptr,
+ bool IsMustTail = false) {
return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
- SourceLocation());
+ IsMustTail, SourceLocation());
}
RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
@@ -3911,6 +4401,9 @@ public:
llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const Twine &name = "");
llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
+ ArrayRef<Address> args,
+ const Twine &name = "");
+ llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
ArrayRef<llvm::Value *> args,
const Twine &name = "");
@@ -3936,6 +4429,45 @@ public:
CXXDtorType Type,
const CXXRecordDecl *RD);
+ bool isPointerKnownNonNull(const Expr *E);
+
+ /// Create the discriminator from the storage address and the entity hash.
+ llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
+ llvm::Value *Discriminator);
+ CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema,
+ llvm::Value *StorageAddress,
+ GlobalDecl SchemaDecl,
+ QualType SchemaType);
+
+ llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
+ llvm::Value *Pointer);
+
+ llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
+ llvm::Value *Pointer);
+
+ llvm::Value *emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType,
+ const CGPointerAuthInfo &CurAuthInfo,
+ const CGPointerAuthInfo &NewAuthInfo,
+ bool IsKnownNonNull);
+ llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
+ const CGPointerAuthInfo &CurInfo,
+ const CGPointerAuthInfo &NewInfo);
+
+ void EmitPointerAuthOperandBundle(
+ const CGPointerAuthInfo &Info,
+ SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
+
+ llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
+ QualType SourceType, QualType DestType);
+ Address authPointerToPointerCast(Address Ptr, QualType SourceType,
+ QualType DestType);
+
+ Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy);
+
+ llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
+ return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
+ }
+
// Return the copy constructor name with the prefix "__copy_constructor_"
// removed.
static std::string getNonTrivialCopyConstructorStr(QualType QT,
@@ -3995,10 +4527,9 @@ public:
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue);
- RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue);
- RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue);
+ RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E);
+ RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E);
+ RValue EmitOpenMPDevicePrintfCallExpr(const CallExpr *E);
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E, ReturnValueSlot ReturnValue);
@@ -4070,30 +4601,36 @@ public:
/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
/// access builtin. Only required if it can't be inferred from the base
/// pointer operand.
- llvm::Type *SVEBuiltinMemEltTy(SVETypeFlags TypeFlags);
+ llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
- SmallVector<llvm::Type *, 2> getSVEOverloadTypes(SVETypeFlags TypeFlags,
- llvm::Type *ReturnType,
- ArrayRef<llvm::Value *> Ops);
- llvm::Type *getEltType(SVETypeFlags TypeFlags);
+ SmallVector<llvm::Type *, 2>
+ getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
- llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags);
- llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags);
+ llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
+ llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
+ llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
+ llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
- llvm::Value *EmitSVEPMull(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
llvm::SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
- llvm::Value *EmitSVEMovl(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
llvm::ArrayRef<llvm::Value *> Ops,
unsigned BuiltinID);
llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
llvm::ScalableVectorType *VTy);
- llvm::Value *EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
llvm::SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
- llvm::Value *EmitSVEScatterStore(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
llvm::SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
@@ -4102,19 +4639,45 @@ public:
llvm::Value *EmitSVEMaskedStore(const CallExpr *,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
- llvm::Value *EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
- llvm::Value *EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
- llvm::Value *EmitSVEStructLoad(SVETypeFlags TypeFlags,
- SmallVectorImpl<llvm::Value *> &Ops, unsigned IntID);
- llvm::Value *EmitSVEStructStore(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
+ /// FormSVEBuiltinResult - Returns the struct of scalable vectors as a single
+ /// wider vector. It extracts each scalable vector from the struct and inserts
+ /// it into the wider vector. This avoids the error LLVM hits when allocating
+ /// stack space for a struct of scalable vectors returned by a function.
+ llvm::Value *FormSVEBuiltinResult(llvm::Value *Call);
+
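For context, a hedged ACLE-level example of a builtin whose struct-of-scalable-vectors result is repacked by this helper into a single wider vector at the IR level:

    // Hypothetical user code: svld2_f32 returns an svfloat32x2_t, i.e. a
    // struct of two scalable vectors.
    #include <arm_sve.h>
    svfloat32_t first_of_pair(svbool_t pg, const float *p) {
      svfloat32x2_t pair = svld2_f32(pg, p);
      return svget2_f32(pair, 0);
    }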
llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+
+ void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ SVETypeFlags TypeFlags);
+
+ llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
llvm::Triple::ArchType Arch);
llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -4123,12 +4686,20 @@ public:
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
+ const CallExpr *E);
llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E);
llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
- bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
+ llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
+ const CallExpr *E);
+ void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID);
@@ -4213,6 +4784,8 @@ public:
void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
+ void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);
+
static Destroyer destroyARCStrongImprecise;
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
@@ -4258,9 +4831,17 @@ public:
/// aggregate type into a temporary LValue.
LValue EmitAggExprToLValue(const Expr *E);
- /// Build all the stores needed to initialize an aggregate at Dest with the
- /// value Val.
- void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);
+ enum ExprValueKind { EVK_RValue, EVK_NonRValue };
+
+ /// EmitAggFinalDestCopy - Emit a copy of the specified aggregate into the
+ /// destination address.
+ void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
+ ExprValueKind SrcKind);
+
+ /// Create a store to \arg Dst from \arg Src, truncating the stored value
+ /// to at most \arg DstSize bytes.
+ void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
+ bool DstIsVolatile);
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
@@ -4282,6 +4863,11 @@ public:
/// EmitLoadOfComplex - Load a complex number from the specified l-value.
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
+ ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
+ llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
+ ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
+ ComplexPairTy EmitUnPromotedValue(ComplexPairTy result, QualType PromotionType);
+
Address emitAddrOfRealComponent(Address complex, QualType complexType);
Address emitAddrOfImagComponent(Address complex, QualType complexType);
@@ -4298,17 +4884,27 @@ public:
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
- void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
bool PerformInit);
- llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
+ llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
llvm::Constant *Addr);
+ llvm::Function *createTLSAtExitStub(const VarDecl &VD,
+ llvm::FunctionCallee Dtor,
+ llvm::Constant *Addr,
+ llvm::FunctionCallee &AtExit);
+
/// Call atexit() with a function that passes the given argument to
/// the given function.
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
llvm::Constant *addr);
+ /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
+ /// support an 'atexit()' function.
+ void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
+ llvm::Constant *addr);
+
/// Call atexit() with function dtorStub.
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
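A hedged source-level example of the C++ pattern these registration helpers lower: a namespace-scope object whose destructor must run at program exit, registered either through atexit()/__cxa_atexit or, on platforms without atexit support, through llvm.global_dtors.

    // Hypothetical user code: 'log' is destroyed at exit via a dtor stub
    // registered by the helpers above.
    struct Logger { ~Logger(); };
    Logger log;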
@@ -4342,8 +4938,9 @@ public:
/// variables.
void GenerateCXXGlobalCleanUpFunc(
llvm::Function *Fn,
- const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
- llvm::Constant *>> &DtorsOrStermFinalizers);
+ ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
+ llvm::Constant *>>
+ DtorsOrStermFinalizers);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
@@ -4406,6 +5003,9 @@ public:
bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
bool AllowLabels = false);
+ /// Ignore parentheses and logical-NOT to track conditions consistently.
+ static const Expr *stripCond(const Expr *C);
+
/// isInstrumentedCondition - Determine whether the given condition is an
/// instrumentable condition (i.e. no "&&" or "||").
static bool isInstrumentedCondition(const Expr *C);
@@ -4428,7 +5028,8 @@ public:
/// evaluate to true based on PGO data.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
- Stmt::Likelihood LH = Stmt::LH_None);
+ Stmt::Likelihood LH = Stmt::LH_None,
+ const Expr *ConditionalOp = nullptr);
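A hedged source-level example of where the Likelihood argument originates; fast_path and slow_path are assumed declarations:

    // Hypothetical user code: [[likely]] reaches CodeGen as Stmt::LH_Likely
    // and biases the branch weights attached to the emitted conditional branch.
    void fast_path();
    void slow_path();
    void dispatch(int x) {
      if (x > 0) [[likely]]
        fast_path();
      else
        slow_path();
    }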
/// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
/// nonnull, if \p LHS is marked _Nonnull.
@@ -4443,13 +5044,18 @@ public:
/// \p SignedIndices indicates whether any of the GEP indices are signed.
/// \p IsSubtraction indicates whether the expression used to form the GEP
/// is a subtraction.
- llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
+ llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
ArrayRef<llvm::Value *> IdxList,
bool SignedIndices,
bool IsSubtraction,
SourceLocation Loc,
const Twine &Name = "");
+ Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
+ llvm::Type *elementType, bool SignedIndices,
+ bool IsSubtraction, SourceLocation Loc,
+ CharUnits Align, const Twine &Name = "");
+
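A hedged source-level example of pointer arithmetic that is emitted through these helpers when building with -fsanitize=pointer-overflow:

    // Hypothetical user code: the subtraction below goes through
    // EmitCheckedInBoundsGEP with IsSubtraction = true and SignedIndices =
    // true, so wrapping past the start of the object is diagnosed at run time.
    int *rewind(int *p, long n) { return p - n; }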
/// Specifies which type of sanitizer check to apply when handling a
/// particular builtin.
enum BuiltinCheckKind {
@@ -4473,6 +5079,9 @@ public:
/// passing to a runtime sanitizer handler.
llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
+ void EmitKCFIOperandBundle(const CGCallee &Callee,
+ SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
+
/// Create a basic block that will either trap or call a handler function in
/// the UBSan runtime with the provided arguments, and create a conditional
/// branch to it.
@@ -4509,6 +5118,10 @@ public:
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
AbstractCallee AC, unsigned ParmNum);
+ void EmitNonNullArgCheck(Address Addr, QualType ArgType,
+ SourceLocation ArgLoc, AbstractCallee AC,
+ unsigned ParmNum);
+
/// EmitCallArg - Emit a single call argument.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
@@ -4522,20 +5135,53 @@ public:
/// point operation, expressed as the maximum relative error in ulp.
void SetFPAccuracy(llvm::Value *Val, float Accuracy);
- /// SetFPModel - Control floating point behavior via fp-model settings.
- void SetFPModel();
+ /// Set the minimum required accuracy of the given sqrt operation
+ /// based on CodeGenOpts.
+ void SetSqrtFPAccuracy(llvm::Value *Val);
+
+ /// Set the minimum required accuracy of the given division operation based
+ /// on CodeGenOpts.
+ void SetDivFPAccuracy(llvm::Value *Val);
/// Set the codegen fast-math flags.
void SetFastMathFlags(FPOptions FPFeatures);
+ // Truncate or extend a boolean vector to the requested number of elements.
+ llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
+ unsigned NumElementsDst,
+ const llvm::Twine &Name = "");
+ // Adds a convergence_ctrl token to |Input| and emits the required parent
+ // convergence instructions.
+ template <typename CallType>
+ CallType *addControlledConvergenceToken(CallType *Input) {
+ return cast<CallType>(
+ addConvergenceControlToken(Input, ConvergenceTokenStack.back()));
+ }
+
+private:
+ // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
+ // as its parent convergence instruction.
+ llvm::IntrinsicInst *emitConvergenceLoopToken(llvm::BasicBlock *BB,
+ llvm::Value *ParentToken);
+ // Adds a convergence_ctrl token with |ParentToken| as the parent convergence
+ // instruction to the call |Input|.
+ llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input,
+ llvm::Value *ParentToken);
+ // Find the convergence_entry instruction for |F|, or emit one if none exists.
+ // Returns the convergence instruction.
+ llvm::IntrinsicInst *getOrEmitConvergenceEntryToken(llvm::Function *F);
+ // Find the convergence_loop instruction for the loop defined by |LI|, or
+ // emit one if none exists. Returns the convergence instruction.
+ llvm::IntrinsicInst *getOrEmitConvergenceLoopToken(const LoopInfo *LI);
+
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
- llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
- DeferredReplacements;
+ llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
+ DeferredReplacements;
/// Set the address of a local variable.
void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
@@ -4557,13 +5203,14 @@ private:
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
- llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr, std::string &ConstraintStr);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
+ std::string &ConstraintStr);
- llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
- LValue InputValue, QualType InputType,
- std::string &ConstraintStr,
- SourceLocation Loc);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+ QualType InputType, std::string &ConstraintStr,
+ SourceLocation Loc);
/// Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
@@ -4583,6 +5230,9 @@ private:
llvm::Value *EmittedE,
bool IsDynamic);
+ llvm::Value *emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType);
+
void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
Address Loc);
@@ -4628,9 +5278,10 @@ public:
/// into the address of a local variable. In such a case, it's quite
/// reasonable to just ignore the returned alignment when it isn't from an
/// explicit source.
- Address EmitPointerWithAlignment(const Expr *Addr,
- LValueBaseInfo *BaseInfo = nullptr,
- TBAAAccessInfo *TBAAInfo = nullptr);
+ Address
+ EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
/// If \p E references a parameter with pass_object_size info or a constant
/// array size modifier, emit the object size divided by the size of \p EltTy.
@@ -4641,7 +5292,6 @@ public:
struct MultiVersionResolverOption {
llvm::Function *Function;
- FunctionDecl *FD;
struct Conds {
StringRef Architecture;
llvm::SmallVector<StringRef, 8> Features;
@@ -4660,8 +5310,12 @@ public:
// last (if it exists).
void EmitMultiVersionResolver(llvm::Function *Resolver,
ArrayRef<MultiVersionResolverOption> Options);
-
- static uint64_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
+ void
+ EmitX86MultiVersionResolver(llvm::Function *Resolver,
+ ArrayRef<MultiVersionResolverOption> Options);
+ void
+ EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
+ ArrayRef<MultiVersionResolverOption> Options);
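For context, a hedged source-level example of the function multiversioning these resolver emitters serve; the resolver (typically an IFUNC on ELF targets) picks the best variant at load time from the detected CPU features:

    // Hypothetical user code: one body is emitted per clone plus a resolver
    // populated by EmitMultiVersionResolver / the per-target variants above.
    __attribute__((target_clones("avx2", "sse4.2", "default")))
    int dot(const int *a, const int *b, int n) {
      int s = 0;
      for (int i = 0; i < n; ++i)
        s += a[i] * b[i];
      return s;
    }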
private:
QualType getVarArgType(const Expr *Arg);
@@ -4678,80 +5332,14 @@ private:
llvm::Value *EmitX86CpuIs(StringRef CPUStr);
llvm::Value *EmitX86CpuSupports(const CallExpr *E);
llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
- llvm::Value *EmitX86CpuSupports(uint64_t Mask);
+ llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
llvm::Value *EmitX86CpuInit();
- llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
-};
-
-/// TargetFeatures - This class is used to check whether the builtin function
-/// has the required target-specific features. It is able to support the
-/// combination of ','(and), '|'(or), and '()'. By default, the priority of
-/// ',' is higher than that of '|' .
-/// E.g:
-/// A,B|C means the builtin function requires both A and B, or C.
-/// If we want the builtin function requires both A and B, or both A and C,
-/// there are two ways: A,B|A,C or A,(B|C).
-/// The FeaturesList should not contain spaces, and brackets must appear in
-/// pairs.
-class TargetFeatures {
- struct FeatureListStatus {
- bool HasFeatures;
- StringRef CurFeaturesList;
- };
-
- const llvm::StringMap<bool> &CallerFeatureMap;
-
- FeatureListStatus getAndFeatures(StringRef FeatureList) {
- int InParentheses = 0;
- bool HasFeatures = true;
- size_t SubexpressionStart = 0;
- for (size_t i = 0, e = FeatureList.size(); i < e; ++i) {
- char CurrentToken = FeatureList[i];
- switch (CurrentToken) {
- default:
- break;
- case '(':
- if (InParentheses == 0)
- SubexpressionStart = i + 1;
- ++InParentheses;
- break;
- case ')':
- --InParentheses;
- assert(InParentheses >= 0 && "Parentheses are not in pair");
- LLVM_FALLTHROUGH;
- case '|':
- case ',':
- if (InParentheses == 0) {
- if (HasFeatures && i != SubexpressionStart) {
- StringRef F = FeatureList.slice(SubexpressionStart, i);
- HasFeatures = CurrentToken == ')' ? hasRequiredFeatures(F)
- : CallerFeatureMap.lookup(F);
- }
- SubexpressionStart = i + 1;
- if (CurrentToken == '|') {
- return {HasFeatures, FeatureList.substr(SubexpressionStart)};
- }
- }
- break;
- }
- }
- assert(InParentheses == 0 && "Parentheses are not in pair");
- if (HasFeatures && SubexpressionStart != FeatureList.size())
- HasFeatures =
- CallerFeatureMap.lookup(FeatureList.substr(SubexpressionStart));
- return {HasFeatures, StringRef()};
- }
-
-public:
- bool hasRequiredFeatures(StringRef FeatureList) {
- FeatureListStatus FS = {false, FeatureList};
- while (!FS.HasFeatures && !FS.CurFeaturesList.empty())
- FS = getAndFeatures(FS.CurFeaturesList);
- return FS.HasFeatures;
- }
-
- TargetFeatures(const llvm::StringMap<bool> &CallerFeatureMap)
- : CallerFeatureMap(CallerFeatureMap) {}
+ llvm::Value *FormX86ResolverCondition(const MultiVersionResolverOption &RO);
+ llvm::Value *EmitAArch64CpuInit();
+ llvm::Value *
+ FormAArch64ResolverCondition(const MultiVersionResolverOption &RO);
+ llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
+ llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
};
inline DominatingLLVMValue::saved_type
@@ -4760,12 +5348,12 @@ DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
// Otherwise, we need an alloca.
auto align = CharUnits::fromQuantity(
- CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+ CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
Address alloca =
- CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
+ CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
CGF.Builder.CreateStore(value, alloca);
- return saved_type(alloca.getPointer(), true);
+ return saved_type(alloca.emitRawPointer(CGF), true);
}
inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
@@ -4775,7 +5363,8 @@ inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
// Otherwise, it should be an alloca instruction, as set up in save().
auto alloca = cast<llvm::AllocaInst>(value.getPointer());
- return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlign());
+ return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
+ alloca->getAlign());
}
} // end namespace CodeGen