Diffstat (limited to 'contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp')
-rw-r--r-- | contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp | 710
1 file changed, 442 insertions, 268 deletions
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp index ba221dbbc83b..e5ba50de3462 100644 --- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp +++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "ABIInfoImpl.h" #include "CGBlocks.h" #include "CGCXXABI.h" #include "CGDebugInfo.h" @@ -28,7 +29,9 @@ #include "clang/CodeGen/CGFunctionInfo.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Metadata.h" +#include "llvm/Support/SaveAndRestore.h" #include "llvm/Transforms/Utils/SanitizerStats.h" +#include <optional> using namespace clang; using namespace CodeGen; @@ -127,18 +130,19 @@ CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign, Address CodeGenFunction::LoadCXXThisAddress() { assert(CurFuncDecl && "loading 'this' without a func declaration?"); - assert(isa<CXXMethodDecl>(CurFuncDecl)); + auto *MD = cast<CXXMethodDecl>(CurFuncDecl); // Lazily compute CXXThisAlignment. if (CXXThisAlignment.isZero()) { // Just use the best known alignment for the parent. // TODO: if we're currently emitting a complete-object ctor/dtor, // we can always use the complete-object alignment. - auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent(); - CXXThisAlignment = CGM.getClassPointerAlignment(RD); + CXXThisAlignment = CGM.getClassPointerAlignment(MD->getParent()); } - return Address(LoadCXXThis(), CXXThisAlignment); + return makeNaturalAddressForPointer( + LoadCXXThis(), MD->getFunctionObjectParameterType(), CXXThisAlignment, + false, nullptr, nullptr, KnownNonNull); } /// Emit the address of a field using a member data pointer. @@ -162,7 +166,8 @@ CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base, CGM.getDynamicOffsetAlignment(base.getAlignment(), memberPtrType->getClass()->getAsCXXRecordDecl(), memberAlign); - return Address(ptr, memberAlign); + return Address(ptr, ConvertTypeForMem(memberPtrType->getPointeeType()), + memberAlign); } CharUnits CodeGenModule::computeNonVirtualBaseClassOffset( @@ -204,7 +209,7 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, return nullptr; llvm::Type *PtrDiffTy = - Types.ConvertType(getContext().getPointerDiffType()); + getTypes().ConvertType(getContext().getPointerDiffType()); return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity()); } @@ -234,12 +239,10 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This, // TODO: for complete types, this should be possible with a GEP. Address V = This; if (!Offset.isZero()) { - V = Builder.CreateElementBitCast(V, Int8Ty); + V = V.withElementType(Int8Ty); V = Builder.CreateConstInBoundsByteGEP(V, Offset); } - V = Builder.CreateElementBitCast(V, ConvertType(Base)); - - return V; + return V.withElementType(ConvertType(Base)); } static Address @@ -269,10 +272,8 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr, } // Apply the base offset. - llvm::Value *ptr = addr.getPointer(); - unsigned AddrSpace = ptr->getType()->getPointerAddressSpace(); - ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8Ty->getPointerTo(AddrSpace)); - ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr"); + llvm::Value *ptr = addr.emitRawPointer(CGF); + ptr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ptr, baseOffset, "add.ptr"); // If we have a virtual component, the alignment of the result will // be relative only to the known alignment of that vbase. 
@@ -286,7 +287,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr, } alignment = alignment.alignmentAtOffset(nonVirtualOffset); - return Address(ptr, alignment); + return Address(ptr, CGF.Int8Ty, alignment); } Address CodeGenFunction::GetAddressOfBaseClass( @@ -326,9 +327,9 @@ Address CodeGenFunction::GetAddressOfBaseClass( } // Get the base pointer type. - llvm::Type *BasePtrTy = - ConvertType((PathEnd[-1])->getType()) - ->getPointerTo(Value.getType()->getPointerAddressSpace()); + llvm::Type *BaseValueTy = ConvertType((PathEnd[-1])->getType()); + llvm::Type *PtrTy = llvm::PointerType::get( + CGM.getLLVMContext(), Value.getType()->getPointerAddressSpace()); QualType DerivedTy = getContext().getRecordType(Derived); CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived); @@ -339,10 +340,10 @@ Address CodeGenFunction::GetAddressOfBaseClass( if (sanitizePerformTypeCheck()) { SanitizerSet SkippedChecks; SkippedChecks.set(SanitizerKind::Null, !NullCheckValue); - EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(), - DerivedTy, DerivedAlign, SkippedChecks); + EmitTypeCheck(TCK_Upcast, Loc, Value.emitRawPointer(*this), DerivedTy, + DerivedAlign, SkippedChecks); } - return Builder.CreateBitCast(Value, BasePtrTy); + return Value.withElementType(BaseValueTy); } llvm::BasicBlock *origBB = nullptr; @@ -355,7 +356,7 @@ Address CodeGenFunction::GetAddressOfBaseClass( llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull"); endBB = createBasicBlock("cast.end"); - llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer()); + llvm::Value *isNull = Builder.CreateIsNull(Value); Builder.CreateCondBr(isNull, endBB, notNullBB); EmitBlock(notNullBB); } @@ -364,14 +365,15 @@ Address CodeGenFunction::GetAddressOfBaseClass( SanitizerSet SkippedChecks; SkippedChecks.set(SanitizerKind::Null, true); EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, - Value.getPointer(), DerivedTy, DerivedAlign, SkippedChecks); + Value.emitRawPointer(*this), DerivedTy, DerivedAlign, + SkippedChecks); } // Compute the virtual offset. llvm::Value *VirtualOffset = nullptr; if (VBase) { VirtualOffset = - CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); + CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); } // Apply both offsets. @@ -379,7 +381,7 @@ Address CodeGenFunction::GetAddressOfBaseClass( VirtualOffset, Derived, VBase); // Cast to the destination type. - Value = Builder.CreateBitCast(Value, BasePtrTy); + Value = Value.withElementType(BaseValueTy); // Build a phi if we needed a null check. 
if (NullCheckValue) { @@ -387,10 +389,10 @@ Address CodeGenFunction::GetAddressOfBaseClass( Builder.CreateBr(endBB); EmitBlock(endBB); - llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result"); - PHI->addIncoming(Value.getPointer(), notNullBB); - PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB); - Value = Address(PHI, Value.getAlignment()); + llvm::PHINode *PHI = Builder.CreatePHI(PtrTy, 2, "cast.result"); + PHI->addIncoming(Value.emitRawPointer(*this), notNullBB); + PHI->addIncoming(llvm::Constant::getNullValue(PtrTy), origBB); + Value = Value.withPointer(PHI, NotKnownNonNull); } return Value; @@ -405,17 +407,15 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr, assert(PathBegin != PathEnd && "Base path should not be empty!"); QualType DerivedTy = - getContext().getCanonicalType(getContext().getTagDeclType(Derived)); - unsigned AddrSpace = - BaseAddr.getPointer()->getType()->getPointerAddressSpace(); - llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(AddrSpace); + getContext().getCanonicalType(getContext().getTagDeclType(Derived)); + llvm::Type *DerivedValueTy = ConvertType(DerivedTy); llvm::Value *NonVirtualOffset = CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd); if (!NonVirtualOffset) { // No offset, we can just cast back. - return Builder.CreateBitCast(BaseAddr, DerivedPtrTy); + return BaseAddr.withElementType(DerivedValueTy); } llvm::BasicBlock *CastNull = nullptr; @@ -427,18 +427,19 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr, CastNotNull = createBasicBlock("cast.notnull"); CastEnd = createBasicBlock("cast.end"); - llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer()); + llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr); Builder.CreateCondBr(IsNull, CastNull, CastNotNull); EmitBlock(CastNotNull); } // Apply the offset. - llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy); - Value = Builder.CreateInBoundsGEP(Value, Builder.CreateNeg(NonVirtualOffset), - "sub.ptr"); + Address Addr = BaseAddr.withElementType(Int8Ty); + Addr = Builder.CreateInBoundsGEP( + Addr, Builder.CreateNeg(NonVirtualOffset), Int8Ty, + CGM.getClassPointerAlignment(Derived), "sub.ptr"); // Just cast. - Value = Builder.CreateBitCast(Value, DerivedPtrTy); + Addr = Addr.withElementType(DerivedValueTy); // Produce a PHI if we had a null-check. if (NullCheckValue) { @@ -447,13 +448,15 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr, Builder.CreateBr(CastEnd); EmitBlock(CastEnd); + llvm::Value *Value = Addr.emitRawPointer(*this); llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); PHI->addIncoming(Value, CastNotNull); PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull); - Value = PHI; + return Address(PHI, Addr.getElementType(), + CGM.getClassPointerAlignment(Derived)); } - return Address(Value, CGM.getClassPointerAlignment(Derived)); + return Addr; } llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, @@ -467,8 +470,6 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent(); const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent(); - llvm::Value *VTT; - uint64_t SubVTTIndex; if (Delegating) { @@ -494,15 +495,14 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { // A VTT parameter was passed to the constructor, use it. 
- VTT = LoadCXXVTT(); - VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex); + llvm::Value *VTT = LoadCXXVTT(); + return Builder.CreateConstInBoundsGEP1_64(VoidPtrTy, VTT, SubVTTIndex); } else { // We're the complete constructor, so get the VTT by name. - VTT = CGM.getVTables().GetAddrOfVTT(RD); - VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex); + llvm::GlobalValue *VTT = CGM.getVTables().GetAddrOfVTT(RD); + return Builder.CreateConstInBoundsGEP2_64( + VTT->getValueType(), VTT, 0, SubVTTIndex); } - - return VTT; } namespace { @@ -520,7 +520,7 @@ namespace { const CXXDestructorDecl *D = BaseClass->getDestructor(); // We are already inside a destructor, so presumably the object being // destroyed should have the expected type. - QualType ThisTy = D->getThisObjectType(); + QualType ThisTy = D->getFunctionObjectParameterType(); Address Addr = CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, @@ -681,7 +681,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF, // the constructor. QualType::DestructionKind dtorKind = FieldType.isDestructedType(); if (CGF.needsEHCleanup(dtorKind)) - CGF.pushEHDestroy(dtorKind, LHS.getAddress(CGF), FieldType); + CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); return; } } @@ -706,9 +706,9 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS, break; case TEK_Aggregate: { AggValueSlot Slot = AggValueSlot::forLValue( - LHS, *this, AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, - getOverlapForFieldInit(Field), AggValueSlot::IsNotZeroed, + LHS, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, getOverlapForFieldInit(Field), + AggValueSlot::IsNotZeroed, // Checks are made by the code that calls constructor. AggValueSlot::IsSanitizerChecked); EmitAggExpr(Init, Slot); @@ -720,7 +720,7 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS, // later in the constructor. QualType::DestructionKind dtorKind = FieldType.isDestructedType(); if (needsEHCleanup(dtorKind)) - pushEHDestroy(dtorKind, LHS.getAddress(*this), FieldType); + pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); } /// Checks whether the given constructor is a valid subject for the @@ -860,11 +860,12 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { // Enter the function-try-block before the constructor prologue if // applicable. - bool IsTryBody = (Body && isa<CXXTryStmt>(Body)); + bool IsTryBody = isa_and_nonnull<CXXTryStmt>(Body); if (IsTryBody) EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); incrementProfileCounter(Body); + maybeCreateMCDCCondBitmap(); RunCleanupsScope RunCleanups(*this); @@ -933,7 +934,7 @@ namespace { } void addMemcpyableField(FieldDecl *F) { - if (F->isZeroSize(CGF.getContext())) + if (isEmptyFieldForLayout(CGF.getContext(), F)) return; if (!FirstField) addInitialField(F); @@ -983,8 +984,8 @@ namespace { LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField); emitMemcpyIR( - Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(CGF), - Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(CGF), + Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(), + Src.isBitField() ? 
Src.getBitFieldAddress() : Src.getAddress(), MemcpySize); reset(); } @@ -999,16 +1000,8 @@ namespace { private: void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { - llvm::PointerType *DPT = DestPtr.getType(); - llvm::Type *DBP = - llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace()); - DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP); - - llvm::PointerType *SPT = SrcPtr.getType(); - llvm::Type *SBP = - llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace()); - SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP); - + DestPtr = DestPtr.withElementType(CGF.Int8Ty); + SrcPtr = SrcPtr.withElementType(CGF.Int8Ty); CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity()); } @@ -1139,7 +1132,7 @@ namespace { continue; LValue FieldLHS = LHS; EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); - CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(CGF), FieldType); + CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); } } @@ -1309,10 +1302,10 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, assert(BaseCtorContinueBB); } - llvm::Value *const OldThis = CXXThisValue; for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { if (!ConstructVBases) continue; + SaveAndRestore ThisRAII(CXXThisValue); if (CGM.getCodeGenOpts().StrictVTablePointers && CGM.getCodeGenOpts().OptimizationLevel > 0 && isInitializerOfDynamicClass(*B)) @@ -1329,7 +1322,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, // Then, non-virtual base initializers. for (; B != E && (*B)->isBaseInitializer(); B++) { assert(!(*B)->isBaseVirtual()); - + SaveAndRestore ThisRAII(CXXThisValue); if (CGM.getCodeGenOpts().StrictVTablePointers && CGM.getCodeGenOpts().OptimizationLevel > 0 && isInitializerOfDynamicClass(*B)) @@ -1337,8 +1330,6 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, EmitBaseInitializer(*this, ClassDecl, *B); } - CXXThisValue = OldThis; - InitializeVTablePointers(ClassDecl); // And finally, initialize class members. @@ -1414,7 +1405,7 @@ FieldHasTrivialDestructorBody(ASTContext &Context, // The destructor for an implicit anonymous union member is never invoked. if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion()) - return false; + return true; return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); } @@ -1427,6 +1418,11 @@ static bool CanSkipVTablePointerInitialization(CodeGenFunction &CGF, if (!ClassDecl->isDynamicClass()) return true; + // For a final class, the vtable pointer is known to already point to the + // class's vtable. 
+ if (ClassDecl->isEffectivelyFinal()) + return true; + if (!Dtor->hasTrivialBody()) return false; @@ -1458,8 +1454,10 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { } Stmt *Body = Dtor->getBody(); - if (Body) + if (Body) { incrementProfileCounter(Body); + maybeCreateMCDCCondBitmap(); + } // The call to operator delete in a deleting destructor happens // outside of the function-try-block, which means it's always @@ -1469,7 +1467,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { RunCleanupsScope DtorEpilogue(*this); EnterDtorCleanups(Dtor, Dtor_Deleting); if (HaveInsertPoint()) { - QualType ThisTy = Dtor->getThisObjectType(); + QualType ThisTy = Dtor->getFunctionObjectParameterType(); EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, /*Delegating=*/false, LoadCXXThisAddress(), ThisTy); } @@ -1478,7 +1476,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { // If the body is a function-try-block, enter the try before // anything else. - bool isTryBody = (Body && isa<CXXTryStmt>(Body)); + bool isTryBody = isa_and_nonnull<CXXTryStmt>(Body); if (isTryBody) EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); EmitAsanPrologueOrEpilogue(false); @@ -1503,14 +1501,14 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { EnterDtorCleanups(Dtor, Dtor_Complete); if (!isTryBody) { - QualType ThisTy = Dtor->getThisObjectType(); + QualType ThisTy = Dtor->getFunctionObjectParameterType(); EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, /*Delegating=*/false, LoadCXXThisAddress(), ThisTy); break; } // Fallthrough: act like we're in the base variant. - LLVM_FALLTHROUGH; + [[fallthrough]]; case Dtor_Base: assert(Body); @@ -1562,6 +1560,7 @@ void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) LexicalScope Scope(*this, RootCS->getSourceRange()); incrementProfileCounter(RootCS); + maybeCreateMCDCCondBitmap(); AssignmentMemcpyizer AM(*this, AssignOp, Args); for (auto *I : RootCS->body()) AM.emitAssignment(I); @@ -1649,116 +1648,140 @@ namespace { LValue LV = CGF.EmitLValueForField(ThisLV, field); assert(LV.isSimple()); - CGF.emitDestroy(LV.getAddress(CGF), field->getType(), destroyer, + CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, flags.isForNormalCleanup() && useEHCleanupForArray); } }; - static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr, - CharUnits::QuantityType PoisonSize) { - CodeGenFunction::SanitizerScope SanScope(&CGF); - // Pass in void pointer and size of region as arguments to runtime - // function - llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy), - llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)}; + class DeclAsInlineDebugLocation { + CGDebugInfo *DI; + llvm::MDNode *InlinedAt; + std::optional<ApplyDebugLocation> Location; - llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy}; + public: + DeclAsInlineDebugLocation(CodeGenFunction &CGF, const NamedDecl &Decl) + : DI(CGF.getDebugInfo()) { + if (!DI) + return; + InlinedAt = DI->getInlinedAt(); + DI->setInlinedAt(CGF.Builder.getCurrentDebugLocation()); + Location.emplace(CGF, Decl.getLocation()); + } - llvm::FunctionType *FnType = - llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false); - llvm::FunctionCallee Fn = - CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback"); - CGF.EmitNounwindRuntimeCall(Fn, Args); - } + ~DeclAsInlineDebugLocation() { + if (!DI) + return; + Location.reset(); + DI->setInlinedAt(InlinedAt); + } + }; - class 
SanitizeDtorMembers final : public EHScopeStack::Cleanup { - const CXXDestructorDecl *Dtor; + static void EmitSanitizerDtorCallback( + CodeGenFunction &CGF, StringRef Name, llvm::Value *Ptr, + std::optional<CharUnits::QuantityType> PoisonSize = {}) { + CodeGenFunction::SanitizerScope SanScope(&CGF); + // Pass in void pointer and size of region as arguments to runtime + // function + SmallVector<llvm::Value *, 2> Args = {Ptr}; + SmallVector<llvm::Type *, 2> ArgTypes = {CGF.VoidPtrTy}; + + if (PoisonSize.has_value()) { + Args.emplace_back(llvm::ConstantInt::get(CGF.SizeTy, *PoisonSize)); + ArgTypes.emplace_back(CGF.SizeTy); + } - public: - SanitizeDtorMembers(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {} + llvm::FunctionType *FnType = + llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false); + llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(FnType, Name); + + CGF.EmitNounwindRuntimeCall(Fn, Args); + } + + static void + EmitSanitizerDtorFieldsCallback(CodeGenFunction &CGF, llvm::Value *Ptr, + CharUnits::QuantityType PoisonSize) { + EmitSanitizerDtorCallback(CGF, "__sanitizer_dtor_callback_fields", Ptr, + PoisonSize); + } + + /// Poison base class with a trivial destructor. + struct SanitizeDtorTrivialBase final : EHScopeStack::Cleanup { + const CXXRecordDecl *BaseClass; + bool BaseIsVirtual; + SanitizeDtorTrivialBase(const CXXRecordDecl *Base, bool BaseIsVirtual) + : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} - // Generate function call for handling object poisoning. - // Disables tail call elimination, to prevent the current stack frame - // from disappearing from the stack trace. void Emit(CodeGenFunction &CGF, Flags flags) override { - const ASTRecordLayout &Layout = - CGF.getContext().getASTRecordLayout(Dtor->getParent()); + const CXXRecordDecl *DerivedClass = + cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent(); + + Address Addr = CGF.GetAddressOfDirectBaseInCompleteClass( + CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, BaseIsVirtual); + + const ASTRecordLayout &BaseLayout = + CGF.getContext().getASTRecordLayout(BaseClass); + CharUnits BaseSize = BaseLayout.getSize(); - // Nothing to poison. - if (Layout.getFieldCount() == 0) + if (!BaseSize.isPositive()) return; + // Use the base class declaration location as inline DebugLocation. All + // fields of the class are destroyed. + DeclAsInlineDebugLocation InlineHere(CGF, *BaseClass); + EmitSanitizerDtorFieldsCallback(CGF, Addr.emitRawPointer(CGF), + BaseSize.getQuantity()); + // Prevent the current stack frame from disappearing from the stack trace. CGF.CurFn->addFnAttr("disable-tail-calls", "true"); - - // Construct pointer to region to begin poisoning, and calculate poison - // size, so that only members declared in this class are poisoned. - ASTContext &Context = CGF.getContext(); - - const RecordDecl *Decl = Dtor->getParent(); - auto Fields = Decl->fields(); - auto IsTrivial = [&](const FieldDecl *F) { - return FieldHasTrivialDestructorBody(Context, F); - }; - - auto IsZeroSize = [&](const FieldDecl *F) { - return F->isZeroSize(Context); - }; - - // Poison blocks of fields with trivial destructors making sure that block - // begin and end do not point to zero-sized fields. They don't have - // correct offsets so can't be used to calculate poisoning range. 
- for (auto It = Fields.begin(); It != Fields.end();) { - It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) { - return IsTrivial(F) && !IsZeroSize(F); - }); - if (It == Fields.end()) - break; - auto Start = It++; - It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) { - return !IsTrivial(F) && !IsZeroSize(F); - }); - - PoisonMembers(CGF, (*Start)->getFieldIndex(), - It == Fields.end() ? -1 : (*It)->getFieldIndex()); - } } + }; - private: - /// \param layoutStartOffset index of the ASTRecordLayout field to - /// start poisoning (inclusive) - /// \param layoutEndOffset index of the ASTRecordLayout field to - /// end poisoning (exclusive) - void PoisonMembers(CodeGenFunction &CGF, unsigned layoutStartOffset, - unsigned layoutEndOffset) { - ASTContext &Context = CGF.getContext(); + class SanitizeDtorFieldRange final : public EHScopeStack::Cleanup { + const CXXDestructorDecl *Dtor; + unsigned StartIndex; + unsigned EndIndex; + + public: + SanitizeDtorFieldRange(const CXXDestructorDecl *Dtor, unsigned StartIndex, + unsigned EndIndex) + : Dtor(Dtor), StartIndex(StartIndex), EndIndex(EndIndex) {} + + // Generate function call for handling object poisoning. + // Disables tail call elimination, to prevent the current stack frame + // from disappearing from the stack trace. + void Emit(CodeGenFunction &CGF, Flags flags) override { + const ASTContext &Context = CGF.getContext(); const ASTRecordLayout &Layout = Context.getASTRecordLayout(Dtor->getParent()); - // It's a first trivia field so it should be at the begining of char, + // It's a first trivial field so it should be at the begining of a char, // still round up start offset just in case. - CharUnits PoisonStart = - Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset) + - Context.getCharWidth() - 1); + CharUnits PoisonStart = Context.toCharUnitsFromBits( + Layout.getFieldOffset(StartIndex) + Context.getCharWidth() - 1); llvm::ConstantInt *OffsetSizePtr = llvm::ConstantInt::get(CGF.SizeTy, PoisonStart.getQuantity()); - llvm::Value *OffsetPtr = CGF.Builder.CreateGEP( - CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy), - OffsetSizePtr); + llvm::Value *OffsetPtr = + CGF.Builder.CreateGEP(CGF.Int8Ty, CGF.LoadCXXThis(), OffsetSizePtr); CharUnits PoisonEnd; - if (layoutEndOffset >= Layout.getFieldCount()) { + if (EndIndex >= Layout.getFieldCount()) { PoisonEnd = Layout.getNonVirtualSize(); } else { PoisonEnd = - Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutEndOffset)); + Context.toCharUnitsFromBits(Layout.getFieldOffset(EndIndex)); } CharUnits PoisonSize = PoisonEnd - PoisonStart; if (!PoisonSize.isPositive()) return; - EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize.getQuantity()); + // Use the top field declaration location as inline DebugLocation. + DeclAsInlineDebugLocation InlineHere( + CGF, **std::next(Dtor->getParent()->field_begin(), StartIndex)); + EmitSanitizerDtorFieldsCallback(CGF, OffsetPtr, PoisonSize.getQuantity()); + + // Prevent the current stack frame from disappearing from the stack trace. + CGF.CurFn->addFnAttr("disable-tail-calls", "true"); } }; @@ -1772,17 +1795,45 @@ namespace { void Emit(CodeGenFunction &CGF, Flags flags) override { assert(Dtor->getParent()->isDynamicClass()); (void)Dtor; - ASTContext &Context = CGF.getContext(); // Poison vtable and vtable ptr if they exist for this class. 
llvm::Value *VTablePtr = CGF.LoadCXXThis(); - CharUnits::QuantityType PoisonSize = - Context.toCharUnitsFromBits(CGF.PointerWidthInBits).getQuantity(); // Pass in void pointer and size of region as arguments to runtime // function - EmitSanitizerDtorCallback(CGF, VTablePtr, PoisonSize); + EmitSanitizerDtorCallback(CGF, "__sanitizer_dtor_callback_vptr", + VTablePtr); } }; + + class SanitizeDtorCleanupBuilder { + ASTContext &Context; + EHScopeStack &EHStack; + const CXXDestructorDecl *DD; + std::optional<unsigned> StartIndex; + + public: + SanitizeDtorCleanupBuilder(ASTContext &Context, EHScopeStack &EHStack, + const CXXDestructorDecl *DD) + : Context(Context), EHStack(EHStack), DD(DD), StartIndex(std::nullopt) {} + void PushCleanupForField(const FieldDecl *Field) { + if (isEmptyFieldForLayout(Context, Field)) + return; + unsigned FieldIndex = Field->getFieldIndex(); + if (FieldHasTrivialDestructorBody(Context, Field)) { + if (!StartIndex) + StartIndex = FieldIndex; + } else if (StartIndex) { + EHStack.pushCleanup<SanitizeDtorFieldRange>(NormalAndEHCleanup, DD, + *StartIndex, FieldIndex); + StartIndex = std::nullopt; + } + } + void End() { + if (StartIndex) + EHStack.pushCleanup<SanitizeDtorFieldRange>(NormalAndEHCleanup, DD, + *StartIndex, -1); + } + }; } // end anonymous namespace /// Emit all code that comes at the end of class's @@ -1845,13 +1896,19 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, auto *BaseClassDecl = cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl()); - // Ignore trivial destructors. - if (BaseClassDecl->hasTrivialDestructor()) - continue; - - EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, - BaseClassDecl, - /*BaseIsVirtual*/ true); + if (BaseClassDecl->hasTrivialDestructor()) { + // Under SanitizeMemoryUseAfterDtor, poison the trivial base class + // memory. For non-trival base classes the same is done in the class + // destructor. + if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && + SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty()) + EHStack.pushCleanup<SanitizeDtorTrivialBase>(NormalAndEHCleanup, + BaseClassDecl, + /*BaseIsVirtual*/ true); + } else { + EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl, + /*BaseIsVirtual*/ true); + } } return; @@ -1873,36 +1930,46 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl(); - // Ignore trivial destructors. - if (BaseClassDecl->hasTrivialDestructor()) - continue; - - EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, - BaseClassDecl, - /*BaseIsVirtual*/ false); + if (BaseClassDecl->hasTrivialDestructor()) { + if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && + SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty()) + EHStack.pushCleanup<SanitizeDtorTrivialBase>(NormalAndEHCleanup, + BaseClassDecl, + /*BaseIsVirtual*/ false); + } else { + EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl, + /*BaseIsVirtual*/ false); + } } // Poison fields such that access after their destructors are // invoked, and before the base class destructor runs, is invalid. - if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && - SanOpts.has(SanitizerKind::Memory)) - EHStack.pushCleanup<SanitizeDtorMembers>(NormalAndEHCleanup, DD); + bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && + SanOpts.has(SanitizerKind::Memory); + SanitizeDtorCleanupBuilder SanitizeBuilder(getContext(), EHStack, DD); // Destroy direct fields. 
for (const auto *Field : ClassDecl->fields()) { + if (SanitizeFields) + SanitizeBuilder.PushCleanupForField(Field); + QualType type = Field->getType(); QualType::DestructionKind dtorKind = type.isDestructedType(); - if (!dtorKind) continue; + if (!dtorKind) + continue; // Anonymous union members do not have their destructors called. const RecordType *RT = type->getAsUnionType(); - if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue; + if (RT && RT->getDecl()->isAnonymousStructOrUnion()) + continue; CleanupKind cleanupKind = getCleanupKind(dtorKind); - EHStack.pushCleanup<DestroyField>(cleanupKind, Field, - getDestroyer(dtorKind), - cleanupKind & EHCleanup); + EHStack.pushCleanup<DestroyField>( + cleanupKind, Field, getDestroyer(dtorKind), cleanupKind & EHCleanup); } + + if (SanitizeFields) + SanitizeBuilder.End(); } /// EmitCXXAggrConstructorCall - Emit a loop to call a particular @@ -1963,9 +2030,10 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, } // Find the end of the array. - llvm::Value *arrayBegin = arrayBase.getPointer(); - llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements, - "arrayctor.end"); + llvm::Type *elementType = arrayBase.getElementType(); + llvm::Value *arrayBegin = arrayBase.emitRawPointer(*this); + llvm::Value *arrayEnd = Builder.CreateInBoundsGEP( + elementType, arrayBegin, numElements, "arrayctor.end"); // Enter the loop, setting up a phi for the current location to initialize. llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); @@ -1987,7 +2055,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, CharUnits eltAlignment = arrayBase.getAlignment() .alignmentOfArrayElement(getContext().getTypeSizeInChars(type)); - Address curAddr = Address(cur, eltAlignment); + Address curAddr = Address(cur, elementType, eltAlignment); // Zero initialize the storage, if requested. if (zeroInitialize) @@ -2023,9 +2091,8 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, } // Go to the next element. - llvm::Value *next = - Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1), - "arrayctor.next"); + llvm::Value *next = Builder.CreateInBoundsGEP( + elementType, cur, llvm::ConstantInt::get(SizeTy, 1), "arrayctor.next"); cur->addIncoming(next, Builder.GetInsertBlock()); // Check whether that's the end of the loop. @@ -2059,16 +2126,16 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, CallArgList Args; Address This = ThisAVS.getAddress(); LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); - QualType ThisType = D->getThisType(); - LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace(); - llvm::Value *ThisPtr = This.getPointer(); + LangAS ThisAS = D->getFunctionObjectParameterType().getAddressSpace(); + llvm::Value *ThisPtr = + getAsNaturalPointerTo(This, D->getThisType()->getPointeeType()); if (SlotAS != ThisAS) { unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS); llvm::Type *NewType = - ThisPtr->getType()->getPointerElementType()->getPointerTo(TargetThisAS); - ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(), - ThisAS, SlotAS, NewType); + llvm::PointerType::get(getLLVMContext(), TargetThisAS); + ThisPtr = getTargetHooks().performAddrSpaceCast(*this, ThisPtr, ThisAS, + SlotAS, NewType); } // Push the this ptr. 
@@ -2137,7 +2204,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, const CXXRecordDecl *ClassDecl = D->getParent(); if (!NewPointerIsChecked) - EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), + EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This, getContext().getRecordType(ClassDecl), CharUnits::Zero()); if (D->isTrivial() && D->isDefaultConstructor()) { @@ -2150,10 +2217,9 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, // model that copy. if (isMemcpyEquivalentSpecialMember(D)) { assert(Args.size() == 2 && "unexpected argcount for trivial ctor"); - QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType(); - Address Src(Args[1].getRValue(*this).getScalarVal(), - CGM.getNaturalTypeAlignment(SrcTy)); + Address Src = makeNaturalAddressForPointer( + Args[1].getRValue(*this).getScalarVal(), SrcTy); LValue SrcLVal = MakeAddrLValue(Src, SrcTy); QualType DestTy = getContext().getTypeDeclType(ClassDecl); LValue DestLVal = MakeAddrLValue(This, DestTy); @@ -2182,7 +2248,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall( Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs); CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(D, Type)); - EmitCall(Info, Callee, ReturnValueSlot(), Args, nullptr, Loc); + EmitCall(Info, Callee, ReturnValueSlot(), Args, nullptr, false, Loc); // Generate vtable assumptions if we're constructing a complete object // with a vtable. We don't do this for base subobjects for two reasons: @@ -2206,7 +2272,9 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall( const CXXConstructorDecl *D, bool ForVirtualBase, Address This, bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) { CallArgList Args; - CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType()); + CallArg ThisArg(RValue::get(getAsNaturalPointerTo( + This, D->getThisType()->getPointeeType())), + D->getThisType()); // Forward the parameters. if (InheritedFromVBase && @@ -2331,13 +2399,15 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, CallArgList Args; // Push the this ptr. - Args.add(RValue::get(This.getPointer()), D->getThisType()); + Args.add(RValue::get(getAsNaturalPointerTo(This, D->getThisType())), + D->getThisType()); // Push the src ptr. QualType QT = *(FPT->param_type_begin()); llvm::Type *t = CGM.getTypes().ConvertType(QT); - Src = Builder.CreateBitCast(Src, t); - Args.add(RValue::get(Src.getPointer()), QT); + llvm::Value *Val = getAsNaturalPointerTo(Src, D->getThisType()); + llvm::Value *SrcVal = Builder.CreateBitCast(Val, t); + Args.add(RValue::get(SrcVal), QT); // Skip over first argument (Src). EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(), @@ -2361,7 +2431,9 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, // this Address This = LoadCXXThisAddress(); - DelegateArgs.add(RValue::get(This.getPointer()), (*I)->getType()); + DelegateArgs.add(RValue::get(getAsNaturalPointerTo( + This, (*I)->getType()->getPointeeType())), + (*I)->getType()); ++I; // FIXME: The location of the VTT parameter in the parameter list is @@ -2399,7 +2471,7 @@ namespace { void Emit(CodeGenFunction &CGF, Flags flags) override { // We are calling the destructor from within the constructor. // Therefore, "this" should have the expected type. 
- QualType ThisTy = Dtor->getThisObjectType(); + QualType ThisTy = Dtor->getFunctionObjectParameterType(); CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, /*Delegating=*/true, Addr, ThisTy); } @@ -2504,7 +2576,6 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) { // Apply the offsets. Address VTableField = LoadCXXThisAddress(); - if (!NonVirtualOffset.isZero() || VirtualOffset) VTableField = ApplyNonVirtualAndVirtualOffset( *this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass, @@ -2513,18 +2584,18 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) { // Finally, store the address point. Use the same LLVM types as the field to // support optimization. unsigned GlobalsAS = CGM.getDataLayout().getDefaultGlobalsAddressSpace(); - unsigned ProgAS = CGM.getDataLayout().getProgramAddressSpace(); - llvm::Type *VTablePtrTy = - llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true) - ->getPointerTo(ProgAS) - ->getPointerTo(GlobalsAS); - VTableField = Builder.CreatePointerBitCastOrAddrSpaceCast( - VTableField, VTablePtrTy->getPointerTo(GlobalsAS)); - VTableAddressPoint = Builder.CreatePointerBitCastOrAddrSpaceCast( - VTableAddressPoint, VTablePtrTy); + llvm::Type *PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), GlobalsAS); + // vtable field is derived from `this` pointer, therefore they should be in + // the same addr space. Note that this might not be LLVM address space 0. + VTableField = VTableField.withElementType(PtrTy); + + if (auto AuthenticationInfo = CGM.getVTablePointerAuthInfo( + this, Vptr.Base.getBase(), VTableField.emitRawPointer(*this))) + VTableAddressPoint = + EmitPointerAuthSign(*AuthenticationInfo, VTableAddressPoint); llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField); - TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrTy); + TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(PtrTy); CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); if (CGM.getCodeGenOpts().OptimizationLevel > 0 && CGM.getCodeGenOpts().StrictVTablePointers) @@ -2616,12 +2687,35 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) { llvm::Value *CodeGenFunction::GetVTablePtr(Address This, llvm::Type *VTableTy, - const CXXRecordDecl *RD) { - Address VTablePtrSrc = Builder.CreateElementBitCast(This, VTableTy); + const CXXRecordDecl *RD, + VTableAuthMode AuthMode) { + Address VTablePtrSrc = This.withElementType(VTableTy); llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable"); TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTableTy); CGM.DecorateInstructionWithTBAA(VTable, TBAAInfo); + if (auto AuthenticationInfo = + CGM.getVTablePointerAuthInfo(this, RD, This.emitRawPointer(*this))) { + if (AuthMode != VTableAuthMode::UnsafeUbsanStrip) { + VTable = cast<llvm::Instruction>( + EmitPointerAuthAuth(*AuthenticationInfo, VTable)); + if (AuthMode == VTableAuthMode::MustTrap) { + // This is clearly suboptimal but until we have an ability + // to rely on the authentication intrinsic trapping and force + // an authentication to occur we don't really have a choice. 
+ VTable = + cast<llvm::Instruction>(Builder.CreateBitCast(VTable, Int8PtrTy)); + Builder.CreateLoad(RawAddress(VTable, Int8Ty, CGM.getPointerAlign()), + /* IsVolatile */ true); + } + } else { + VTable = cast<llvm::Instruction>(EmitPointerAuthAuth( + CGPointerAuthInfo(0, PointerAuthenticationMode::Strip, false, false, + nullptr), + VTable)); + } + } + if (CGM.getCodeGenOpts().OptimizationLevel > 0 && CGM.getCodeGenOpts().StrictVTablePointers) CGM.DecorateInstructionWithInvariantGroup(VTable, RD); @@ -2670,18 +2764,23 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, if (SanOpts.has(SanitizerKind::CFIVCall)) EmitVTablePtrCheckForCall(RD, VTable, CodeGenFunction::CFITCK_VCall, Loc); else if (CGM.getCodeGenOpts().WholeProgramVTables && - // Don't insert type test assumes if we are forcing public std + // Don't insert type test assumes if we are forcing public // visibility. - !CGM.HasLTOVisibilityPublicStd(RD)) { - llvm::Metadata *MD = - CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0)); + !CGM.AlwaysHasLTOVisibilityPublic(RD)) { + QualType Ty = QualType(RD->getTypeForDecl(), 0); + llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(Ty); llvm::Value *TypeId = llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD); - llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy); + // If we already know that the call has hidden LTO visibility, emit + // @llvm.type.test(). Otherwise emit @llvm.public.type.test(), which WPD + // will convert to @llvm.type.test() if we assert at link time that we have + // whole program visibility. + llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD) + ? llvm::Intrinsic::type_test + : llvm::Intrinsic::public_type_test; llvm::Value *TypeTest = - Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), - {CastedVTable, TypeId}); + Builder.CreateCall(CGM.getIntrinsic(IID), {VTable, TypeId}); Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::assume), TypeTest); } } @@ -2696,8 +2795,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, EmitVTablePtrCheck(RD, VTable, TCK, Loc); } -void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, - llvm::Value *Derived, +void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc) { @@ -2720,7 +2818,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, if (MayBeNull) { llvm::Value *DerivedNotNull = - Builder.CreateIsNotNull(Derived, "cast.nonnull"); + Builder.CreateIsNotNull(Derived.emitRawPointer(*this), "cast.nonnull"); llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check"); ContBlock = createBasicBlock("cast.cont"); @@ -2731,8 +2829,8 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, } llvm::Value *VTable; - std::tie(VTable, ClassDecl) = CGM.getCXXABI().LoadVTablePtr( - *this, Address(Derived, getPointerAlign()), ClassDecl); + std::tie(VTable, ClassDecl) = + CGM.getCXXABI().LoadVTablePtr(*this, Derived, ClassDecl); EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc); @@ -2776,7 +2874,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD, } std::string TypeName = RD->getQualifiedNameAsString(); - if (getContext().getSanitizerBlacklist().isBlacklistedType(M, TypeName)) + if (getContext().getNoSanitizeList().containsType(M, TypeName)) return; SanitizerScope SanScope(this); @@ -2786,9 +2884,8 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD, 
CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0)); llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD); - llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy); llvm::Value *TypeTest = Builder.CreateCall( - CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, TypeId}); + CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, TypeId}); llvm::Constant *StaticData[] = { llvm::ConstantInt::get(Int8Ty, TCK), @@ -2798,7 +2895,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD, auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD); if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) { - EmitCfiSlowPathCheck(M, TypeTest, CrossDsoTypeId, CastedVTable, StaticData); + EmitCfiSlowPathCheck(M, TypeTest, CrossDsoTypeId, VTable, StaticData); return; } @@ -2811,9 +2908,9 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD, CGM.getLLVMContext(), llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); llvm::Value *ValidVtable = Builder.CreateCall( - CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, AllVtables}); + CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables}); EmitCheck(std::make_pair(TypeTest, M), SanitizerHandler::CFICheckFail, - StaticData, {CastedVTable, ValidVtable}); + StaticData, {VTable, ValidVtable}); } bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) { @@ -2829,12 +2926,13 @@ bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) { return false; std::string TypeName = RD->getQualifiedNameAsString(); - return !getContext().getSanitizerBlacklist().isBlacklistedType( - SanitizerKind::CFIVCall, TypeName); + return !getContext().getNoSanitizeList().containsType(SanitizerKind::CFIVCall, + TypeName); } llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad( - const CXXRecordDecl *RD, llvm::Value *VTable, uint64_t VTableByteOffset) { + const CXXRecordDecl *RD, llvm::Value *VTable, llvm::Type *VTableTy, + uint64_t VTableByteOffset) { SanitizerScope SanScope(this); EmitSanitizerStatReport(llvm::SanStat_CFI_VCall); @@ -2843,35 +2941,34 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad( CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0)); llvm::Value *TypeId = llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD); - llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy); llvm::Value *CheckedLoad = Builder.CreateCall( CGM.getIntrinsic(llvm::Intrinsic::type_checked_load), - {CastedVTable, llvm::ConstantInt::get(Int32Ty, VTableByteOffset), - TypeId}); + {VTable, llvm::ConstantInt::get(Int32Ty, VTableByteOffset), TypeId}); llvm::Value *CheckResult = Builder.CreateExtractValue(CheckedLoad, 1); std::string TypeName = RD->getQualifiedNameAsString(); if (SanOpts.has(SanitizerKind::CFIVCall) && - !getContext().getSanitizerBlacklist().isBlacklistedType( - SanitizerKind::CFIVCall, TypeName)) { + !getContext().getNoSanitizeList().containsType(SanitizerKind::CFIVCall, + TypeName)) { EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIVCall), SanitizerHandler::CFICheckFail, {}, {}); } - return Builder.CreateBitCast( - Builder.CreateExtractValue(CheckedLoad, 0), - cast<llvm::PointerType>(VTable->getType())->getElementType()); + return Builder.CreateBitCast(Builder.CreateExtractValue(CheckedLoad, 0), + VTableTy); } void CodeGenFunction::EmitForwardingCallToLambda( - const CXXMethodDecl *callOperator, - CallArgList &callArgs) { + const CXXMethodDecl *callOperator, CallArgList 
&callArgs, + const CGFunctionInfo *calleeFnInfo, llvm::Constant *calleePtr) { // Get the address of the call operator. - const CGFunctionInfo &calleeFnInfo = - CGM.getTypes().arrangeCXXMethodDeclaration(callOperator); - llvm::Constant *calleePtr = - CGM.GetAddrOfFunction(GlobalDecl(callOperator), - CGM.getTypes().GetFunctionType(calleeFnInfo)); + if (!calleeFnInfo) + calleeFnInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(callOperator); + + if (!calleePtr) + calleePtr = + CGM.GetAddrOfFunction(GlobalDecl(callOperator), + CGM.getTypes().GetFunctionType(*calleeFnInfo)); // Prepare the return slot. const FunctionProtoType *FPT = @@ -2879,8 +2976,8 @@ void CodeGenFunction::EmitForwardingCallToLambda( QualType resultType = FPT->getReturnType(); ReturnValueSlot returnSlot; if (!resultType->isVoidType() && - calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect && - !hasScalarEvaluationKind(calleeFnInfo.getReturnType())) + calleeFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect && + !hasScalarEvaluationKind(calleeFnInfo->getReturnType())) returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified(), /*IsUnused=*/false, /*IsExternallyDestructed=*/true); @@ -2891,7 +2988,7 @@ void CodeGenFunction::EmitForwardingCallToLambda( // Now emit our call. auto callee = CGCallee::forDirect(calleePtr, GlobalDecl(callOperator)); - RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs); + RValue RV = EmitCall(*calleeFnInfo, callee, returnSlot, callArgs); // If necessary, copy the returned value into the slot. if (!resultType->isVoidType() && returnSlot.isNull()) { @@ -2922,10 +3019,10 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() { QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); Address ThisPtr = GetAddrOfBlockDecl(variable); - CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType); + CallArgs.add(RValue::get(getAsNaturalPointerTo(ThisPtr, ThisType)), ThisType); // Add the rest of the parameters. - for (auto param : BD->parameters()) + for (auto *param : BD->parameters()) EmitDelegateCallArg(CallArgs, param, param->getBeginLoc()); assert(!Lambda->isGenericLambda() && @@ -2933,20 +3030,35 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() { EmitForwardingCallToLambda(CallOp, CallArgs); } -void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { +void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) { + if (MD->isVariadic()) { + // FIXME: Making this work correctly is nasty because it requires either + // cloning the body of the call operator or making the call operator + // forward. + CGM.ErrorUnsupported(MD, "lambda conversion to variadic function"); + return; + } + const CXXRecordDecl *Lambda = MD->getParent(); // Start building arguments for forwarding call CallArgList CallArgs; - QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); - llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType)); - CallArgs.add(RValue::get(ThisPtr), ThisType); + QualType LambdaType = getContext().getRecordType(Lambda); + QualType ThisType = getContext().getPointerType(LambdaType); + Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture"); + CallArgs.add(RValue::get(ThisPtr.emitRawPointer(*this)), ThisType); - // Add the rest of the parameters. 
- for (auto Param : MD->parameters()) + EmitLambdaDelegatingInvokeBody(MD, CallArgs); +} + +void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD, + CallArgList &CallArgs) { + // Add the rest of the forwarded parameters. + for (auto *Param : MD->parameters()) EmitDelegateCallArg(CallArgs, Param, Param->getBeginLoc()); + const CXXRecordDecl *Lambda = MD->getParent(); const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator(); // For a generic lambda, find the corresponding call operator specialization // to which the call to the static-invoker shall be forwarded. @@ -2960,10 +3072,21 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { assert(CorrespondingCallOpSpecialization); CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); } + + // Special lambda forwarding when there are inalloca parameters. + if (hasInAllocaArg(MD)) { + const CGFunctionInfo *ImplFnInfo = nullptr; + llvm::Function *ImplFn = nullptr; + EmitLambdaInAllocaImplFn(CallOp, &ImplFnInfo, &ImplFn); + + EmitForwardingCallToLambda(CallOp, CallArgs, ImplFnInfo, ImplFn); + return; + } + EmitForwardingCallToLambda(CallOp, CallArgs); } -void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) { +void CodeGenFunction::EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD) { if (MD->isVariadic()) { // FIXME: Making this work correctly is nasty because it requires either // cloning the body of the call operator or making the call operator forward. @@ -2971,5 +3094,56 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) { return; } - EmitLambdaDelegatingInvokeBody(MD); + // Forward %this argument. + CallArgList CallArgs; + QualType LambdaType = getContext().getRecordType(MD->getParent()); + QualType ThisType = getContext().getPointerType(LambdaType); + llvm::Value *ThisArg = CurFn->getArg(0); + CallArgs.add(RValue::get(ThisArg), ThisType); + + EmitLambdaDelegatingInvokeBody(MD, CallArgs); +} + +void CodeGenFunction::EmitLambdaInAllocaImplFn( + const CXXMethodDecl *CallOp, const CGFunctionInfo **ImplFnInfo, + llvm::Function **ImplFn) { + const CGFunctionInfo &FnInfo = + CGM.getTypes().arrangeCXXMethodDeclaration(CallOp); + llvm::Function *CallOpFn = + cast<llvm::Function>(CGM.GetAddrOfFunction(GlobalDecl(CallOp))); + + // Emit function containing the original call op body. __invoke will delegate + // to this function. + SmallVector<CanQualType, 4> ArgTypes; + for (auto I = FnInfo.arg_begin(); I != FnInfo.arg_end(); ++I) + ArgTypes.push_back(I->type); + *ImplFnInfo = &CGM.getTypes().arrangeLLVMFunctionInfo( + FnInfo.getReturnType(), FnInfoOpts::IsDelegateCall, ArgTypes, + FnInfo.getExtInfo(), {}, FnInfo.getRequiredArgs()); + + // Create mangled name as if this was a method named __impl. If for some + // reason the name doesn't look as expected then just tack __impl to the + // front. + // TODO: Use the name mangler to produce the right name instead of using + // string replacement. 
+ StringRef CallOpName = CallOpFn->getName(); + std::string ImplName; + if (size_t Pos = CallOpName.find_first_of("<lambda")) + ImplName = ("?__impl@" + CallOpName.drop_front(Pos)).str(); + else + ImplName = ("__impl" + CallOpName).str(); + + llvm::Function *Fn = CallOpFn->getParent()->getFunction(ImplName); + if (!Fn) { + Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(**ImplFnInfo), + llvm::GlobalValue::InternalLinkage, ImplName, + CGM.getModule()); + CGM.SetInternalFunctionAttributes(CallOp, Fn, **ImplFnInfo); + + const GlobalDecl &GD = GlobalDecl(CallOp); + const auto *D = cast<FunctionDecl>(GD.getDecl()); + CodeGenFunction(CGM).GenerateCode(GD, Fn, **ImplFnInfo); + CGM.SetLLVMFunctionAttributesForDefinition(D, Fn); + } + *ImplFn = Fn; } |
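The hunks above repeatedly replace pointer bitcasts (CreateBitCast, CreateElementBitCast, getPointerTo) with element-type annotations (Address::withElementType, emitRawPointer) and explicitly typed GEPs. That is the opaque-pointer idiom used by recent LLVM releases. Below is a minimal standalone sketch of that idiom against the public LLVM C++ API; it is not part of this diff, the function and value names (apply_offset, Obj, Offset) are illustrative only, and it assumes an LLVM release with opaque pointers (14 or later).

// Sketch: byte-offset a base pointer and load through it without any
// pointer-to-pointer bitcasts, mirroring the shape of the
// ApplyNonVirtualAndVirtualOffset change in the diff above.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("typed_gep_sketch", Ctx);
  IRBuilder<> Builder(Ctx);

  // void @apply_offset(ptr %obj, i64 %offset)
  Type *PtrTy = PointerType::get(Ctx, /*AddressSpace=*/0);
  Type *I64 = Type::getInt64Ty(Ctx);
  Type *I8 = Type::getInt8Ty(Ctx);
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Ctx), {PtrTy, I64}, /*isVarArg=*/false);
  Function *F =
      Function::Create(FTy, Function::ExternalLinkage, "apply_offset", M);

  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  Builder.SetInsertPoint(Entry);

  Value *Obj = F->getArg(0);
  Value *Offset = F->getArg(1);

  // With opaque pointers there is no bitcast to i8*; the element type used for
  // the byte offset (i8) is an operand of the GEP itself.
  Value *Adjusted = Builder.CreateInBoundsGEP(I8, Obj, Offset, "add.ptr");

  // A later access likewise supplies its own element type, e.g. reading a
  // pointer-sized slot (such as a vtable pointer) from the adjusted address.
  Builder.CreateLoad(PtrTy, Adjusted, "vtable");
  Builder.CreateRetVoid();

  verifyFunction(*F, &errs());
  M.print(outs(), nullptr);
  return 0;
}

The same design shows up in the diff's Address changes: instead of rewriting the pointer's type with a bitcast, the code keeps one opaque pointer value and records the intended element type alongside it (withElementType), passing that type explicitly to each GEP, load, and store.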