Diffstat (limited to 'lib/CodeGen/CGExprCXX.cpp')
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp  684
1 file changed, 436 insertions(+), 248 deletions(-)
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index eec2aceb88a2..71c8fb8b7ae3 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -28,25 +28,18 @@ static RequiredArgs
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *CE,
- CallArgList &Args) {
+ CallArgList &Args, CallArgList *RtlArgs) {
assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
"Trying to emit a member or operator call expr on a static method!");
-
- // C++11 [class.mfct.non-static]p2:
- // If a non-static member function of a class X is called for an object that
- // is not of type X, or of a type derived from X, the behavior is undefined.
- SourceLocation CallLoc;
- if (CE)
- CallLoc = CE->getExprLoc();
- CGF.EmitTypeCheck(
- isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
- : CodeGenFunction::TCK_MemberCall,
- CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
+ ASTContext &C = CGF.getContext();
// Push the this ptr.
- Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
+ const CXXRecordDecl *RD =
+ CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
+ Args.add(RValue::get(This),
+ RD ? C.getPointerType(C.getTypeDeclType(RD)) : C.VoidPtrTy);
// If there is an implicit parameter (e.g. VTT), emit it.
if (ImplicitParam) {
@@ -57,7 +50,12 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
// And the rest of the call args.
- if (CE) {
+ if (RtlArgs) {
+ // Special case: if the caller emitted the arguments right-to-left already
+ // (prior to emitting the *this argument), we're done. This happens for
+ // assignment operators.
+ Args.addFrom(*RtlArgs);
+ } else if (CE) {
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
@@ -71,26 +69,78 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
- const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ const CXXMethodDecl *MD, const CGCallee &Callee,
+ ReturnValueSlot ReturnValue,
llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
- const CallExpr *CE) {
+ const CallExpr *CE, CallArgList *RtlArgs) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
CallArgList Args;
RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
- *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args);
- return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
- Callee, ReturnValue, Args, MD);
+ *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
+ auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
RValue CodeGenFunction::EmitCXXDestructorCall(
- const CXXDestructorDecl *DD, llvm::Value *Callee, llvm::Value *This,
+ const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This,
llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
StructorType Type) {
CallArgList Args;
commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
- ImplicitParamTy, CE, Args);
+ ImplicitParamTy, CE, Args, nullptr);
return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
- Callee, ReturnValueSlot(), Args, DD);
+ Callee, ReturnValueSlot(), Args);
+}
+
+RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
+ const CXXPseudoDestructorExpr *E) {
+ QualType DestroyedType = E->getDestroyedType();
+ if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = E->getBase();
+ Address BaseValue = Address::invalid();
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitPointerWithAlignment(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (DestroyedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ DestroyedType.isVolatileQualified()),
+ ARCPreciseLifetime);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitIgnoredExpr(E->getBase());
+ }
+
+ return RValue::get(nullptr);
}
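
[Illustration, not part of the patch: the source form the new EmitCXXPseudoDestructorExpr lowers. The typedef name 'I' is hypothetical.]

    typedef int I;

    void destroy(I *p) {
      // Pseudo-destructor call on a scalar type: per C++ [expr.pseudo]p1 the
      // only effect is evaluating the operand, so codegen just emits the base
      // expression (EmitIgnoredExpr above). Under ARC, a __strong or __weak
      // pointee would instead be released or destroyed.
      p->~I();
    }
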
static CXXRecordDecl *getCXXRecord(const Expr *E) {
@@ -115,8 +165,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
- llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
- return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
+ CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
ReturnValue);
}
@@ -166,6 +216,19 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
}
}
+ // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
+ // operator before the LHS.
+ CallArgList RtlArgStorage;
+ CallArgList *RtlArgs = nullptr;
+ if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ if (OCE->isAssignmentOp()) {
+ RtlArgs = &RtlArgStorage;
+ EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
+ drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
+ /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
+ }
+ }
+
Address This = Address::invalid();
if (IsArrow)
This = EmitPointerWithAlignment(Base);
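
[Sketch, with hypothetical names, of the C++17 sequencing rule the RtlArgs path above implements: the right operand of an assignment is sequenced before the left, so for an overloaded operator= the argument must be emitted before *this.]

    #include <map>
    #include <string>

    std::map<int, std::string> m;
    int k() { return 1; }
    std::string f() { return "v"; }

    void demo() {
      // C++17 [expr.ass]p1: f() is sequenced before m[k()], even though the
      // callee is m[k()].operator=(...). Emitting the argument list
      // right-to-left before the 'this' pointer preserves that order.
      m[k()] = f();
    }
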
@@ -183,10 +246,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
// We don't like to generate the trivial copy/move assignment operator
// when it isn't necessary; just produce the proper effect here.
- // Special case: skip first argument of CXXOperatorCall (it is "this").
- unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
- Address RHS = EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
- EmitAggregateAssign(This, RHS, CE->getType());
+ LValue RHS = isa<CXXOperatorCallExpr>(CE)
+ ? MakeNaturalAlignAddrLValue(
+ (*RtlArgs)[0].RV.getScalarVal(),
+ (*(CE->arg_begin() + 1))->getType())
+ : EmitLValue(*CE->arg_begin());
+ EmitAggregateAssign(This, RHS.getAddress(), CE->getType());
return RValue::get(This.getPointer());
}
@@ -217,6 +282,22 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
+ // C++11 [class.mfct.non-static]p2:
+ // If a non-static member function of a class X is called for an object that
+ // is not of type X, or of a type derived from X, the behavior is undefined.
+ SourceLocation CallLoc;
+ ASTContext &C = getContext();
+ if (CE)
+ CallLoc = CE->getExprLoc();
+
+ EmitTypeCheck(isa<CXXConstructorDecl>(CalleeDecl)
+ ? CodeGenFunction::TCK_ConstructorCall
+ : CodeGenFunction::TCK_MemberCall,
+ CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()));
+
+ // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
+ // 'CalleeDecl' instead.
+
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
// virtual call mechanism.
@@ -224,8 +305,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
- llvm::Value *Callee;
-
+
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
assert(CE->arg_begin() == CE->arg_end() &&
"Destructor shouldn't have explicit parameters");
@@ -234,24 +314,32 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGM.getCXXABI().EmitVirtualDestructorCall(
*this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
} else {
+ CGCallee Callee;
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee =
- CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
+ Callee = CGCallee::forDirect(
+ CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
+ Dtor);
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
- Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
+ DDtor);
}
- EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ EmitCXXMemberOrOperatorCall(
+ CalleeDecl, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE, nullptr);
}
return RValue::get(nullptr);
}
+ CGCallee Callee;
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
+ Ctor);
} else if (UseVirtualCall) {
Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
CE->getLocStart());
@@ -266,9 +354,11 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGM.GetAddrOfFunction(MD, Ty);
+ Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
else {
- Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
+ DevirtualizedMethod);
}
}
@@ -277,8 +367,9 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
*this, CalleeDecl, This, UseVirtualCall);
}
- return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ return EmitCXXMemberOrOperatorCall(
+ CalleeDecl, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}
RValue
@@ -297,9 +388,6 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- // Get the member function pointer.
- llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
-
// Emit the 'this' pointer.
Address This = Address::invalid();
if (BO->getOpcode() == BO_PtrMemI)
@@ -310,9 +398,12 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
QualType(MPT->getClass(), 0));
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
+
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *ThisPtrForCall = nullptr;
- llvm::Value *Callee =
+ CGCallee Callee =
CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
ThisPtrForCall, MemFnPtr, MPT);
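
[Hypothetical example of the order the hunk above establishes: the object expression is emitted, and type-checked, before the member function pointer is loaded, matching C++17's left-before-right sequencing for the .* operator.]

    struct T { void m() {} };
    T t;
    T &pick() { return t; }
    void (T::*choose())() { return &T::m; }

    void demo() {
      // pick() is sequenced before choose() in C++17, so 'this' must be
      // emitted before EmitScalarExpr(MemFnExpr) -- the move made above.
      (pick().*choose())();
    }
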
@@ -851,8 +942,68 @@ void CodeGenFunction::EmitNewArrayInitializer(
CharUnits ElementAlign =
BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+ // Attempt to perform zero-initialization using memset.
+ auto TryMemsetInitialization = [&]() -> bool {
+ // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
+ // we can initialize with a memset to -1.
+ if (!CGM.getTypes().isZeroInitializable(ElementType))
+ return false;
+
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+
+ // Subtract out the size of any elements we've already initialized.
+ auto *RemainingSize = AllocSizeWithoutCookie;
+ if (InitListElements) {
+ // We know this can't overflow; we check this when doing the allocation.
+ auto *InitializedSize = llvm::ConstantInt::get(
+ RemainingSize->getType(),
+ getContext().getTypeSizeInChars(ElementType).getQuantity() *
+ InitListElements);
+ RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
+ }
+
+ // Create the memset.
+ Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
+ return true;
+ };
+
// If the initializer is an initializer list, first do the explicit elements.
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ // Initializing from a (braced) string literal is a special case; the init
+ // list element does not initialize a (single) array element.
+ if (ILE->isStringLiteralInit()) {
+ // Initialize the initial portion of length equal to that of the string
+ // literal. The allocation must be for at least this much; we emitted a
+ // check for that earlier.
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(ILE->getInit(0), Slot);
+
+ // Move past these elements.
+ InitListElements =
+ cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
+ ->getSize().getZExtValue();
+ CurPtr =
+ Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ Builder.getSize(InitListElements),
+ "string.init.end"),
+ CurPtr.getAlignment().alignmentAtOffset(InitListElements *
+ ElementSize));
+
+ // Zero out the rest, if any remain.
+ llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
+ if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
+ bool OK = TryMemsetInitialization();
+ (void)OK;
+ assert(OK && "couldn't memset character type?");
+ }
+ return;
+ }
+
InitListElements = ILE->getNumInits();
// If this is a multi-dimensional array new, we will initialize multiple
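
[Illustrative source, hypothetical, for the two init paths above: a braced string literal fills the leading elements, and TryMemsetInitialization zero-fills whatever remains.]

    void demo() {
      // isStringLiteralInit(): 6 chars copied ("hello" plus '\0'); the other
      // 10 bytes are zeroed by the memset fast path.
      char *p = new char[16]{"hello"};

      // Ordinary init list: 3 explicit elements; the remaining 5 ints are
      // zero-filled with a single memset since int is zero-initializable.
      int *q = new int[8]{1, 2, 3};

      delete[] p;
      delete[] q;
    }
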
@@ -919,32 +1070,6 @@ void CodeGenFunction::EmitNewArrayInitializer(
CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
}
- // Attempt to perform zero-initialization using memset.
- auto TryMemsetInitialization = [&]() -> bool {
- // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
- // we can initialize with a memset to -1.
- if (!CGM.getTypes().isZeroInitializable(ElementType))
- return false;
-
- // Optimization: since zero initialization will just set the memory
- // to all zeroes, generate a single memset to do it in one shot.
-
- // Subtract out the size of any elements we've already initialized.
- auto *RemainingSize = AllocSizeWithoutCookie;
- if (InitListElements) {
- // We know this can't overflow; we check this when doing the allocation.
- auto *InitializedSize = llvm::ConstantInt::get(
- RemainingSize->getType(),
- getContext().getTypeSizeInChars(ElementType).getQuantity() *
- InitListElements);
- RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
- }
-
- // Create the memset.
- Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
- return true;
- };
-
// If all elements have already been initialized, skip any further
// initialization.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
@@ -1110,23 +1235,24 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
- const FunctionDecl *Callee,
+ const FunctionDecl *CalleeDecl,
const FunctionProtoType *CalleeType,
const CallArgList &Args) {
llvm::Instruction *CallOrInvoke;
- llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
+ llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
RValue RV =
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
Args, CalleeType, /*chainCall=*/false),
- CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
+ Callee, ReturnValueSlot(), Args, &CallOrInvoke);
/// C++1y [expr.new]p10:
/// [In a new-expression,] an implementation is allowed to omit a call
/// to a replaceable global allocation function.
///
/// We model such elidable calls with the 'builtin' attribute.
- llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
- if (Callee->isReplaceableGlobalAllocationFunction() &&
+ llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
+ if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
// FIXME: Add addAttribute to CallSite.
if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
@@ -1159,111 +1285,116 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
llvm_unreachable("predeclared global operator new/delete is missing");
}
-namespace {
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression.
- class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
- const FunctionDecl *OperatorDelete;
- llvm::Value *Ptr;
- llvm::Value *AllocSize;
-
- RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+static std::pair<bool, bool>
+shouldPassSizeAndAlignToUsualDelete(const FunctionProtoType *FPT) {
+ auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
- public:
- static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(RValue);
- }
+ // The first argument is always a void*.
+ ++AI;
- CallDeleteDuringNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- llvm::Value *Ptr,
- llvm::Value *AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
-
- void setPlacementArg(unsigned I, RValue Arg) {
- assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
- }
-
- void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
- (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
+ // Figure out what other parameters we should be implicitly passing.
+ bool PassSize = false;
+ bool PassAlignment = false;
- CallArgList DeleteArgs;
-
- // The first argument is always a void*.
- FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
- DeleteArgs.add(RValue::get(Ptr), *AI++);
-
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumParams() == NumPlacementArgs + 2)
- DeleteArgs.add(RValue::get(AllocSize), *AI++);
+ if (AI != AE && (*AI)->isIntegerType()) {
+ PassSize = true;
+ ++AI;
+ }
- // Pass the rest of the arguments, which must match exactly.
- for (unsigned I = 0; I != NumPlacementArgs; ++I)
- DeleteArgs.add(getPlacementArgs()[I], *AI++);
+ if (AI != AE && (*AI)->isAlignValT()) {
+ PassAlignment = true;
+ ++AI;
+ }
- // Call 'operator delete'.
- EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
- }
- };
+ assert(AI == AE && "unexpected usual deallocation function parameter");
+ return {PassSize, PassAlignment};
+}
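
[The pairs this helper returns correspond to the usual deallocation signatures below; a hedged sketch with a hypothetical class, keeping only one declaration active.]

    #include <cstddef>
    #include <new>

    struct X {
      // After the leading void*, an optional std::size_t and then an
      // optional std::align_val_t may follow (C++17):
      static void operator delete(void *p) { ::operator delete(p); } // {false, false}
      // void operator delete(void*, std::size_t);                   // {true,  false}
      // void operator delete(void*, std::align_val_t);              // {false, true}
      // void operator delete(void*, std::size_t, std::align_val_t); // {true,  true}
    };
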
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression when the new expression is
- /// conditional.
- class CallDeleteDuringConditionalNew final : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon abnormal
+ /// exit from a new expression. Templated on a traits type that deals with
+ /// ensuring that the arguments dominate the cleanup if necessary.
+ template<typename Traits>
+ class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+ /// Type used to hold llvm::Value*s.
+ typedef typename Traits::ValueTy ValueTy;
+ /// Type used to hold RValues.
+ typedef typename Traits::RValueTy RValueTy;
+ struct PlacementArg {
+ RValueTy ArgValue;
+ QualType ArgType;
+ };
+
+ unsigned NumPlacementArgs : 31;
+ unsigned PassAlignmentToPlacementDelete : 1;
const FunctionDecl *OperatorDelete;
- DominatingValue<RValue>::saved_type Ptr;
- DominatingValue<RValue>::saved_type AllocSize;
+ ValueTy Ptr;
+ ValueTy AllocSize;
+ CharUnits AllocAlign;
- DominatingValue<RValue>::saved_type *getPlacementArgs() {
- return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ PlacementArg *getPlacementArgs() {
+ return reinterpret_cast<PlacementArg *>(this + 1);
}
public:
static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ return NumPlacementArgs * sizeof(PlacementArg);
}
- CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- DominatingValue<RValue>::saved_type Ptr,
- DominatingValue<RValue>::saved_type AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
-
- void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete, ValueTy Ptr,
+ ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
+ CharUnits AllocAlign)
+ : NumPlacementArgs(NumPlacementArgs),
+ PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
+ OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
+ AllocAlign(AllocAlign) {}
+
+ void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
+ getPlacementArgs()[I] = {Arg, Type};
}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
- (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
-
+ const FunctionProtoType *FPT =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
CallArgList DeleteArgs;
// The first argument is always a void*.
- FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
- DeleteArgs.add(Ptr.restore(CGF), *AI++);
-
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumParams() == NumPlacementArgs + 2) {
- RValue RV = AllocSize.restore(CGF);
- DeleteArgs.add(RV, *AI++);
+ DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
+
+ // Figure out what other parameters we should be implicitly passing.
+ bool PassSize = false;
+ bool PassAlignment = false;
+ if (NumPlacementArgs) {
+ // A placement deallocation function is implicitly passed an alignment
+ // if the placement allocation function was, but is never passed a size.
+ PassAlignment = PassAlignmentToPlacementDelete;
+ } else {
+ // For a non-placement new-expression, 'operator delete' can take a
+ // size and/or an alignment if it has the right parameters.
+ std::tie(PassSize, PassAlignment) =
+ shouldPassSizeAndAlignToUsualDelete(FPT);
}
+ // The second argument can be a std::size_t (for non-placement delete).
+ if (PassSize)
+ DeleteArgs.add(Traits::get(CGF, AllocSize),
+ CGF.getContext().getSizeType());
+
+ // The next (second or third) argument can be a std::align_val_t, which
+ // is an enum whose underlying type is std::size_t.
+ // FIXME: Use the right type as the parameter type. Note that in a call
+ // to operator delete(size_t, ...), we may not have it available.
+ if (PassAlignment)
+ DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
+ CGF.SizeTy, AllocAlign.getQuantity())),
+ CGF.getContext().getSizeType());
+
// Pass the rest of the arguments, which must match exactly.
for (unsigned I = 0; I != NumPlacementArgs; ++I) {
- RValue RV = getPlacementArgs()[I].restore(CGF);
- DeleteArgs.add(RV, *AI++);
+ auto Arg = getPlacementArgs()[I];
+ DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
}
// Call 'operator delete'.
@@ -1278,18 +1409,34 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
const CXXNewExpr *E,
Address NewPtr,
llvm::Value *AllocSize,
+ CharUnits AllocAlign,
const CallArgList &NewArgs) {
+ unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;
+
// If we're not inside a conditional branch, then the cleanup will
// dominate and we can do the easier (and more efficient) thing.
if (!CGF.isInConditionalBranch()) {
- CallDeleteDuringNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- NewPtr.getPointer(),
- AllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
+ struct DirectCleanupTraits {
+ typedef llvm::Value *ValueTy;
+ typedef RValue RValueTy;
+ static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
+ static RValue get(CodeGenFunction &, RValueTy V) { return V; }
+ };
+
+ typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
+
+ DirectCleanup *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr.getPointer(),
+ AllocSize,
+ E->passAlignment(),
+ AllocAlign);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+ auto &Arg = NewArgs[I + NumNonPlacementArgs];
+ Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
+ }
return;
}
@@ -1300,15 +1447,28 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
- CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- SavedNewPtr,
- SavedAllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I,
- DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
+ struct ConditionalCleanupTraits {
+ typedef DominatingValue<RValue>::saved_type ValueTy;
+ typedef DominatingValue<RValue>::saved_type RValueTy;
+ static RValue get(CodeGenFunction &CGF, ValueTy V) {
+ return V.restore(CGF);
+ }
+ };
+ typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
+
+ ConditionalCleanup *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize,
+ E->passAlignment(),
+ AllocAlign);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+ auto &Arg = NewArgs[I + NumNonPlacementArgs];
+ Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
+ Arg.Ty);
+ }
CGF.initFullExprCleanup();
}
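
[What the cleanup guards, as hypothetical source: if the constructor throws after a placement allocation succeeds, the matching placement delete is called with the same extra arguments.]

    #include <cstddef>

    struct Pool { void *grab(std::size_t); void drop(void *); };

    void *operator new(std::size_t n, Pool &p) { return p.grab(n); }
    // Placement delete: invoked by the cleanup if construction throws.
    void operator delete(void *q, Pool &p) { p.drop(q); }

    struct Widget { Widget(); };  // constructor may throw

    Widget *make(Pool &pool) {
      // The EH cleanup pushed above re-passes 'pool' to operator delete; in
      // a conditional context the argument is first saved with
      // DominatingValue<RValue>::save so it dominates the cleanup.
      return new (pool) Widget;
    }
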
@@ -1323,7 +1483,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// If there is a brace-initializer, cannot allocate fewer elements than inits.
unsigned minElements = 0;
if (E->isArray() && E->hasInitializer()) {
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
+ if (ILE && ILE->isStringLiteralInit())
+ minElements =
+ cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
+ ->getSize().getZExtValue();
+ else if (ILE)
minElements = ILE->getNumInits();
}
@@ -1332,6 +1497,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
+ CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
@@ -1347,10 +1513,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// The pointer expression will, in many cases, be an opaque void*.
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
- if (alignSource != AlignmentSource::Decl) {
- allocation = Address(allocation.getPointer(),
- getContext().getTypeAlignInChars(allocType));
- }
+ if (alignSource != AlignmentSource::Decl)
+ allocation = Address(allocation.getPointer(), allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
@@ -1363,28 +1527,55 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
} else {
const FunctionProtoType *allocatorType =
allocator->getType()->castAs<FunctionProtoType>();
+ unsigned ParamsToSkip = 0;
// The allocation size is the first argument.
QualType sizeType = getContext().getSizeType();
allocatorArgs.add(RValue::get(allocSize), sizeType);
+ ++ParamsToSkip;
+
+ if (allocSize != allocSizeWithoutCookie) {
+ CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
+ allocAlign = std::max(allocAlign, cookieAlign);
+ }
+
+ // The allocation alignment may be passed as the second argument.
+ if (E->passAlignment()) {
+ QualType AlignValT = sizeType;
+ if (allocatorType->getNumParams() > 1) {
+ AlignValT = allocatorType->getParamType(1);
+ assert(getContext().hasSameUnqualifiedType(
+ AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
+ sizeType) &&
+ "wrong type for alignment parameter");
+ ++ParamsToSkip;
+ } else {
+ // Corner case, passing alignment to 'operator new(size_t, ...)'.
+ assert(allocator->isVariadic() && "can't pass alignment to allocator");
+ }
+ allocatorArgs.add(
+ RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
+ AlignValT);
+ }
- // We start at 1 here because the first argument (the allocation size)
- // has already been emitted.
+ // FIXME: Why do we not pass a CalleeDecl here?
EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
- /* CalleeDecl */ nullptr,
- /*ParamsToSkip*/ 1);
+ /*CalleeDecl*/nullptr, /*ParamsToSkip*/ParamsToSkip);
RValue RV =
EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
- // For now, only assume that the allocation function returns
- // something satisfactorily aligned for the element type, plus
- // the cookie if we have one.
- CharUnits allocationAlign =
- getContext().getTypeAlignInChars(allocType);
- if (allocSize != allocSizeWithoutCookie) {
- CharUnits cookieAlign = getSizeAlign(); // FIXME?
- allocationAlign = std::max(allocationAlign, cookieAlign);
+ // If this was a call to a global replaceable allocation function that does
+ // not take an alignment argument, the allocator is known to produce
+ // storage that's suitably aligned for any object that fits, up to a known
+ // threshold. Otherwise assume it's suitably aligned for the allocated type.
+ CharUnits allocationAlign = allocAlign;
+ if (!E->passAlignment() &&
+ allocator->isReplaceableGlobalAllocationFunction()) {
+ unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
+ Target.getNewAlign(), getContext().getTypeSize(allocType)));
+ allocationAlign = std::max(
+ allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
}
allocation = Address(RV.getScalarVal(), allocationAlign);
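
[Hypothetical over-aligned type showing when E->passAlignment() fires, and what alignment is otherwise assumed.]

    #include <new>

    struct alignas(64) Vec { float lanes[16]; };

    Vec *make() {
      // alignof(Vec) exceeds the default new alignment, so this lowers to
      // operator new(sizeof(Vec), std::align_val_t(64)) and the result is
      // taken to be 64-byte aligned. A plain 'new int' from a replaceable
      // global allocator is instead assumed aligned to the minimum of
      // Target.getNewAlign() and the type's size, power-of-two floored,
      // per the code above.
      return new Vec;
    }
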
@@ -1423,7 +1614,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Instruction *cleanupDominator = nullptr;
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
- EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
+ EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
+ allocatorArgs);
operatorDeleteCleanup = EHStack.stable_begin();
cleanupDominator = Builder.CreateUnreachable();
}
@@ -1485,31 +1677,58 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
}
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
- llvm::Value *Ptr,
- QualType DeleteTy) {
- assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+ llvm::Value *Ptr, QualType DeleteTy,
+ llvm::Value *NumElements,
+ CharUnits CookieSize) {
+ assert((!NumElements && CookieSize.isZero()) ||
+ DeleteFD->getOverloadedOperator() == OO_Array_Delete);
const FunctionProtoType *DeleteFTy =
DeleteFD->getType()->getAs<FunctionProtoType>();
CallArgList DeleteArgs;
- // Check if we need to pass the size to the delete operator.
- llvm::Value *Size = nullptr;
- QualType SizeTy;
- if (DeleteFTy->getNumParams() == 2) {
- SizeTy = DeleteFTy->getParamType(1);
- CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
- Size = llvm::ConstantInt::get(ConvertType(SizeTy),
- DeleteTypeSize.getQuantity());
- }
+ std::pair<bool, bool> PassSizeAndAlign =
+ shouldPassSizeAndAlignToUsualDelete(DeleteFTy);
+
+ auto ParamTypeIt = DeleteFTy->param_type_begin();
- QualType ArgTy = DeleteFTy->getParamType(0);
+ // Pass the pointer itself.
+ QualType ArgTy = *ParamTypeIt++;
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
- if (Size)
- DeleteArgs.add(RValue::get(Size), SizeTy);
+ // Pass the size if the delete function has a size_t parameter.
+ if (PassSizeAndAlign.first) {
+ QualType SizeType = *ParamTypeIt++;
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
+ DeleteTypeSize.getQuantity());
+
+ // For array new, multiply by the number of elements.
+ if (NumElements)
+ Size = Builder.CreateMul(Size, NumElements);
+
+ // If there is a cookie, add the cookie size.
+ if (!CookieSize.isZero())
+ Size = Builder.CreateAdd(
+ Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
+
+ DeleteArgs.add(RValue::get(Size), SizeType);
+ }
+
+ // Pass the alignment if the delete function has an align_val_t parameter.
+ if (PassSizeAndAlign.second) {
+ QualType AlignValType = *ParamTypeIt++;
+ CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
+ getContext().getTypeAlignIfKnown(DeleteTy));
+ llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
+ DeleteTypeAlign.getQuantity());
+ DeleteArgs.add(RValue::get(Align), AlignValType);
+ }
+
+ assert(ParamTypeIt == DeleteFTy->param_type_end() &&
+ "unknown parameter to usual delete function");
// Emit the call to delete.
EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
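
[A hypothetical class whose delete-expression exercises the size math above: element size times count, plus the array cookie.]

    #include <cstddef>
    #include <new>

    struct S {
      ~S() {}  // non-trivial destructor => an array cookie stores the count
      static void operator delete[](void *p, std::size_t) {
        ::operator delete[](p);
      }
    };

    void destroy(S *p) {
      // EmitDeleteCall receives NumElements and a non-zero CookieSize here;
      // the size argument becomes sizeof(S) * n + cookie (CreateMul, then
      // CreateAdd above).
      delete[] p;
    }
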
@@ -1546,6 +1765,15 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
Address Ptr,
QualType ElementType) {
+ // C++11 [expr.delete]p3:
+ // If the static type of the object to be deleted is different from its
+ // dynamic type, the static type shall be a base class of the dynamic type
+ // of the object to be deleted and the static type shall have a virtual
+ // destructor or the behavior is undefined.
+ CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
+ DE->getExprLoc(), Ptr.getPointer(),
+ ElementType);
+
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
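
[The undefined behavior the new EmitTypeCheck targets, as a hypothetical example diagnosable with -fsanitize=vptr.]

    struct Base { virtual void f() {} ~Base() {} };  // polymorphic, non-virtual dtor
    struct Derived : Base { int extra; };

    void bad(Base *b) {
      // If *b is dynamically a Derived, this is UB under C++11
      // [expr.delete]p3 because Base's destructor is not virtual; the
      // TCK_MemberCall check added above lets the sanitizer flag the
      // mismatched dynamic type at run time.
      delete b;
    }
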
@@ -1613,45 +1841,8 @@ namespace {
ElementType(ElementType), CookieSize(CookieSize) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *DeleteFTy =
- OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
-
- CallArgList Args;
-
- // Pass the pointer as the first argument.
- QualType VoidPtrTy = DeleteFTy->getParamType(0);
- llvm::Value *DeletePtr
- = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
- Args.add(RValue::get(DeletePtr), VoidPtrTy);
-
- // Pass the original requested size as the second argument.
- if (DeleteFTy->getNumParams() == 2) {
- QualType size_t = DeleteFTy->getParamType(1);
- llvm::IntegerType *SizeTy
- = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
-
- CharUnits ElementTypeSize =
- CGF.CGM.getContext().getTypeSizeInChars(ElementType);
-
- // The size of an element, multiplied by the number of elements.
- llvm::Value *Size
- = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
- if (NumElements)
- Size = CGF.Builder.CreateMul(Size, NumElements);
-
- // Plus the size of the cookie if applicable.
- if (!CookieSize.isZero()) {
- llvm::Value *CookieSizeV
- = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
- Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
- }
-
- Args.add(RValue::get(Size), size_t);
- }
-
- // Emit the call to delete.
- EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
+ CookieSize);
}
};
}
@@ -1949,10 +2140,7 @@ void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
auto VAT = CurField->getCapturedVLAType();
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
} else {
- ArrayRef<VarDecl *> ArrayIndexes;
- if (CurField->getType()->isArrayType())
- ArrayIndexes = E->getCaptureInitIndexVars(i);
- EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ EmitInitializerForField(*CurField, LV, *i);
}
}
}