author     Dimitry Andric <dim@FreeBSD.org>  2012-04-14 14:01:31 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2012-04-14 14:01:31 +0000
commit     dbe13110f59f48b4dbb7552b3ac2935acdeece7f (patch)
tree       be1815eb79b42ff482a8562b13c2dcbf0c5dcbee /lib/CodeGen/CGClass.cpp
parent     9da628931ebf2609493570f87824ca22402cc65f (diff)
download   src-dbe13110f59f48b4dbb7552b3ac2935acdeece7f.tar.gz
           src-dbe13110f59f48b4dbb7552b3ac2935acdeece7f.zip

Vendor import of clang trunk r154661: vendor/clang/clang-trunk-r154661

Notes:
    svn path=/vendor/clang/dist/; revision=234287
    svn path=/vendor/clang/clang-trunk-r154661/; revision=234288; tag=vendor/clang/clang-trunk-r154661
Diffstat (limited to 'lib/CodeGen/CGClass.cpp')
-rw-r--r--  lib/CodeGen/CGClass.cpp  362
1 file changed, 247 insertions, 115 deletions
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index c28ecc05ded6..6303e2069d58 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
@@ -95,7 +96,6 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
// TODO: for complete types, this should be possible with a GEP.
llvm::Value *V = This;
if (Offset.isPositive()) {
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
V = Builder.CreateBitCast(V, Int8PtrTy);
V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
}
@@ -125,8 +125,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
BaseOffset = NonVirtualOffset;
// Apply the base offset.
- llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, CGF.Int8PtrTy);
ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
return ThisPtr;
@@ -250,9 +249,9 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
}
// Apply the offset.
- Value = Builder.CreatePtrToInt(Value, NonVirtualOffset->getType());
- Value = Builder.CreateSub(Value, NonVirtualOffset);
- Value = Builder.CreateIntToPtr(Value, DerivedPtrTy);
+ Value = Builder.CreateBitCast(Value, Int8PtrTy);
+ Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
+ "sub.ptr");
// Just cast.
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
@@ -397,16 +396,16 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
BaseClassDecl,
isBaseVirtual);
-
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(V, Qualifiers(),
+ AggValueSlot::forAddr(V, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
- if (CGF.CGM.getLangOptions().Exceptions &&
+ if (CGF.CGM.getLangOpts().Exceptions &&
!BaseClassDecl->hasTrivialDestructor())
CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
isBaseVirtual);
@@ -414,47 +413,59 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
LValue LHS,
+ Expr *Init,
llvm::Value *ArrayIndexVar,
- CXXCtorInitializer *MemberInit,
QualType T,
+ ArrayRef<VarDecl *> ArrayIndexes,
unsigned Index) {
- if (Index == MemberInit->getNumArrayIndices()) {
- CodeGenFunction::RunCleanupsScope Cleanups(CGF);
-
- llvm::Value *Dest = LHS.getAddress();
- if (ArrayIndexVar) {
- // If we have an array index variable, load it and use it as an offset.
- // Then, increment the value.
- llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
- Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
- llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
- Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
- CGF.Builder.CreateStore(Next, ArrayIndexVar);
- }
+ if (Index == ArrayIndexes.size()) {
+ LValue LV = LHS;
+ { // Scope for Cleanups.
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+
+ // Update the LValue.
+ LV.setAddress(Dest);
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
+ LV.setAlignment(std::min(Align, LV.getAlignment()));
+ }
- if (!CGF.hasAggregateLLVMType(T)) {
- LValue lvalue = CGF.MakeAddrLValue(Dest, T);
- CGF.EmitScalarInit(MemberInit->getInit(), /*decl*/ 0, lvalue, false);
- } else if (T->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), Dest,
- LHS.isVolatileQualified());
- } else {
- AggValueSlot Slot =
- AggValueSlot::forAddr(Dest, LHS.getQuals(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
-
- CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ if (!CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, LV.getAddress(),
+ LV.isVolatileQualified());
+ } else {
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(Init, Slot);
+ }
}
-
+
+ // Now, outside of the initializer cleanup scope, destroy the backing array
+ // for a std::initializer_list member.
+ CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);
+
return;
}
const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
assert(Array && "Array initialization without the array type?");
llvm::Value *IndexVar
- = CGF.GetAddrOfLocalVar(MemberInit->getArrayIndex(Index));
+ = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
assert(IndexVar && "Array index variable not loaded");
// Initialize this index variable to zero.
@@ -490,8 +501,8 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
// Inside the loop body recurse to emit the inner loop or, eventually, the
// constructor call.
- EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit,
- Array->getElementType(), Index + 1);
+ EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
+ Array->getElementType(), ArrayIndexes, Index + 1);
}
CGF.EmitBlock(ContinueBlock);
@@ -511,19 +522,15 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
namespace {
struct CallMemberDtor : EHScopeStack::Cleanup {
- FieldDecl *Field;
+ llvm::Value *V;
CXXDestructorDecl *Dtor;
- CallMemberDtor(FieldDecl *Field, CXXDestructorDecl *Dtor)
- : Field(Field), Dtor(Dtor) {}
+ CallMemberDtor(llvm::Value *V, CXXDestructorDecl *Dtor)
+ : V(V), Dtor(Dtor) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
- // FIXME: Is this OK for C++0x delegating constructors?
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
-
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- LHS.getAddress());
+ V);
}
};
}
@@ -533,7 +540,7 @@ static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
return Moving ? Record->hasTrivialMoveConstructor() :
Record->hasTrivialCopyConstructor();
}
-
+
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXCtorInitializer *MemberInit,
@@ -545,7 +552,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// non-static data member initializers.
FieldDecl *Field = MemberInit->getAnyMember();
- QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
+ QualType FieldType = Field->getType();
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS;
@@ -559,67 +566,83 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
}
- if (!CGF.hasAggregateLLVMType(Field->getType())) {
+ // Special case: if we are in a copy or move constructor, and we are copying
+ // an array of PODs or classes with trivial copy constructors, ignore the
+ // AST and perform the copy we know is equivalent.
+ // FIXME: This is hacky at best... if we had a bit more explicit information
+ // in the AST, we could generalize it more easily.
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicitlyDefined() &&
+ Constructor->isCopyOrMoveConstructor()) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && hasTrivialCopyOrMoveConstructor(Record,
+ Constructor->isMoveConstructor()))) {
+ // Find the source pointer. We know it's the last argument because
+ // we know we're in a copy constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+ }
+
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (MemberInit->getNumArrayIndices())
+ ArrayIndexes = MemberInit->getArrayIndexes();
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+}
+
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
+ LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes) {
+ QualType FieldType = Field->getType();
+ if (!hasAggregateLLVMType(FieldType)) {
if (LHS.isSimple()) {
- CGF.EmitExprAsInit(MemberInit->getInit(), Field, LHS, false);
+ EmitExprAsInit(Init, Field, LHS, false);
} else {
- RValue RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
- CGF.EmitStoreThroughLValue(RHS, LHS);
+ RValue RHS = RValue::get(EmitScalarExpr(Init));
+ EmitStoreThroughLValue(RHS, LHS);
}
- } else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
- LHS.isVolatileQualified());
+ } else if (FieldType->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, LHS.getAddress(), LHS.isVolatileQualified());
} else {
llvm::Value *ArrayIndexVar = 0;
- const ConstantArrayType *Array
- = CGF.getContext().getAsConstantArrayType(FieldType);
- if (Array && Constructor->isImplicitlyDefined() &&
- Constructor->isCopyOrMoveConstructor()) {
- llvm::Type *SizeTy
- = CGF.ConvertType(CGF.getContext().getSizeType());
+ if (ArrayIndexes.size()) {
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
// The LHS is a pointer to the first object we'll be constructing, as
// a flat array.
- QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
- llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ QualType BaseElementTy = getContext().getBaseElementType(FieldType);
+ llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
- BasePtr);
- LHS = CGF.MakeAddrLValue(BaseAddrPtr, BaseElementTy);
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
// Create an array index that will be used to walk over all of the
// objects we're constructing.
- ArrayIndexVar = CGF.CreateTempAlloca(SizeTy, "object.index");
+ ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
- CGF.Builder.CreateStore(Zero, ArrayIndexVar);
+ Builder.CreateStore(Zero, ArrayIndexVar);
- // If we are copying an array of PODs or classes with trivial copy
- // constructors, perform a single aggregate copy.
- const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
- if (BaseElementTy.isPODType(CGF.getContext()) ||
- (Record && hasTrivialCopyOrMoveConstructor(Record,
- Constructor->isMoveConstructor()))) {
- // Find the source pointer. We knows it's the last argument because
- // we know we're in a copy constructor.
- unsigned SrcArgIndex = Args.size() - 1;
- llvm::Value *SrcPtr
- = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
- LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
-
- // Copy the aggregate.
- CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
- LHS.isVolatileQualified());
- return;
- }
// Emit the block variables for the array indices, if any.
- for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
- CGF.EmitAutoVarDecl(*MemberInit->getArrayIndex(I));
+ for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
+ EmitAutoVarDecl(*ArrayIndexes[I]);
}
- EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
+ EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
+ ArrayIndexes, 0);
- if (!CGF.CGM.getLangOptions().Exceptions)
+ if (!CGM.getLangOpts().Exceptions)
return;
// FIXME: If we have an array of classes w/ non-trivial destructors,
@@ -631,8 +654,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor())
- CGF.EHStack.pushCleanup<CallMemberDtor>(EHCleanup, Field,
- RD->getDestructor());
+ EHStack.pushCleanup<CallMemberDtor>(EHCleanup, LHS.getAddress(),
+ RD->getDestructor());
}
}
@@ -708,6 +731,9 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+ // TODO: in restricted cases, we can emit the vbase initializers of
+ // a complete ctor and then delegate to the base ctor.
+
// Emit the constructor prologue, i.e. the base and member
// initializers.
EmitCtorPrologue(Ctor, CtorType, Args);
@@ -912,7 +938,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
}
// -fapple-kext must inline any call to this dtor into
// the caller's body.
- if (getContext().getLangOptions().AppleKext)
+ if (getContext().getLangOpts().AppleKext)
CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
break;
}
@@ -940,13 +966,13 @@ namespace {
class DestroyField : public EHScopeStack::Cleanup {
const FieldDecl *field;
- CodeGenFunction::Destroyer &destroyer;
+ CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
public:
DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
- : field(field), destroyer(*destroyer),
+ : field(field), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
@@ -1039,6 +1065,10 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
QualType::DestructionKind dtorKind = type.isDestructedType();
if (!dtorKind) continue;
+ // Anonymous union members do not have their destructors called.
+ const RecordType *RT = type->getAsUnionType();
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+
CleanupKind cleanupKind = getCleanupKind(dtorKind);
EHStack.pushCleanup<DestroyField>(cleanupKind, field,
getDestroyer(dtorKind),
@@ -1145,7 +1175,7 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
// Evaluate the constructor and its arguments in a regular
// partial-destroy cleanup.
- if (getLangOptions().Exceptions &&
+ if (getLangOpts().Exceptions &&
!ctor->getParent()->hasTrivialDestructor()) {
Destroyer *destroyer = destroyCXXObject;
pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
@@ -1192,8 +1222,8 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CGDebugInfo *DI = getDebugInfo();
if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
- // If debug info for this class has been emitted then this is the right time
- // to do so.
+ // If debug info for this class has not been emitted then this is the
+ // right time to do so.
const CXXRecordDecl *Parent = D->getParent();
DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
Parent->getLocation());
@@ -1273,7 +1303,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitCallArg(Args, *Arg, ArgType);
}
- EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
+ EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
ReturnValueSlot(), Args, D);
}
@@ -1309,7 +1339,7 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
EmitDelegateCallArg(DelegateArgs, param);
}
- EmitCall(CGM.getTypes().getFunctionInfo(Ctor, CtorType),
+ EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
ReturnValueSlot(), DelegateArgs, Ctor);
}
@@ -1338,8 +1368,10 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
llvm::Value *ThisPtr = LoadCXXThis();
+ QualType Ty = getContext().getTagDeclType(Ctor->getParent());
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
AggValueSlot AggSlot =
- AggValueSlot::forAddr(ThisPtr, Qualifiers(),
+ AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
@@ -1347,7 +1379,7 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
const CXXRecordDecl *ClassDecl = Ctor->getParent();
- if (CGM.getLangOptions().Exceptions && !ClassDecl->hasTrivialDestructor()) {
+ if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
CXXDtorType Type =
CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
@@ -1364,7 +1396,7 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
ForVirtualBase);
llvm::Value *Callee = 0;
- if (getContext().getLangOptions().AppleKext)
+ if (getContext().getLangOpts().AppleKext)
Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
DD->getParent());
@@ -1485,7 +1517,8 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
llvm::Type *AddressPointPtrTy =
VTableAddressPoint->getType()->getPointerTo();
VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
- Builder.CreateStore(VTableAddressPoint, VTableField);
+ llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
+ CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
void
@@ -1568,7 +1601,9 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
llvm::Type *Ty) {
llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
- return Builder.CreateLoad(VTablePtrSrc, "vtable");
+ llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
+ CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
+ return VTable;
}
static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
@@ -1682,7 +1717,7 @@ static bool UseVirtualCall(ASTContext &Context,
// When building with -fapple-kext, all calls must go through the vtable since
// the kernel linker can do runtime patching of vtables.
- if (Context.getLangOptions().AppleKext)
+ if (Context.getLangOpts().AppleKext)
return true;
return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
@@ -1692,13 +1727,110 @@ llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
llvm::Value *This) {
- const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
+ llvm::FunctionType *fnType =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodDeclaration(MD));
if (UseVirtualCall(getContext(), E, MD))
- return BuildVirtualCall(MD, This, Ty);
+ return BuildVirtualCall(MD, This, fnType);
+
+ return CGM.GetAddrOfFunction(MD, fnType);
+}
+
+void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs) {
+ // Lookup the call operator
+ DeclarationName Name
+ = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_const_result Calls = Lambda->lookup(Name);
+ CXXMethodDecl *CallOperator = cast<CXXMethodDecl>(*Calls.first++);
+ const FunctionProtoType *FPT =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the address of the call operator.
+ GlobalDecl GD(CallOperator);
+ const CGFunctionInfo &CalleeFnInfo =
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(CalleeFnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(CalleeFnInfo, Callee, Slot, CallArgs, CallOperator);
+
+ // Forward the returned value
+ if (!ResultType->isVoidType() && Slot.isNull())
+ EmitReturnOfRValue(RV, ResultType);
+}
+
+void CodeGenFunction::EmitLambdaBlockInvokeBody() {
+ const BlockDecl *BD = BlockInfo->getBlockDecl();
+ const VarDecl *variable = BD->capture_begin()->getVariable();
+ const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (BlockDecl::param_const_iterator I = BD->param_begin(),
+ E = BD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
+ if (cast<CXXMethodDecl>(CurFuncDecl)->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitFunctionBody(Args);
+}
+
+void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+ const CXXRecordDecl *Lambda = MD->getParent();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
+ if (MD->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
+ return;
+ }
- return CGM.GetAddrOfFunction(MD, Ty);
+ EmitLambdaDelegatingInvokeBody(MD);
}
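
For context only (this is not part of the imported diff): the new lambda helpers above, EmitForwardingCallToLambda, EmitLambdaDelegatingInvokeBody, and EmitLambdaStaticInvokeFunction, are the IR-generation side of converting a captureless C++11 lambda to a plain function pointer via a compiler-synthesized static invoker that forwards to the lambda's operator(). A minimal sketch of user code that exercises this path, assuming a standard C++11 compiler:

// Illustrative user code only; the names below are ordinary C++, not clang APIs.
// Converting the captureless lambda to a function pointer uses the synthesized
// static invoker, whose body simply forwards its arguments to the lambda's
// call operator (the forwarding that EmitForwardingCallToLambda emits).
int main() {
  int (*fp)(int) = [](int x) { return x + 1; };  // conversion via static invoker
  return fp(41) == 42 ? 0 : 1;
}

As the FIXMEs in EmitLambdaToBlockPointerBody and EmitLambdaStaticInvokeFunction note, the variadic case is rejected with ErrorUnsupported rather than forwarded.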