Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp            |  13
-rw-r--r--  lib/CodeGen/CGCXX.cpp                | 172
-rw-r--r--  lib/CodeGen/CGCall.cpp               |   2
-rw-r--r--  lib/CodeGen/CGDecl.cpp               |  44
-rw-r--r--  lib/CodeGen/CGException.cpp          | 105
-rw-r--r--  lib/CodeGen/CGExpr.cpp               | 224
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp            |   5
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp       |  17
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp         |  33
-rw-r--r--  lib/CodeGen/CGObjC.cpp               |  22
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.h  |   2
-rw-r--r--  lib/CodeGen/CGRtti.cpp               |  21
-rw-r--r--  lib/CodeGen/CGStmt.cpp               |   5
-rw-r--r--  lib/CodeGen/CGValue.h                |   2
-rw-r--r--  lib/CodeGen/CGVtable.cpp             | 228
-rw-r--r--  lib/CodeGen/CMakeLists.txt           |   1
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp      |  75
-rw-r--r--  lib/CodeGen/CodeGenFunction.h        |  27
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp        |  36
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp         |   2
-rw-r--r--  lib/CodeGen/Mangle.cpp               |  21
-rw-r--r--  lib/CodeGen/Mangle.h                 |   4
22 files changed, 735 insertions, 326 deletions
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 987cd24e2c8b..c26921969a88 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -199,6 +199,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
}
case Builtin::BI__builtin_object_size: {
+ // FIXME: We're awaiting the llvm intrinsic.
+#if 0
+ // We pass this builtin onto the optimizer so that it can
+ // figure out the object size in more complex cases.
+ const llvm::Type *ResType[] = {
+ ConvertType(E->getType())
+ };
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
+ return RValue::get(Builder.CreateCall2(F,
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1))));
+#else
// FIXME: Implement. For now we just always fail and pretend we
// don't know the object size.
llvm::APSInt TypeArg = E->getArg(1)->EvaluateAsInt(CGM.getContext());
@@ -207,6 +219,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
bool UseMinimum = TypeArg.getZExtValue() & 2;
return RValue::get(
llvm::ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
+#endif
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
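
For reference, the contract being implemented here is GCC's __builtin_object_size; the sketch below (illustrative, not part of the patch) shows the observable behavior, including the fallback constants the code above emits:

    #include <cstddef>

    char buf[16];

    // Bit 1 of the second argument (mask 2) requests the minimum estimate;
    // when the size cannot be determined, the result is (size_t)-1 for
    // maximum queries and 0 for minimum queries.
    std::size_t whole() { return __builtin_object_size(buf, 0); }            // 16
    std::size_t unknown_max(char *p) { return __builtin_object_size(p, 0); } // -1
    std::size_t unknown_min(char *p) { return __builtin_object_size(p, 2); } // 0
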
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index cfa669dc4b6e..3e854ca279b6 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -71,16 +71,17 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
const Expr *Init = D.getInit();
QualType T = D.getType();
+ bool isVolatile = getContext().getCanonicalType(T).isVolatileQualified();
if (T->isReferenceType()) {
ErrorUnsupported(Init, "global variable that binds to a reference");
} else if (!hasAggregateLLVMType(T)) {
llvm::Value *V = EmitScalarExpr(Init);
- EmitStoreOfScalar(V, DeclPtr, T.isVolatileQualified(), T);
+ EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
} else if (T->isAnyComplexType()) {
- EmitComplexExprIntoAddr(Init, DeclPtr, T.isVolatileQualified());
+ EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
} else {
- EmitAggExpr(Init, DeclPtr, T.isVolatileQualified());
+ EmitAggExpr(Init, DeclPtr, isVolatile);
if (const RecordType *RT = T->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -95,8 +96,9 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty())
return;
- const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- false);
+ const llvm::FunctionType *FTy
+ = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
// Create our global initialization function.
// FIXME: Should this be tweakable by targets?
@@ -139,18 +141,20 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
// Create the guard variable.
llvm::GlobalValue *GuardV =
- new llvm::GlobalVariable(CGM.getModule(), llvm::Type::getInt64Ty(VMContext), false,
- GV->getLinkage(),
- llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext)),
+ new llvm::GlobalVariable(CGM.getModule(), llvm::Type::getInt64Ty(VMContext),
+ false, GV->getLinkage(),
+ llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext)),
GuardVName.str());
// Load the first byte of the guard variable.
- const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
+ const llvm::Type *PtrTy
+ = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
"tmp");
// Compare it against 0.
- llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext));
+ llvm::Value *nullValue
+ = llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext));
llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue , "tobool");
llvm::BasicBlock *InitBlock = createBasicBlock("init");
@@ -163,7 +167,8 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
EmitCXXGlobalVarDeclInit(D, GV);
- Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1),
+ Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
+ 1),
Builder.CreateBitCast(GuardV, PtrTy));
EmitBlock(EndBlock);
@@ -591,11 +596,16 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
const CXXConstructExpr *E) {
assert(Dest && "Must have a destination!");
const CXXConstructorDecl *CD = E->getConstructor();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(E->getType());
// For a copy constructor, even if it is trivial, must fall thru so
// its argument is code-gen'ed.
if (!CD->isCopyConstructor(getContext())) {
+ QualType InitType = E->getType();
+ if (Array)
+ InitType = getContext().getBaseElementType(Array);
const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(InitType->getAs<RecordType>()->getDecl());
if (RD->hasTrivialConstructor())
return;
}
@@ -606,9 +616,18 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
EmitAggExpr((*i), Dest, false);
return;
}
- // Call the constructor.
- EmitCXXConstructorCall(CD, Ctor_Complete, Dest,
- E->arg_begin(), E->arg_end());
+ if (Array) {
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(Dest, BasePtr);
+ EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr);
+ }
+ else
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Ctor_Complete, Dest,
+ E->arg_begin(), E->arg_end());
}
void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
@@ -688,32 +707,39 @@ llvm::Constant *CodeGenFunction::GenerateThunk(llvm::Function *Fn,
const CXXMethodDecl *MD,
bool Extern, int64_t nv,
int64_t v) {
- QualType R = MD->getType()->getAs<FunctionType>()->getResultType();
+ return GenerateCovariantThunk(Fn, MD, Extern, nv, v, 0, 0);
+}
- FunctionArgList Args;
- ImplicitParamDecl *ThisDecl =
- ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
- MD->getThisType(getContext()));
- Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
- for (FunctionDecl::param_const_iterator i = MD->param_begin(),
- e = MD->param_end();
- i != e; ++i) {
- ParmVarDecl *D = *i;
- Args.push_back(std::make_pair(D, D->getType()));
+llvm::Value *CodeGenFunction::DynamicTypeAdjust(llvm::Value *V, int64_t nv,
+ int64_t v) {
+ llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
+ 0);
+ const llvm::Type *OrigTy = V->getType();
+ if (nv) {
+ // Do the non-virtual adjustment
+ V = Builder.CreateBitCast(V, Ptr8Ty);
+ V = Builder.CreateConstInBoundsGEP1_64(V, nv);
+ V = Builder.CreateBitCast(V, OrigTy);
}
- IdentifierInfo *II
- = &CGM.getContext().Idents.get("__thunk_named_foo_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
- Extern
- ? FunctionDecl::Extern
- : FunctionDecl::Static,
- false, true);
- StartFunction(FD, R, Fn, Args, SourceLocation());
- // FIXME: generate body
- FinishFunction();
- return Fn;
+ if (v) {
+ // Do the virtual this adjustment
+ const llvm::Type *PtrDiffTy =
+ ConvertType(getContext().getPointerDiffType());
+ llvm::Type *PtrPtr8Ty, *PtrPtrDiffTy;
+ PtrPtr8Ty = llvm::PointerType::get(Ptr8Ty, 0);
+ PtrPtrDiffTy = llvm::PointerType::get(PtrDiffTy, 0);
+ llvm::Value *ThisVal = Builder.CreateBitCast(V, Ptr8Ty);
+ V = Builder.CreateBitCast(V, PtrPtrDiffTy->getPointerTo());
+ V = Builder.CreateLoad(V, "vtable");
+ llvm::Value *VTablePtr = V;
+ assert(v % (LLVMPointerWidth/8) == 0 && "vtable entry unaligned");
+ v /= LLVMPointerWidth/8;
+ V = Builder.CreateConstInBoundsGEP1_64(VTablePtr, v);
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateGEP(ThisVal, V);
+ V = Builder.CreateBitCast(V, OrigTy);
+ }
+ return V;
}
llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
@@ -723,7 +749,7 @@ llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
int64_t v_t,
int64_t nv_r,
int64_t v_r) {
- QualType R = MD->getType()->getAs<FunctionType>()->getResultType();
+ QualType ResultType = MD->getType()->getAs<FunctionType>()->getResultType();
FunctionArgList Args;
ImplicitParamDecl *ThisDecl =
@@ -740,13 +766,57 @@ llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
= &CGM.getContext().Idents.get("__thunk_named_foo_");
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
+ SourceLocation(), II, ResultType, 0,
Extern
? FunctionDecl::Extern
: FunctionDecl::Static,
false, true);
- StartFunction(FD, R, Fn, Args, SourceLocation());
- // FIXME: generate body
+ StartFunction(FD, ResultType, Fn, Args, SourceLocation());
+
+ // generate body
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD, Ty);
+ CallArgList CallArgs;
+
+ QualType ArgType = MD->getThisType(getContext());
+ llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this");
+ if (nv_t || v_t) {
+ // Do the this adjustment.
+ const llvm::Type *OrigTy = Callee->getType();
+ Arg = DynamicTypeAdjust(Arg, nv_t, v_t);
+ if (nv_r || v_r) {
+ Callee = CGM.BuildCovariantThunk(MD, Extern, 0, 0, nv_r, v_r);
+ Callee = Builder.CreateBitCast(Callee, OrigTy);
+ nv_r = v_r = 0;
+ }
+ }
+
+ CallArgs.push_back(std::make_pair(RValue::get(Arg), ArgType));
+
+ for (FunctionDecl::param_const_iterator i = MD->param_begin(),
+ e = MD->param_end();
+ i != e; ++i) {
+ ParmVarDecl *D = *i;
+ QualType ArgType = D->getType();
+
+ // llvm::Value *Arg = CGF.GetAddrOfLocalVar(Dst);
+ Expr *Arg = new (getContext()) DeclRefExpr(D, ArgType, SourceLocation());
+ CallArgs.push_back(std::make_pair(EmitCallArg(Arg, ArgType), ArgType));
+ }
+
+ RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
+ Callee, CallArgs, MD);
+ if (nv_r || v_r) {
+ // Do the return result adjustment.
+ RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), nv_r, v_r));
+ }
+
+ if (!ResultType->isVoidType())
+ EmitReturnOfRValue(RV, ResultType);
+
FinishFunction();
return Fn;
}
@@ -769,7 +839,6 @@ llvm::Constant *CodeGenModule::BuildThunk(const CXXMethodDecl *MD, bool Extern,
llvm::Function *Fn = llvm::Function::Create(FTy, linktype, Out.str(),
&getModule());
CodeGenFunction(*this).GenerateThunk(Fn, MD, Extern, nv, v);
- // Fn = Builder.CreateBitCast(Fn, Ptr8Ty);
llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
return m;
}
@@ -795,7 +864,6 @@ llvm::Constant *CodeGenModule::BuildCovariantThunk(const CXXMethodDecl *MD,
&getModule());
CodeGenFunction(*this).GenerateCovariantThunk(Fn, MD, Extern, nv_t, v_t, nv_r,
v_r);
- // Fn = Builder.CreateBitCast(Fn, Ptr8Ty);
llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
return m;
}
@@ -815,7 +883,7 @@ CodeGenFunction::GetVirtualCXXBaseClassOffset(llvm::Value *This,
CGM.getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
llvm::Value *VBaseOffsetPtr =
- Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetIndex, "vbase.offset.ptr");
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetIndex, "vbase.offset.ptr");
const llvm::Type *PtrDiffTy =
ConvertType(getContext().getPointerDiffType());
@@ -899,7 +967,7 @@ void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
// Push the Src ptr.
CallArgs.push_back(std::make_pair(RValue::get(Src),
- BaseCopyCtor->getParamDecl(0)->getType()));
+ BaseCopyCtor->getParamDecl(0)->getType()));
QualType ResultType =
BaseCopyCtor->getType()->getAs<FunctionType>()->getResultType();
EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
@@ -1099,11 +1167,11 @@ CodeGenFunction::SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor,
FinishFunction();
}
-/// SynthesizeCXXCopyConstructor - This routine implicitly defines body of a copy
-/// constructor, in accordance with section 12.8 (p7 and p8) of C++03
+/// SynthesizeCXXCopyConstructor - This routine implicitly defines body of a
+/// copy constructor, in accordance with section 12.8 (p7 and p8) of C++03
/// The implicitly-defined copy constructor for class X performs a memberwise
-/// copy of its subobjects. The order of copying is the same as the order
-/// of initialization of bases and members in a user-defined constructor
+/// copy of its subobjects. The order of copying is the same as the order of
+/// initialization of bases and members in a user-defined constructor
/// Each subobject is copied in the manner appropriate to its type:
/// if the subobject is of class type, the copy constructor for the class is
/// used;
@@ -1121,7 +1189,7 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
const FunctionArgList &Args) {
const CXXRecordDecl *ClassDecl = Ctor->getParent();
assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
- "SynthesizeCXXCopyConstructor - copy constructor has definition already");
+ "SynthesizeCXXCopyConstructor - copy constructor has definition already");
StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args,
SourceLocation());
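
As context for GenerateThunk/GenerateCovariantThunk and DynamicTypeAdjust above, here is a minimal, illustrative C++ pattern that forces a this-adjusting covariant thunk (the class names are made up):

    // D::clone overrides B2::clone with a covariant return type, and B2
    // lives at a non-zero offset inside D. The vtable slot for B2-in-D must
    // therefore point at a thunk: adjust `this` from the B2 subobject down
    // to the D object, call D::clone, then adjust the returned D* back up
    // to a B2*. The nv/v parameters above carry exactly these
    // non-virtual/virtual offsets for the `this` and return adjustments.
    struct B1 { virtual void f() {} int a; };
    struct B2 { virtual B2 *clone() { return this; } int b; };
    struct D : B1, B2 {
      virtual D *clone() { return this; }  // covariant override of B2::clone
    };
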
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 78655168e857..06cd05cc75a8 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -441,6 +441,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
RetAttrs |= llvm::Attribute::NoAlias;
}
+ if (CompileOpts.OptimizeSize)
+ FuncAttrs |= llvm::Attribute::OptimizeForSize;
if (CompileOpts.DisableRedZone)
FuncAttrs |= llvm::Attribute::NoRedZone;
if (CompileOpts.NoImplicitFloat)
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 1728c67292b7..b1ceb4627712 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/CompileOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
@@ -316,6 +317,20 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
+
+ // All constant structs and arrays should be global if
+ // their initializer is constant and if the element type is POD.
+ if (CGM.getCompileOpts().MergeAllConstants) {
+ if (Ty.isConstant(getContext())
+ && (Ty->isArrayType() || Ty->isRecordType())
+ && (D.getInit()
+ && D.getInit()->isConstantInitializer(getContext()))
+ && Ty->isPODType()) {
+ EmitStaticBlockVarDecl(D);
+ return;
+ }
+ }
+
// A normal fixed sized variable becomes an alloca in the entry block.
const llvm::Type *LTy = ConvertTypeForMem(Ty);
Align = getContext().getDeclAlignInBytes(&D);
@@ -417,17 +432,18 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
D.getNameAsString());
+ bool isVolatile = (getContext().getCanonicalType(D.getType())
+ .isVolatileQualified());
if (Ty->isReferenceType()) {
RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
- EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
- D.getType());
+ EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
} else if (Init->getType()->isAnyComplexType()) {
- EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
+ EmitComplexExprIntoAddr(Init, Loc, isVolatile);
} else {
- EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
+ EmitAggExpr(Init, Loc, isVolatile);
}
}
@@ -492,16 +508,25 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
// Handle CXX destruction of variables.
QualType DtorTy(Ty);
if (const ArrayType *Array = DtorTy->getAs<ArrayType>())
- DtorTy = Array->getElementType();
+ DtorTy = getContext().getBaseElementType(Array);
if (const RecordType *RT = DtorTy->getAs<RecordType>())
if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
if (!ClassDecl->hasTrivialDestructor()) {
const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
assert(D && "EmitLocalBlockVarDecl - destructor is null");
- assert(!Ty->getAs<ArrayType>() && "FIXME - destruction of arrays NYI");
-
+
CleanupScope scope(*this);
- EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
+ if (const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(Ty)) {
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(DeclPtr, BasePtr);
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+ }
+ else
+ EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
}
}
@@ -547,6 +572,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
QualType Ty = D.getType();
+ CanQualType CTy = getContext().getCanonicalType(Ty);
llvm::Value *DeclPtr;
if (!Ty->isConstantSizeType()) {
@@ -563,7 +589,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
DeclPtr->setName(Name.c_str());
// Store the initial value into the alloca.
- EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty);
+ EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Ty);
} else {
// Otherwise, if this is an aggregate, just use the input pointer.
DeclPtr = Arg;
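
Two small, illustrative cases for the CGDecl changes above (identifiers are made up): a const POD local that the new MergeAllConstants path may promote to a global, and an array of class type whose cleanup now recurses to the base element type instead of asserting:

    int lookup(int i) {
      // const, POD, constant initializer: with -fmerge-all-constants this
      // can be emitted as a global rather than an alloca plus a copy.
      const int table[4] = { 1, 2, 4, 8 };
      return table[i & 3];
    }

    struct S { ~S() {} };
    void g() {
      S grid[2][3];  // destroyed element by element via
    }                // EmitCXXAggrDestructorCall on base element type S
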
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
new file mode 100644
index 000000000000..adfd0055f936
--- /dev/null
+++ b/lib/CodeGen/CGException.cpp
@@ -0,0 +1,105 @@
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
+ // void *__cxa_allocate_exception(size_t thrown_size);
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ std::vector<const llvm::Type*> Args(1, SizeTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
+}
+
+static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_throw (void *thrown_exception, std::type_info *tinfo,
+ // void (*dest) (void *) );
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(3, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+ Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
+}
+
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
+ // FIXME: Handle rethrows.
+ if (!E->getSubExpr()) {
+ ErrorUnsupported(E, "rethrow expression");
+ return;
+ }
+
+ QualType ThrowType = E->getSubExpr()->getType();
+ // FIXME: We only handle non-class types for now.
+ if (ThrowType->isRecordType()) {
+ ErrorUnsupported(E, "throw expression");
+ return;
+ }
+
+ // FIXME: Handle cleanup.
+ if (!CleanupEntries.empty()){
+ ErrorUnsupported(E, "throw expression");
+ return;
+ }
+
+ // Now allocate the exception object.
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ uint64_t TypeSize = getContext().getTypeSize(ThrowType) / 8;
+
+ llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
+ llvm::Value *ExceptionPtr =
+ Builder.CreateCall(AllocExceptionFn,
+ llvm::ConstantInt::get(SizeTy, TypeSize),
+ "exception");
+
+ // Store the thrown value in the exception object.
+ if (!hasAggregateLLVMType(ThrowType)) {
+ llvm::Value *Value = EmitScalarExpr(E->getSubExpr());
+ const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
+
+ Builder.CreateStore(Value, Builder.CreateBitCast(ExceptionPtr, ValuePtrTy));
+ } else {
+ // FIXME: Handle complex and aggregate expressions.
+ ErrorUnsupported(E, "throw expression");
+ }
+
+ // Now throw the exception.
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ mangleCXXRtti(CGM.getMangleContext(), ThrowType, Out);
+
+ // FIXME: Is it OK to use CreateRuntimeVariable for this?
+ llvm::Constant *TypeInfo =
+ CGM.CreateRuntimeVariable(llvm::Type::getInt8Ty(getLLVMContext()),
+ OutName.c_str());
+ llvm::Constant *Dtor = llvm::Constant::getNullValue(Int8PtrTy);
+
+ llvm::CallInst *ThrowCall =
+ Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+}
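
A minimal sketch of what EmitCXXThrowExpr produces for `throw 42;`, written by hand against the Itanium C++ ABI entry points declared above. The extern "C" declarations and the null destructor argument mirror the code in this file; they are assumptions of the sketch, not clang API:

    #include <cstddef>
    #include <typeinfo>

    extern "C" void *__cxa_allocate_exception(std::size_t thrown_size);
    extern "C" void __cxa_throw(void *thrown_exception, void *tinfo,
                                void (*dest)(void *));

    void throw_42_by_hand() {
      // Allocate the exception object, store the scalar payload into it,
      // then raise it; a null destructor is passed since int is trivial.
      void *exc = __cxa_allocate_exception(sizeof(int));
      *static_cast<int *>(exc) = 42;
      __cxa_throw(exc,
                  const_cast<void *>(static_cast<const void *>(&typeid(int))),
                  0);
    }
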
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index bb487f6e3fdf..d9dd70ac9e49 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -84,10 +84,9 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
ShouldDestroyTemporaries = TE->shouldDestroyTemporaries();
- if (ShouldDestroyTemporaries) {
- // Keep track of the current cleanup stack depth.
+ // Keep track of the current cleanup stack depth.
+ if (ShouldDestroyTemporaries)
OldNumLiveTemporaries = LiveTemporaries.size();
- }
E = TE->getSubExpr();
}
@@ -148,7 +147,6 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
// Check if need to perform the derived-to-base cast.
if (BaseClassDecl) {
llvm::Value *Derived = Val.getAggregateAddr();
-
llvm::Value *Base =
GetAddressCXXOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl,
/*NullCheckValue=*/false);
@@ -189,18 +187,21 @@ unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
//===----------------------------------------------------------------------===//
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
- if (Ty->isVoidType()) {
+ if (Ty->isVoidType())
return RValue::get(0);
- } else if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
const llvm::Type *EltTy = ConvertType(CTy->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return RValue::getComplex(std::make_pair(U, U));
- } else if (hasAggregateLLVMType(Ty)) {
+ }
+
+ if (hasAggregateLLVMType(Ty)) {
const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
return RValue::getAggregate(llvm::UndefValue::get(LTy));
- } else {
- return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}
+
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
@@ -245,7 +246,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
- case Expr::QualifiedDeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
case Expr::PredefinedExprClass:
@@ -343,9 +343,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
if (LV.isObjCWeak()) {
// load of a __weak object.
llvm::Value *AddrWeakObj = LV.getAddress();
- llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
- AddrWeakObj);
- return RValue::get(read_weak);
+ return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj));
}
if (LV.isSimple()) {
@@ -522,10 +521,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isPropertyRef())
return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
- if (Dst.isKVCRef())
- return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
-
- assert(0 && "Unknown LValue type");
+ assert(Dst.isKVCRef() && "Unknown LValue type");
+ return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
}
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
@@ -551,8 +548,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
BytesBetween);
- }
- else if (Dst.isGlobalObjCRef())
+ } else if (Dst.isGlobalObjCRef())
CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
else
CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
@@ -702,13 +698,12 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
llvm::SmallVector<llvm::Constant*, 4> ExtMask;
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
unsigned i;
for (i = 0; i != NumSrcElts; ++i)
- ExtMask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), i));
+ ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
for (; i != NumDstElts; ++i)
- ExtMask.push_back(llvm::UndefValue::get(
- llvm::Type::getInt32Ty(VMContext)));
+ ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
ExtMask.size());
llvm::Value *ExtSrcVal =
@@ -717,15 +712,13 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
ExtMaskV, "tmp");
// build identity
llvm::SmallVector<llvm::Constant*, 4> Mask;
- for (unsigned i = 0; i != NumDstElts; ++i) {
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), i));
- }
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+
// modify when what gets shuffled in
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned Idx = getAccessedFieldNo(i, Elts);
- Mask[Idx] = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), i+NumDstElts);
+ Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
@@ -736,8 +729,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
} else {
// If the Src is a scalar (not a vector) it must be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
- llvm::Value *Elt = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), InIdx);
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
}
@@ -747,8 +740,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating write-barrier API. It is currently a global, ivar,
// or neither.
-static
-void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV) {
+static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
+ LValue &LV) {
if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
return;
@@ -759,6 +752,7 @@ void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV) {
LV.SetObjCArray(LV, E->getType()->isArrayType());
return;
}
+
if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
@@ -766,10 +760,15 @@ void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV) {
LV.SetGlobalObjCRef(LV, true);
}
LV.SetObjCArray(LV, E->getType()->isArrayType());
+ return;
}
- else if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E))
+
+ if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
- else if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
+ return;
+ }
+
+ if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
if (LV.isObjCIvar()) {
// If cast is to a structure pointer, follow gcc's behavior and make it
@@ -779,13 +778,20 @@ void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV) {
ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
if (ExpTy->isRecordType())
LV.SetObjCIvar(LV, false);
- }
+ }
+ return;
}
- else if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E))
+ if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
- else if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E))
+ return;
+ }
+
+ if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
- else if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+ return;
+ }
+
+ if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
if (LV.isObjCIvar() && !LV.isObjCArray())
// Using array syntax to assign to what an ivar points to is not
@@ -795,12 +801,15 @@ void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV) {
// Using array syntax to assign to what a global points to is not
// the same as assigning to the global itself. {id *G;} G[i] = 0;
LV.SetGlobalObjCRef(LV, false);
+ return;
}
- else if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
+
+ if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
// We don't know if member is an 'ivar', but this flag is looked at
// only in the context of LV.isObjCIvar().
LV.SetObjCArray(LV, E->getType()->isArrayType());
+ return;
}
}
@@ -839,14 +848,18 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue::SetObjCNonGC(LV, NonGCable);
setObjCGCLValueClass(getContext(), E, LV);
return LV;
- } else if (VD && VD->isFileVarDecl()) {
+ }
+
+ if (VD && VD->isFileVarDecl()) {
llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
setObjCGCLValueClass(getContext(), E, LV);
return LV;
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
+ }
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
llvm::Value* V = CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
@@ -861,15 +874,19 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
}
return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
- } else if (const ImplicitParamDecl *IPD =
- dyn_cast<ImplicitParamDecl>(E->getDecl())) {
+ }
+
+ if (const ImplicitParamDecl *IPD = dyn_cast<ImplicitParamDecl>(E->getDecl())){
llvm::Value *V = LocalDeclMap[IPD];
assert(V && "BlockVarDecl not entered in LocalDeclMap?");
return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
- } else if (const QualifiedDeclRefExpr *QDRExpr =
- dyn_cast<QualifiedDeclRefExpr>(E)) {
- return EmitPointerToDataMemberLValue(QDRExpr);
}
+
+ if (E->getQualifier()) {
+ // FIXME: the qualifier check does not seem sufficient here
+ return EmitPointerToDataMemberLValue(E);
+ }
+
assert(0 && "Unimp declref");
//an invalid LValue, but the assert will
//ensure that this point is never reached.
@@ -888,25 +905,24 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
switch (E->getOpcode()) {
default: assert(0 && "Unknown unary operator lvalue!");
- case UnaryOperator::Deref:
- {
- QualType T = E->getSubExpr()->getType()->getPointeeType();
- assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
-
- Qualifiers Quals = MakeQualifiers(T);
- Quals.setAddressSpace(ExprTy.getAddressSpace());
-
- LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
- // We should not generate __weak write barrier on indirect reference
- // of a pointer to object; as in void foo (__weak id *param); *param = 0;
- // But, we continue to generate __strong write barrier on indirect write
- // into a pointer to object.
- if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
- LV.isObjCWeak())
- LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
- return LV;
- }
+ case UnaryOperator::Deref: {
+ QualType T = E->getSubExpr()->getType()->getPointeeType();
+ assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+ Qualifiers Quals = MakeQualifiers(T);
+ Quals.setAddressSpace(ExprTy.getAddressSpace());
+
+ LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
+ // We should not generate __weak write barrier on indirect reference
+ // of a pointer to object; as in void foo (__weak id *param); *param = 0;
+ // But, we continue to generate __strong write barrier on indirect write
+ // into a pointer to object.
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
case UnaryOperator::Real:
case UnaryOperator::Imag:
LValue LV = EmitLValue(E->getSubExpr());
@@ -932,8 +948,7 @@ LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
std::string GlobalVarName;
switch (Type) {
- default:
- assert(0 && "Invalid type");
+ default: assert(0 && "Invalid type");
case PredefinedExpr::Func:
GlobalVarName = "__func__.";
break;
@@ -1089,12 +1104,12 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
llvm::Constant *BaseElts = Base.getExtVectorElts();
llvm::SmallVector<llvm::Constant *, 4> CElts;
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
if (isa<llvm::ConstantAggregateZero>(BaseElts))
- CElts.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0));
+ CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
else
- CElts.push_back(BaseElts->getOperand(Indices[i]));
+ CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
}
llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
@@ -1211,13 +1226,12 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
const Expr* InitExpr = E->getInitializer();
LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));
- if (E->getType()->isComplexType()) {
+ if (E->getType()->isComplexType())
EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
- } else if (hasAggregateLLVMType(E->getType())) {
+ else if (hasAggregateLLVMType(E->getType()))
EmitAnyExpr(InitExpr, DeclPtr, false);
- } else {
+ else
EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
- }
return Result;
}
@@ -1238,9 +1252,7 @@ CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
if (!LHS.isSimple())
return EmitUnsupportedLValue(E, "conditional operator");
- llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),
- "condtmp");
-
+ llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
Builder.CreateStore(LHS.getAddress(), Temp);
EmitBranch(ContBlock);
@@ -1379,7 +1391,8 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
return EmitLValue(E->getRHS());
}
- if (E->getOpcode() == BinaryOperator::PtrMemD)
+ if (E->getOpcode() == BinaryOperator::PtrMemD ||
+ E->getOpcode() == BinaryOperator::PtrMemI)
return EmitPointerToDataMemberBinaryExpr(E);
// Can only get l-value for binary operator expressions which are a
@@ -1406,15 +1419,14 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
RValue RV = EmitCallExpr(E);
- if (RV.isScalar()) {
- assert(E->getCallReturnType()->isReferenceType() &&
- "Can't have a scalar return unless the return type is a "
- "reference type!");
-
- return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
- }
+ if (!RV.isScalar())
+ return LValue::MakeAddr(RV.getAggregateAddr(),MakeQualifiers(E->getType()));
+
+ assert(E->getCallReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
- return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
+ return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
@@ -1439,9 +1451,7 @@ LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
-
PushCXXTemporary(E->getTemporary(), LV.getAddress());
-
return LV;
}
@@ -1497,21 +1507,18 @@ CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}
-LValue
-CodeGenFunction::EmitObjCKVCRefLValue(
+LValue CodeGenFunction::EmitObjCKVCRefLValue(
const ObjCImplicitSetterGetterRefExpr *E) {
// This is a special l-value that just issues sends when we load or store
// through it.
return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}
-LValue
-CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
+LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
return EmitUnsupportedLValue(E, "use of super");
}
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
-
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
// FIXME: can this be volatile?
@@ -1519,8 +1526,7 @@ LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
}
-LValue CodeGenFunction::EmitPointerToDataMemberLValue(
- const QualifiedDeclRefExpr *E) {
+LValue CodeGenFunction::EmitPointerToDataMemberLValue(const DeclRefExpr *E) {
const FieldDecl *Field = cast<FieldDecl>(E->getDecl());
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Field->getDeclContext());
QualType NNSpecTy =
@@ -1530,12 +1536,9 @@ LValue CodeGenFunction::EmitPointerToDataMemberLValue(
llvm::Value *V = llvm::Constant::getNullValue(ConvertType(NNSpecTy));
LValue MemExpLV = EmitLValueForField(V, const_cast<FieldDecl*>(Field),
/*isUnion*/false, /*Qualifiers*/0);
- const llvm::Type* ResultType = ConvertType(
- getContext().getPointerDiffType());
- V = Builder.CreatePtrToInt(MemExpLV.getAddress(), ResultType,
- "datamember");
- LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
- return LV;
+ const llvm::Type *ResultType = ConvertType(getContext().getPointerDiffType());
+ V = Builder.CreatePtrToInt(MemExpLV.getAddress(), ResultType, "datamember");
+ return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
}
RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
@@ -1566,9 +1569,11 @@ RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
Callee, Args, TargetDecl);
}
-LValue CodeGenFunction::EmitPointerToDataMemberBinaryExpr(
- const BinaryOperator *E) {
+LValue CodeGenFunction::
+EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
llvm::Value *BaseV = EmitLValue(E->getLHS()).getAddress();
+ if (E->getOpcode() == BinaryOperator::PtrMemI)
+ BaseV = Builder.CreateLoad(BaseV, "indir.ptr");
const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
BaseV = Builder.CreateBitCast(BaseV, i8Ty);
LValue RHSLV = EmitLValue(E->getRHS());
@@ -1577,13 +1582,12 @@ LValue CodeGenFunction::EmitPointerToDataMemberBinaryExpr(
const llvm::Type* ResultType = ConvertType(getContext().getPointerDiffType());
OffsetV = Builder.CreateBitCast(OffsetV, ResultType);
llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");
+
QualType Ty = E->getRHS()->getType();
- const MemberPointerType *MemPtrType = Ty->getAs<MemberPointerType>();
- Ty = MemPtrType->getPointeeType();
- const llvm::Type* PType =
- ConvertType(getContext().getPointerType(Ty));
+ Ty = Ty->getAs<MemberPointerType>()->getPointeeType();
+
+ const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty));
AddV = Builder.CreateBitCast(AddV, PType);
- LValue LV = LValue::MakeAddr(AddV, MakeQualifiers(Ty));
- return LV;
+ return LValue::MakeAddr(AddV, MakeQualifiers(Ty));
}
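
The hunks above extend pointer-to-data-member codegen from the direct form (BinaryOperator::PtrMemD, operator .*) to the indirect form (PtrMemI, operator ->*); a minimal illustration:

    struct Point { int x, y; };

    int read(Point p, Point *q, int Point::*pm) {
      int a = p.*pm;   // PtrMemD: offset applied to the object's address
      int b = q->*pm;  // PtrMemI: the base pointer is loaded first
                       // ("indir.ptr" above), then the offset is applied
      return a + b;
    }
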
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index f47b6ab3c8cc..901f867a5913 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -297,7 +297,7 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
assert(MPT->getPointeeType()->isFunctionProtoType() &&
"Unexpected member pointer type!");
- const QualifiedDeclRefExpr *DRE = cast<QualifiedDeclRefExpr>(E->getSubExpr());
+ const DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
const CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl());
const llvm::Type *PtrDiffTy =
@@ -329,7 +329,8 @@ void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
- if (E->getOpcode() == BinaryOperator::PtrMemD)
+ if (E->getOpcode() == BinaryOperator::PtrMemD ||
+ E->getOpcode() == BinaryOperator::PtrMemI)
VisitPointerToDataMemberBinaryOperator(E);
else
CGF.ErrorUnsupported(E, "aggregate binary expression");
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index fc3748c8e3c8..9145d92128a1 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -228,7 +228,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder {
if (NumBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumBytes);
- llvm::Constant *C = llvm::Constant::getNullValue(Ty);
+ llvm::Constant *C = llvm::UndefValue::get(Ty);
Elements.push_back(C);
assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
@@ -266,7 +266,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder {
if (NumBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumBytes);
- llvm::Constant *Padding = llvm::Constant::getNullValue(Ty);
+ llvm::Constant *Padding = llvm::UndefValue::get(Ty);
PackedElements.push_back(Padding);
ElementOffsetInBytes += getSizeInBytes(Padding);
}
@@ -434,7 +434,7 @@ public:
E->getType()->getAs<MemberPointerType>()) {
QualType T = MPT->getPointeeType();
if (T->isFunctionProtoType()) {
- QualifiedDeclRefExpr *DRE = cast<QualifiedDeclRefExpr>(E->getSubExpr());
+ DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
return EmitMemberFunctionPointer(cast<CXXMethodDecl>(DRE->getDecl()));
}
@@ -496,7 +496,7 @@ public:
if (NumPadBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumPadBytes);
- Elts.push_back(llvm::Constant::getNullValue(Ty));
+ Elts.push_back(llvm::UndefValue::get(Ty));
Types.push_back(Ty);
}
@@ -739,8 +739,7 @@ public:
E->getType().getAddressSpace());
return C;
}
- case Expr::DeclRefExprClass:
- case Expr::QualifiedDeclRefExprClass: {
+ case Expr::DeclRefExprClass: {
NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
return CGM.GetAddrOfFunction(FD);
@@ -777,11 +776,17 @@ public:
}
case Expr::AddrLabelExprClass: {
assert(CGF && "Invalid address of label expression outside function.");
+#ifndef USEINDIRECTBRANCH
unsigned id =
CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
llvm::Constant *C =
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), id);
return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
+#else
+ llvm::Constant *Ptr =
+ CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+#endif
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 69604f9aaaee..96b58d8995ae 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -135,11 +135,16 @@ public:
}
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+#ifndef USEINDIRECTBRANCH
llvm::Value *V =
llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
CGF.GetIDForAddrOfLabel(E->getLabel()));
return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
+#else
+ llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
+ return Builder.CreateBitCast(V, ConvertType(E->getType()));
+#endif
}
// l-values.
@@ -272,7 +277,12 @@ public:
Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
-
+
+ Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ CGF.EmitCXXThrowExpr(E);
+ return 0;
+ }
+
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (CGF.getContext().getLangOptions().OverflowChecking
@@ -678,14 +688,13 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
// input is the same width as the vector being constructed, generate an
// optimized shuffle of the swizzle input into the result.
+ unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
if (isa<ExtVectorElementExpr>(IE)) {
llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
Value *SVOp = SVI->getOperand(0);
const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
if (OpTy->getNumElements() == ResElts) {
- unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
-
for (unsigned j = 0; j != CurIdx; ++j) {
// If the current vector initializer is a shuffle with undef, merge
// this shuffle directly into it.
@@ -717,13 +726,13 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.push_back(llvm::UndefValue::get(I32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
- Mask, "vecext");
+ Mask, "vext");
Args.clear();
for (unsigned j = 0; j != CurIdx; ++j)
Args.push_back(llvm::ConstantInt::get(I32Ty, j));
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(llvm::ConstantInt::get(I32Ty, j+ResElts));
+ Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset));
for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
Args.push_back(llvm::UndefValue::get(I32Ty));
}
@@ -1639,9 +1648,10 @@ Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
-static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
+ CodeGenFunction &CGF) {
if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
- return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());
+ return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr(), CGF);
// TODO: Allow anything we can constant fold to an integer or fp constant.
if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
@@ -1652,7 +1662,9 @@ static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
// X and Y are local variables.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
- if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
+ if (VD->hasLocalStorage() && !(CGF.getContext()
+ .getCanonicalType(VD->getType())
+ .isVolatileQualified()))
return true;
return false;
@@ -1681,8 +1693,9 @@ VisitConditionalOperator(const ConditionalOperator *E) {
// If this is a really simple expression (like x ? 4 : 5), emit this as a
// select instead of as control flow. We can only do this if it is cheap and
// safe to evaluate the LHS and RHS unconditionally.
- if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS()) &&
- isCheapEnoughToEvaluateUnconditionally(E->getRHS())) {
+ if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS(),
+ CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) {
llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
llvm::Value *LHS = Visit(E->getLHS());
llvm::Value *RHS = Visit(E->getRHS());
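
The point of threading CodeGenFunction into isCheapEnoughToEvaluateUnconditionally is that volatility can hide behind type sugar, so only the canonical type is a reliable test. Illustrative only:

    typedef volatile int vint;

    int pick(bool c, int x, int y) {
      return c ? x : y;  // both arms cheap, non-volatile locals: may be
    }                    // emitted as a single select with no branches

    int pick_sugared(bool c, vint x, int y) {
      return c ? x : y;  // x is volatile behind a typedef; the canonical-
    }                    // type check keeps the load conditional
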
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index cadba328bf12..2fe3f5b1b443 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -280,17 +280,29 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args),
SetPropertyFn, Args);
} else {
+ // FIXME: Find a clean way to avoid AST node creation.
SourceLocation Loc = PD->getLocation();
ValueDecl *Self = OMD->getSelfDecl();
ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
DeclRefExpr Base(Self, Self->getType(), Loc);
ParmVarDecl *ArgDecl = *OMD->param_begin();
DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
- ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base,
- true, true);
- BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign,
- Ivar->getType(), Loc);
- EmitStmt(&Assign);
+ ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
+
+ // The property type can differ from the ivar type in some situations with
+ // Objective-C pointer types; we can always bitcast the RHS in these cases.
+ if (getContext().getCanonicalType(Ivar->getType()) !=
+ getContext().getCanonicalType(ArgDecl->getType())) {
+ ImplicitCastExpr ArgCasted(Ivar->getType(), CastExpr::CK_BitCast, &Arg,
+ false);
+ BinaryOperator Assign(&IvarRef, &ArgCasted, BinaryOperator::Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ } else {
+ BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ }
}
FinishFunction();
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.h b/lib/CodeGen/CGRecordLayoutBuilder.h
index d1a13aa29711..4ebf4e88decb 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.h
+++ b/lib/CodeGen/CGRecordLayoutBuilder.h
@@ -15,7 +15,7 @@
#define CLANG_CODEGEN_CGRECORDLAYOUTBUILDER_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm/System/DataTypes.h"
#include <vector>
namespace llvm {
diff --git a/lib/CodeGen/CGRtti.cpp b/lib/CodeGen/CGRtti.cpp
index 7bc774fce75b..7af15f0a8ca4 100644
--- a/lib/CodeGen/CGRtti.cpp
+++ b/lib/CodeGen/CGRtti.cpp
@@ -16,32 +16,31 @@ using namespace clang;
using namespace CodeGen;
llvm::Constant *CodeGenModule::GenerateRtti(const CXXRecordDecl *RD) {
- llvm::Type *Ptr8Ty;
- Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
- llvm::Constant *Rtti = llvm::Constant::getNullValue(Ptr8Ty);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
if (!getContext().getLangOptions().Rtti)
- return Rtti;
+ return llvm::Constant::getNullValue(Int8PtrTy);
llvm::SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
- mangleCXXRtti(getMangleContext(), RD, Out);
+ mangleCXXRtti(getMangleContext(), Context.getTagDeclType(RD), Out);
llvm::GlobalVariable::LinkageTypes linktype;
linktype = llvm::GlobalValue::WeakAnyLinkage;
std::vector<llvm::Constant *> info;
// assert(0 && "FIXME: implement rtti descriptor");
// FIXME: descriptor
- info.push_back(llvm::Constant::getNullValue(Ptr8Ty));
+ info.push_back(llvm::Constant::getNullValue(Int8PtrTy));
// assert(0 && "FIXME: implement rtti ts");
// FIXME: TS
- info.push_back(llvm::Constant::getNullValue(Ptr8Ty));
+ info.push_back(llvm::Constant::getNullValue(Int8PtrTy));
llvm::Constant *C;
- llvm::ArrayType *type = llvm::ArrayType::get(Ptr8Ty, info.size());
+ llvm::ArrayType *type = llvm::ArrayType::get(Int8PtrTy, info.size());
C = llvm::ConstantArray::get(type, info);
- Rtti = new llvm::GlobalVariable(getModule(), type, true, linktype, C,
- Out.str());
- Rtti = llvm::ConstantExpr::getBitCast(Rtti, Ptr8Ty);
+ llvm::Constant *Rtti =
+ new llvm::GlobalVariable(getModule(), type, true, linktype, C,
+ Out.str());
+ Rtti = llvm::ConstantExpr::getBitCast(Rtti, Int8PtrTy);
return Rtti;
}
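
GenerateRtti now mangles the class's type (Context.getTagDeclType(RD)) so it agrees with the mangleCXXRtti(..., ThrowType, ...) call in CGException.cpp above; both must name the same Itanium RTTI symbol. For a made-up class the correspondence is:

    #include <typeinfo>

    struct Widget { virtual ~Widget() {} };

    const std::type_info &widget_rtti() {
      return typeid(Widget);  // references the Itanium RTTI object,
    }                         // mangled _ZTI6Widget
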
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index f58b57926787..9126c2c99cdf 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -287,8 +287,13 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
// Emit initial switch which will be patched up later by
// EmitIndirectSwitches(). We need a default dest, so we use the
// current BB, but this is overwritten.
+#ifndef USEINDIRECTBRANCH
llvm::Value *V = Builder.CreatePtrToInt(EmitScalarExpr(S.getTarget()),
llvm::Type::getInt32Ty(VMContext),
+#else
+ llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
+ llvm::Type::getInt8PtrTy(VMContext),
+#endif
"addr");
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
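
The USEINDIRECTBRANCH paths here and in CGExprConstant/CGExprScalar switch the GNU address-of-label extension from small integer label IDs (patched into a switch later) to real i8* block addresses; the construct involved looks like this (illustrative):

    int parity(int i) {
      // GNU extension: &&label yields the address of a label
      // (AddrLabelExpr) and `goto *p` branches through it
      // (IndirectGotoStmt).
      static void *const targets[2] = { &&even, &&odd };
      goto *targets[i & 1];
    even:
      return 0;
    odd:
      return 1;
    }
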
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 2a06f51f6685..fa77471bce0e 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -47,7 +47,7 @@ public:
bool isVolatileQualified() const { return Volatile; }
- /// getScalar() - Return the Value* of this scalar value.
+ /// getScalarVal() - Return the Value* of this scalar value.
llvm::Value *getScalarVal() const {
assert(isScalar() && "Not a scalar!");
return V1;
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp
index 9df0e1abd547..e2e11478de26 100644
--- a/lib/CodeGen/CGVtable.cpp
+++ b/lib/CodeGen/CGVtable.cpp
@@ -43,6 +43,9 @@ private:
llvm::DenseMap<const CXXMethodDecl *, Index_t> VCall;
llvm::DenseMap<const CXXMethodDecl *, Index_t> VCallOffset;
llvm::DenseMap<const CXXRecordDecl *, Index_t> VBIndex;
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, int> Pures_t;
+ Pures_t Pures;
typedef std::pair<Index_t, Index_t> CallOffset;
typedef llvm::DenseMap<const CXXMethodDecl *, CallOffset> Thunks_t;
Thunks_t Thunks;
@@ -58,6 +61,7 @@ private:
Index_t extra;
int CurrentVBaseOffset;
typedef std::vector<std::pair<const CXXRecordDecl *, int64_t> > Path_t;
+ llvm::Constant *cxa_pure;
public:
VtableBuilder(std::vector<llvm::Constant *> &meth,
const CXXRecordDecl *c,
@@ -68,6 +72,13 @@ public:
LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0)),
CurrentVBaseOffset(0) {
Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
+
+ // Calculate the pointer for __cxa_pure_virtual.
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ cxa_pure = wrap(CGM.CreateRuntimeFunction(FTy, "__cxa_pure_virtual"));
}
llvm::DenseMap<const CXXMethodDecl *, Index_t> &getIndex() { return Index; }
@@ -84,8 +95,10 @@ public:
return llvm::ConstantExpr::getBitCast(m, Ptr8Ty);
}
- void GenerateVBaseOffsets(std::vector<llvm::Constant *> &offsets,
- const CXXRecordDecl *RD, uint64_t Offset,
+#define D1(x)
+//#define D1(X) do { if (getenv("DEBUG")) { X; } } while (0)
+
+ void GenerateVBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
bool updateVBIndex, Index_t current_vbindex) {
for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
e = RD->bases_end(); i != e; ++i) {
@@ -94,22 +107,24 @@ public:
Index_t next_vbindex = current_vbindex;
if (i->isVirtual() && !SeenVBase.count(Base)) {
SeenVBase.insert(Base);
- int64_t BaseOffset = -(Offset/8) + BLayout.getVBaseClassOffset(Base)/8;
- llvm::Constant *m = wrap(BaseOffset);
- m = wrap((0?700:0) + BaseOffset);
if (updateVBIndex) {
- next_vbindex = (ssize_t)(-(offsets.size()*LLVMPointerWidth/8)
+ next_vbindex = (ssize_t)(-(VCalls.size()*LLVMPointerWidth/8)
- 3*LLVMPointerWidth/8);
VBIndex[Base] = next_vbindex;
}
- offsets.push_back(m);
+ int64_t BaseOffset = -(Offset/8) + BLayout.getVBaseClassOffset(Base)/8;
+ VCalls.push_back((0?700:0) + BaseOffset);
+ D1(printf(" vbase for %s at %d delta %d most derived %s\n",
+ Base->getNameAsCString(),
+ (int)-VCalls.size()-3, (int)BaseOffset,
+ Class->getNameAsCString()));
}
 // We also record the offset from each non-virtual base to its closest
 // enclosing virtual base, so that we don't have to search for the
 // nearest virtual base class when generating thunks.
if (updateVBIndex && VBIndex.count(Base) == 0)
VBIndex[Base] = next_vbindex;
- GenerateVBaseOffsets(offsets, Base, Offset, updateVBIndex, next_vbindex);
+ GenerateVBaseOffsets(Base, Offset, updateVBIndex, next_vbindex);
}
}
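
GenerateVBaseOffsets now appends virtual-base offsets directly into VCalls rather than a side vector, so the vbase-offset and vcall-offset entries share one region above the address point, with indices growing downward from -3 (below the offset-to-top and RTTI slots). An illustrative layout, assuming the usual Itanium vtable shape:

    struct V { int v; };
    struct A : virtual V {
      virtual void f() {}
    };
    // Sketch of A's vtable group:
    //   [-3] vbase offset for V   (the "-VCalls.size()-3" arithmetic above)
    //   [-2] offset-to-top
    //   [-1] RTTI (_ZTI1A)
    //   [ 0] A::f                 <- address point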
@@ -144,8 +159,8 @@ public:
/// getNVOffset - Returns the non-virtual offset for the given (B) base of the
/// derived class D.
Index_t getNVOffset(QualType qB, QualType qD) {
- qD = qD->getAs<PointerType>()->getPointeeType();
- qB = qB->getAs<PointerType>()->getPointeeType();
+ qD = qD->getPointeeType();
+ qB = qB->getPointeeType();
CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
int64_t o = getNVOffset_1(D, B);
@@ -159,8 +174,8 @@ public:
/// getVbaseOffset - Returns the index into the vtable for the virtual base
/// offset for the given (B) virtual base of the derived class D.
Index_t getVbaseOffset(QualType qB, QualType qD) {
- qD = qD->getAs<PointerType>()->getPointeeType();
- qB = qB->getAs<PointerType>()->getPointeeType();
+ qD = qD->getPointeeType();
+ qB = qB->getPointeeType();
CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
if (D != Class)
@@ -177,6 +192,7 @@ public:
bool OverrideMethod(const CXXMethodDecl *MD, llvm::Constant *m,
bool MorallyVirtual, Index_t OverrideOffset,
Index_t Offset) {
+ const bool isPure = MD->isPure();
typedef CXXMethodDecl::method_iterator meth_iter;
 // FIXME: Should OverrideOffset be Offset?
@@ -215,7 +231,9 @@ public:
}
Index[MD] = i;
submethods[i] = m;
-
+ if (isPure)
+ Pures[MD] = 1;
+ Pures.erase(OMD);
Thunks.erase(OMD);
if (MorallyVirtual) {
Index_t &idx = VCall[OMD];
@@ -223,9 +241,17 @@ public:
VCallOffset[MD] = OverrideOffset/8;
idx = VCalls.size()+1;
VCalls.push_back(0);
+ D1(printf(" vcall for %s at %d with delta %d most derived %s\n",
+ MD->getNameAsCString(),
+ (int)-VCalls.size()-3, (int)VCallOffset[MD],
+ Class->getNameAsCString()));
} else {
VCallOffset[MD] = VCallOffset[OMD];
VCalls[idx-1] = -VCallOffset[OMD] + OverrideOffset/8;
+ D1(printf(" vcall patch for %s at %d with delta %d most derived %s\n",
+ MD->getNameAsCString(),
+ (int)-VCalls.size()-3, (int)VCallOffset[MD],
+ Class->getNameAsCString()));
}
VCall[MD] = idx;
CallOffset ThisOffset;
@@ -237,7 +263,7 @@ public:
CovariantThunks[MD] = std::make_pair(std::make_pair(ThisOffset,
ReturnOffset),
oret);
- else
+ else if (!isPure)
Thunks[MD] = ThisOffset;
return true;
}
@@ -252,7 +278,7 @@ public:
CovariantThunks[MD] = std::make_pair(std::make_pair(ThisOffset,
ReturnOffset),
oret);
- else
+ else if (!isPure)
Thunks[MD] = ThisOffset;
}
return true;
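
Both branches now skip plain thunks for pure methods: a pure slot holds __cxa_pure_virtual, so there is no body to re-enter with an adjusted 'this'. For contrast, an example of an override that does require the this-adjusting thunk recorded here:

    struct A { virtual void f() {} };
    struct B { virtual void g() {} };
    struct C : A, B {
      virtual void g() {}  // reachable through B's vtable: that entry is a
                           // thunk that subtracts B's offset within C from
                           // 'this' before entering C::g
    };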
@@ -266,6 +292,7 @@ public:
for (Thunks_t::iterator i = Thunks.begin(), e = Thunks.end();
i != e; ++i) {
const CXXMethodDecl *MD = i->first;
+ assert(!MD->isPure() && "Trying to thunk a pure virtual method");
Index_t idx = Index[MD];
Index_t nv_O = i->second.first;
Index_t v_O = i->second.second;
@@ -276,6 +303,8 @@ public:
e = CovariantThunks.end();
i != e; ++i) {
const CXXMethodDecl *MD = i->first;
+ if (MD->isPure())
+ continue;
Index_t idx = Index[MD];
Index_t nv_t = i->second.first.first.first;
Index_t v_t = i->second.first.first.second;
@@ -285,6 +314,25 @@ public:
v_r);
}
CovariantThunks.clear();
+ for (Pures_t::iterator i = Pures.begin(), e = Pures.end();
+ i != e; ++i) {
+ const CXXMethodDecl *MD = i->first;
+ Index_t idx = Index[MD];
+ submethods[idx] = cxa_pure;
+ }
+ Pures.clear();
+ }
+
+ llvm::Constant *WrapAddrOf(const CXXMethodDecl *MD) {
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD))
+ return wrap(CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete));
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+
+ return wrap(CGM.GetAddrOfFunction(MD, Ty));
}
void OverrideMethods(Path_t *Path, bool MorallyVirtual, int64_t Offset) {
@@ -298,37 +346,16 @@ public:
continue;
const CXXMethodDecl *MD = *mi;
- llvm::Constant *m = 0;
- if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD))
- m = wrap(CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete));
- else {
- const FunctionProtoType *FPT =
- MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
-
- m = wrap(CGM.GetAddrOfFunction(MD, Ty));
- }
-
+ llvm::Constant *m = WrapAddrOf(MD);
OverrideMethod(MD, m, MorallyVirtual, OverrideOffset, Offset);
}
}
}
- void AddMethod(const CXXMethodDecl *MD, bool MorallyVirtual, Index_t Offset) {
- llvm::Constant *m = 0;
- if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD))
- m = wrap(CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete));
- else {
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
-
- m = wrap(CGM.GetAddrOfFunction(MD, Ty));
- }
-
+ void AddMethod(const CXXMethodDecl *MD, bool MorallyVirtual, Index_t Offset,
+ bool ForVirtualBase) {
+ llvm::Constant *m = WrapAddrOf(MD);
+
// If we can find a previously allocated slot for this, reuse it.
if (OverrideMethod(MD, m, MorallyVirtual, Offset, Offset))
return;
@@ -336,6 +363,9 @@ public:
// else allocate a new slot.
Index[MD] = submethods.size();
submethods.push_back(m);
+ D1(printf(" vfn for %s at %d\n", MD->getNameAsCString(), (int)Index[MD]));
+ if (MD->isPure())
+ Pures[MD] = 1;
if (MorallyVirtual) {
VCallOffset[MD] = Offset/8;
Index_t &idx = VCall[MD];
@@ -343,16 +373,19 @@ public:
if (idx == 0) {
idx = VCalls.size()+1;
VCalls.push_back(0);
+ D1(printf(" vcall for %s at %d with delta %d\n",
+ MD->getNameAsCString(), (int)-VCalls.size()-3,
+ (int)VCallOffset[MD]));
}
}
}
void AddMethods(const CXXRecordDecl *RD, bool MorallyVirtual,
- Index_t Offset) {
+ Index_t Offset, bool RDisVirtualBase) {
for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
++mi)
if (mi->isVirtual())
- AddMethod(*mi, MorallyVirtual, Offset);
+ AddMethod(*mi, MorallyVirtual, Offset, RDisVirtualBase);
}
void NonVirtualBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout,
@@ -381,6 +414,7 @@ public:
void insertVCalls(int InsertionPoint) {
llvm::Constant *e = 0;
+ D1(printf("============= combining vbase/vcall\n"));
D(VCalls.insert(VCalls.begin(), 673));
D(VCalls.push_back(672));
methods.insert(methods.begin() + InsertionPoint, VCalls.size(), e);
@@ -392,11 +426,10 @@ public:
VCalls.clear();
}
- Index_t end(const CXXRecordDecl *RD, std::vector<llvm::Constant *> &offsets,
- const ASTRecordLayout &Layout,
- const CXXRecordDecl *PrimaryBase,
- bool PrimaryBaseWasVirtual, bool MorallyVirtual,
- int64_t Offset, bool ForVirtualBase, Path_t *Path) {
+ Index_t end(const CXXRecordDecl *RD, const ASTRecordLayout &Layout,
+ const CXXRecordDecl *PrimaryBase, bool PrimaryBaseWasVirtual,
+ bool MorallyVirtual, int64_t Offset, bool ForVirtualBase,
+ Path_t *Path) {
bool alloc = false;
if (Path == 0) {
alloc = true;
@@ -405,16 +438,6 @@ public:
StartNewTable();
extra = 0;
- // FIXME: Cleanup.
- if (!ForVirtualBase) {
- D(methods.push_back(wrap(666)));
- // then virtual base offsets...
- for (std::vector<llvm::Constant *>::reverse_iterator i = offsets.rbegin(),
- e = offsets.rend(); i != e; ++i)
- methods.push_back(*i);
- D(methods.push_back(wrap(667)));
- }
-
bool DeferVCalls = MorallyVirtual || ForVirtualBase;
int VCallInsertionPoint = methods.size();
if (!DeferVCalls) {
@@ -423,20 +446,12 @@ public:
 // FIXME: Is this just for 'extra', or for all uses of VCalls.size() after this?
extra = -VCalls.size();
- if (ForVirtualBase) {
- D(methods.push_back(wrap(668)));
- // then virtual base offsets...
- for (std::vector<llvm::Constant *>::reverse_iterator i = offsets.rbegin(),
- e = offsets.rend(); i != e; ++i)
- methods.push_back(*i);
- D(methods.push_back(wrap(669)));
- }
-
methods.push_back(wrap(-(Offset/8)));
methods.push_back(rtti);
Index_t AddressPoint = methods.size();
InstallThunks();
+ D1(printf("============= combining methods\n"));
methods.insert(methods.end(), submethods.begin(), submethods.end());
submethods.clear();
@@ -445,10 +460,8 @@ public:
MorallyVirtual, Offset, Path);
if (ForVirtualBase) {
- D(methods.push_back(wrap(670)));
insertVCalls(VCallInsertionPoint);
AddressPoint += VCalls.size();
- D(methods.push_back(wrap(671)));
}
if (alloc) {
@@ -457,7 +470,36 @@ public:
return AddressPoint;
}
- void Primaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset) {
+ void Primaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset,
+ bool updateVBIndex, Index_t current_vbindex,
+ bool RDisVirtualBase) {
+ if (!RD->isDynamicClass())
+ return;
+
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
+
+ // Vtables are composed from the chain of primary bases.
+ if (PrimaryBase) {
+ D1(printf(" doing primaries for %s most derived %s\n",
+ RD->getNameAsCString(), Class->getNameAsCString()));
+
+ if (!PrimaryBaseWasVirtual)
+ Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
+ updateVBIndex, current_vbindex, PrimaryBaseWasVirtual);
+ }
+
+ D1(printf(" doing vcall entries for %s most derived %s\n",
+ RD->getNameAsCString(), Class->getNameAsCString()));
+
+ // And add the virtuals for the class to the primary vtable.
+ AddMethods(RD, MorallyVirtual, Offset, RDisVirtualBase);
+ }
+
+ void VBPrimaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset,
+ bool updateVBIndex, Index_t current_vbindex,
+ bool RDisVirtualBase, bool bottom=false) {
if (!RD->isDynamicClass())
return;
@@ -469,11 +511,22 @@ public:
if (PrimaryBase) {
if (PrimaryBaseWasVirtual)
IndirectPrimary.insert(PrimaryBase);
- Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset);
+
+ D1(printf(" doing primaries for %s most derived %s\n",
+ RD->getNameAsCString(), Class->getNameAsCString()));
+
+ VBPrimaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
+ updateVBIndex, current_vbindex, PrimaryBaseWasVirtual);
}
- // And add the virtuals for the class to the primary vtable.
- AddMethods(RD, MorallyVirtual, Offset);
+ D1(printf(" doing vbase entries for %s most derived %s\n",
+ RD->getNameAsCString(), Class->getNameAsCString()));
+ GenerateVBaseOffsets(RD, Offset, updateVBIndex, current_vbindex);
+
+ if (RDisVirtualBase || bottom) {
+ Primaries(RD, MorallyVirtual, Offset, updateVBIndex, current_vbindex,
+ RDisVirtualBase);
+ }
}
int64_t GenerateVtableForBase(const CXXRecordDecl *RD,
@@ -487,27 +540,21 @@ public:
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
- std::vector<llvm::Constant *> offsets;
extra = 0;
- GenerateVBaseOffsets(offsets, RD, Offset, !ForVirtualBase, 0);
- if (ForVirtualBase)
- extra = offsets.size();
+ D1(printf("building entries for base %s most derived %s\n",
+ RD->getNameAsCString(), Class->getNameAsCString()));
- // vtables are composed from the chain of primaries.
- if (PrimaryBase) {
- if (PrimaryBaseWasVirtual)
- IndirectPrimary.insert(PrimaryBase);
- Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset);
- }
+ if (ForVirtualBase)
+ extra = VCalls.size();
- // And add the virtuals for the class to the primary vtable.
- AddMethods(RD, MorallyVirtual, Offset);
+ VBPrimaries(RD, MorallyVirtual, Offset, !ForVirtualBase, 0, ForVirtualBase,
+ true);
if (Path)
OverrideMethods(Path, MorallyVirtual, Offset);
- return end(RD, offsets, Layout, PrimaryBase, PrimaryBaseWasVirtual,
- MorallyVirtual, Offset, ForVirtualBase, Path);
+ return end(RD, Layout, PrimaryBase, PrimaryBaseWasVirtual, MorallyVirtual,
+ Offset, ForVirtualBase, Path);
}
void GenerateVtableForVBases(const CXXRecordDecl *RD,
@@ -532,6 +579,8 @@ public:
VCall.clear();
int64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
CurrentVBaseOffset = BaseOffset;
+ D1(printf("vtable %s virtual base %s\n",
+ Class->getNameAsCString(), Base->getNameAsCString()));
GenerateVtableForBase(Base, true, BaseOffset, true, Path);
}
int64_t BaseOffset = Offset;
@@ -567,6 +616,7 @@ int64_t CGVtableInfo::getMethodVtableIndex(const CXXMethodDecl *MD) {
// FIXME: This seems expensive. Can we do a partial job to get
 // just this data?
VtableBuilder b(methods, RD, CGM);
+ D1(printf("vtable %s\n", RD->getNameAsCString()));
b.GenerateVtableForBase(RD);
b.GenerateVtableForVBases(RD);
@@ -591,6 +641,7 @@ int64_t CGVtableInfo::getVirtualBaseOffsetIndex(const CXXRecordDecl *RD,
// FIXME: This seems expensive. Can we do a partial job to get
 // just this data?
VtableBuilder b(methods, RD, CGM);
+ D1(printf("vtable %s\n", RD->getNameAsCString()));
b.GenerateVtableForBase(RD);
b.GenerateVtableForVBases(RD);
@@ -614,13 +665,14 @@ llvm::Value *CodeGenFunction::GenerateVtable(const CXXRecordDecl *RD) {
mangleCXXVtable(CGM.getMangleContext(), RD, Out);
llvm::GlobalVariable::LinkageTypes linktype;
- linktype = llvm::GlobalValue::WeakAnyLinkage;
+ linktype = llvm::GlobalValue::LinkOnceODRLinkage;
std::vector<llvm::Constant *> methods;
llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0);
int64_t AddressPoint;
VtableBuilder b(methods, RD, CGM);
+ D1(printf("vtable %s\n", RD->getNameAsCString()));
 // First come the vtables for all the non-virtual bases...
AddressPoint = b.GenerateVtableForBase(RD);
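
The linkage change from WeakAnyLinkage to LinkOnceODRLinkage encodes C++ ODR semantics: every translation unit may emit an identical vtable definition, the linker keeps one copy, and LLVM may discard an unreferenced copy (which weak_any forbids). A sketch of a class whose vtable is emitted this way:

    // With no non-inline virtual member there is no key function, so any
    // TU that needs it emits _ZTV1S as linkonce_odr; all copies are
    // interchangeable and unreferenced ones can be dropped.
    struct S { virtual void f() {} };
    S *make() { return new S(); }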
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 2f46313c9c20..10884a7560f5 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -10,6 +10,7 @@ add_clang_library(clangCodeGen
CGCall.cpp
CGDebugInfo.cpp
CGDecl.cpp
+ CGException.cpp
CGExpr.cpp
CGExprAgg.cpp
CGExprComplex.cpp
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index ba93e5d0ebc8..88beadf33140 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -27,7 +27,11 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: BlockFunction(cgm, *this, Builder), CGM(cgm),
Target(CGM.getContext().Target),
Builder(cgm.getModule().getContext()),
+#ifndef USEINDIRECTBRANCH
DebugInfo(0), IndirectGotoSwitch(0),
+#else
+ DebugInfo(0), IndirectBranch(0),
+#endif
SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
CXXThisDecl(0) {
LLVMIntTy = ConvertType(getContext().IntTy);
@@ -130,10 +134,33 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
EmitFunctionEpilog(*CurFnInfo, ReturnValue);
+#ifdef USEINDIRECTBRANCH
+ // If someone did an indirect goto, emit the indirect goto block at the end of
+ // the function.
+ if (IndirectBranch) {
+ EmitBlock(IndirectBranch->getParent());
+ Builder.ClearInsertionPoint();
+ }
+
+#endif
// Remove the AllocaInsertPt instruction, which is just a convenience for us.
llvm::Instruction *Ptr = AllocaInsertPt;
AllocaInsertPt = 0;
Ptr->eraseFromParent();
+#ifdef USEINDIRECTBRANCH
+
+ // If someone took the address of a label but never did an indirect goto, we
+ // made a zero-entry PHI node, which is illegal; zap it now.
+ if (IndirectBranch) {
+ llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
+ if (PN->getNumIncomingValues() == 0) {
+ PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
+ PN->eraseFromParent();
+ }
+ }
+
+#endif
}
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
@@ -466,13 +493,26 @@ void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
TypeInfo.second/8));
}
+#ifndef USEINDIRECTBRANCH
unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) {
// Use LabelIDs.size()+1 as the new ID if one hasn't been assigned.
unsigned &Entry = LabelIDs[L];
if (Entry) return Entry;
+#else
+
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
+ // Make sure that there is a block for the indirect goto.
+ if (IndirectBranch == 0)
+ GetIndirectGotoBlock();
+#endif
+#ifndef USEINDIRECTBRANCH
Entry = LabelIDs.size();
+#else
+ llvm::BasicBlock *BB = getBasicBlockForLabel(L);
+#endif
+#ifndef USEINDIRECTBRANCH
// If this is the first "address taken" of a label and the indirect goto has
// already been seen, add this to it.
if (IndirectGotoSwitch) {
@@ -488,18 +528,42 @@ unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) {
}
return Entry;
+#else
+ // Make sure the indirect branch includes all of the address-taken blocks.
+ IndirectBranch->addDestination(BB);
+ return llvm::BlockAddress::get(CurFn, BB);
+#endif
}
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
+#ifndef USEINDIRECTBRANCH
// If we already made the switch stmt for indirect goto, return its block.
if (IndirectGotoSwitch) return IndirectGotoSwitch->getParent();
+#else
+ // If we already made the indirect branch for indirect goto, return its block.
+ if (IndirectBranch) return IndirectBranch->getParent();
+#endif
+#ifndef USEINDIRECTBRANCH
EmitBlock(createBasicBlock("indirectgoto"));
+#else
+ CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+#endif
+#ifndef USEINDIRECTBRANCH
+ const llvm::IntegerType *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+#else
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+#endif
+
// Create the PHI node that indirect gotos will add entries to.
- llvm::Value *DestVal =
- Builder.CreatePHI(llvm::Type::getInt32Ty(VMContext), "indirect.goto.dest");
+#ifndef USEINDIRECTBRANCH
+ llvm::Value *DestVal = Builder.CreatePHI(Int32Ty, "indirect.goto.dest");
+#else
+ llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
+#endif
+#ifndef USEINDIRECTBRANCH
// Create the switch instruction. For now, set the insert block to this block
// which will be fixed as labels are added.
IndirectGotoSwitch = Builder.CreateSwitch(DestVal, Builder.GetInsertBlock());
@@ -524,8 +588,6 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
IndirectGotoSwitch->setSuccessor(0,
getBasicBlockForLabel(AddrTakenLabelsByID[0]));
- const llvm::IntegerType *Int32Ty = llvm::Type::getInt32Ty(VMContext);
-
 // FIXME: The iteration order of this is nondeterministic!
for (unsigned i = 1, e = AddrTakenLabelsByID.size(); i != e; ++i)
IndirectGotoSwitch->addCase(llvm::ConstantInt::get(Int32Ty, i+1),
@@ -541,6 +603,11 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
}
return IndirectGotoSwitch->getParent();
+#else
+ // Create the indirect branch instruction.
+ IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
+ return IndirectBranch->getParent();
+#endif
}
llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
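
FinishFunction picks up two indirect-goto duties here: emit the shared indirectgoto block at the end of the function, and delete the zero-incoming-value PHI left behind when a label's address was taken but no indirect goto was ever executed (such a PHI is invalid IR). The degenerate source case, as a sketch:

    void *just_take_the_address() {
      // Address taken, but no 'goto *' anywhere in the function: the i8*
      // PHI in the indirectgoto block gets zero incoming values, so it is
      // replaced with undef and erased as above.
      return &&done;
    done:
      return 0;
    }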
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 639e683f0369..9bb219642ab4 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -183,13 +183,22 @@ public:
void PopConditionalTempDestruction();
private:
- CGDebugInfo* DebugInfo;
+ CGDebugInfo *DebugInfo;
+#ifndef USEINDIRECTBRANCH
/// LabelIDs - Track arbitrary ids assigned to labels for use in implementing
/// the GCC address-of-label extension and indirect goto. IDs are assigned to
/// labels inside getIDForAddrOfLabel().
std::map<const LabelStmt*, unsigned> LabelIDs;
+#else
+ /// IndirectBranch - The first time an indirect goto is seen we create a
+ /// block with an indirect branch. Every time we see the address of a label
+ /// taken, we add the label to the indirect goto. Every subsequent indirect
+ /// goto is codegen'd as a jump to the IndirectBranch's basic block.
+ llvm::IndirectBrInst *IndirectBranch;
+#endif
+#ifndef USEINDIRECTBRANCH
/// IndirectGotoSwitch - The first time an indirect goto is seen we create a
/// block with the switch for the indirect gotos. Every time we see the
/// address of a label taken, we add the label to the indirect goto. Every
@@ -197,6 +206,7 @@ private:
/// IndirectGotoSwitch's basic block.
llvm::SwitchInst *IndirectGotoSwitch;
+#endif
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
@@ -377,6 +387,11 @@ public:
/// GenerateVtable - Generate the vtable for the given type.
llvm::Value *GenerateVtable(const CXXRecordDecl *RD);
+ /// DynamicTypeAdjust - Do the non-virtual and virtual adjustments on an
+ /// object pointer to alter the dynamic type of the pointer. Used by
+ /// GenerateCovariantThunk for building thunks.
+ llvm::Value *DynamicTypeAdjust(llvm::Value *V, int64_t nv, int64_t v);
+
/// GenerateThunk - Generate a thunk for the given method
llvm::Constant *GenerateThunk(llvm::Function *Fn, const CXXMethodDecl *MD,
bool Extern, int64_t nv, int64_t v);
@@ -502,7 +517,7 @@ public:
//===--------------------------------------------------------------------===//
Qualifiers MakeQualifiers(QualType T) {
- Qualifiers Quals = T.getQualifiers();
+ Qualifiers Quals = getContext().getCanonicalType(T).getQualifiers();
Quals.setObjCGCAttr(getContext().getObjCGCAttrKind(T));
return Quals;
}
@@ -558,7 +573,11 @@ public:
/// the input field number being accessed.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+#ifndef USEINDIRECTBRANCH
unsigned GetIDForAddrOfLabel(const LabelStmt *L);
+#else
+ llvm::BlockAddress *GetAddrOfLabel(const LabelStmt *L);
+#endif
llvm::BasicBlock *GetIndirectGotoBlock();
/// EmitMemSetToZero - Generate code to memset a value of the given type to 0.
@@ -819,7 +838,7 @@ public:
LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitNullInitializationLValue(const CXXZeroInitValueExpr *E);
- LValue EmitPointerToDataMemberLValue(const QualifiedDeclRefExpr *E);
+ LValue EmitPointerToDataMemberLValue(const DeclRefExpr *E);
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
@@ -1000,6 +1019,8 @@ public:
bool IsAggLocVolatile = false,
bool IsInitializer = false);
+ void EmitCXXThrowExpr(const CXXThrowExpr *E);
+
//===--------------------------------------------------------------------===//
// Internal Helpers
//===--------------------------------------------------------------------===//
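
The header grows EmitCXXThrowExpr alongside DynamicTypeAdjust; its body lives in the CGException.cpp file this patch adds to the build. Under the Itanium ABI a throw is a three-step runtime sequence; a hedged conceptual sketch (not the literal emitted code):

    struct E { int code; };

    // 'throw E();' lowers approximately to:
    //   void *mem = __cxa_allocate_exception(sizeof(E));
    //   new (mem) E();                          // construct into the slot
    //   __cxa_throw(mem, &typeinfo for E, 0);   // transfers control away
    void fail() { throw E(); }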
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index ea84829b78a5..db609f62453c 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -253,6 +253,10 @@ GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
if (FD->isInAnonymousNamespace())
return CodeGenModule::GVA_Internal;
+ // "static" functions get internal linkage.
+ if (FD->getStorageClass() == FunctionDecl::Static && !isa<CXXMethodDecl>(FD))
+ return CodeGenModule::GVA_Internal;
+
// The kind of external linkage this function will have, if it is not
// inline or static.
CodeGenModule::GVALinkage External = CodeGenModule::GVA_StrongExternal;
@@ -260,19 +264,7 @@ GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
External = CodeGenModule::GVA_TemplateInstantiation;
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- // C++ member functions defined inside the class are always inline.
- if (MD->isInline() || !MD->isOutOfLine())
- return CodeGenModule::GVA_CXXInline;
-
- return External;
- }
-
- // "static" functions get internal linkage.
- if (FD->getStorageClass() == FunctionDecl::Static)
- return CodeGenModule::GVA_Internal;
-
- if (!FD->isInline())
+ if (!FD->isInlined())
return External;
if (!Features.CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
@@ -285,8 +277,16 @@ GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
return CodeGenModule::GVA_C99Inline;
}
- // C++ inline semantics
- assert(Features.CPlusPlus && "Must be in C++ mode");
+ // C++0x [temp.explicit]p9:
+ // [ Note: The intent is that an inline function that is the subject of
+ // an explicit instantiation declaration will still be implicitly
+ // instantiated when used so that the body can be considered for
+ // inlining, but that no out-of-line copy of the inline function would be
+ // generated in the translation unit. -- end note ]
+ if (FD->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return CodeGenModule::GVA_C99Inline;
+
return CodeGenModule::GVA_CXXInline;
}
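
The new TSK_ExplicitInstantiationDeclaration case implements the quoted note: under an explicit instantiation declaration, an inline function remains available for inlining but, like a C99 inline, contributes no out-of-line copy to the current TU. Sketch:

    template <typename T>
    struct Box {
      T get() const { return t; }   // implicitly inline member
      T t;
    };

    extern template struct Box<int>;  // explicit instantiation declaration

    // get() may still be inlined here, but no out-of-line copy of
    // Box<int>::get is emitted in this TU; the TU holding the matching
    // explicit instantiation definition provides it.
    int use(const Box<int> &b) { return b.get(); }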
@@ -601,6 +601,10 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+ PrettyStackTraceDecl CrashInfo((ValueDecl *)D, D->getLocation(),
+ Context.getSourceManager(),
+ "Generating code for declaration");
+
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
EmitCXXConstructor(CD, GD.getCtorType());
else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
@@ -949,7 +953,7 @@ GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
return CodeGenModule::GVA_StrongExternal;
case TSK_ExplicitInstantiationDeclaration:
- assert(false && "Variable should not be instantiated");
+ llvm::llvm_unreachable("Variable should not be instantiated");
// Fall through to treat this like any other instantiation.
case TSK_ImplicitInstantiation:
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index dedf824ef9fd..d43d13e26bbb 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -180,7 +180,7 @@ static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
}
const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
- const clang::Type &Ty = *Context.getCanonicalType(T);
+ const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();
switch (Ty.getTypeClass()) {
#define TYPE(Class, Base)
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 2e6034bbcd97..a5b34527969b 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -52,7 +52,8 @@ namespace {
void mangleGuardVariable(const VarDecl *D);
void mangleCXXVtable(const CXXRecordDecl *RD);
- void mangleCXXRtti(const CXXRecordDecl *RD);
+ void mangleCXXVTT(const CXXRecordDecl *RD);
+ void mangleCXXRtti(QualType Ty);
void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type);
void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type);
@@ -114,6 +115,7 @@ namespace {
}
static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
for (const DeclContext *DC = D->getDeclContext();
!DC->isTranslationUnit(); DC = DC->getParent()) {
if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
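
Canonicalizing the decl first matters when a function is redeclared outside its extern "C" block: the walk up the DeclContext chain must start from the first (canonical) declaration to find the LinkageSpecDecl. Example:

    extern "C" void f();  // canonical declaration, inside the linkage spec
    void f() {}           // later redeclaration/definition outside it
    // Without getCanonicalDecl() the walk would start at the definition,
    // miss the linkage spec, and mangle f as _Z1fv instead of plain f.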
@@ -204,10 +206,17 @@ void CXXNameMangler::mangleCXXVtable(const CXXRecordDecl *RD) {
mangleName(RD);
}
-void CXXNameMangler::mangleCXXRtti(const CXXRecordDecl *RD) {
+void CXXNameMangler::mangleCXXVTT(const CXXRecordDecl *RD) {
+ // <special-name> ::= TT <type> # VTT structure
+ Out << "_ZTT";
+ mangleName(RD);
+}
+
+void CXXNameMangler::mangleCXXRtti(QualType Ty) {
// <special-name> ::= TI <type> # typeinfo structure
Out << "_ZTI";
- mangleName(RD);
+
+ mangleType(Ty);
}
void CXXNameMangler::mangleGuardVariable(const VarDecl *D) {
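
mangleCXXVTT and the retargeted mangleCXXRtti follow the Itanium <special-name> grammar alongside the existing TV/vtable production. For one class the family of special symbols looks like this (illustrative):

    struct V {};
    struct A : virtual V { virtual ~A() {} };
    // Itanium special names for A:
    //   _ZTV1A   vtable         (TV <type>)
    //   _ZTT1A   VTT            (TT <type>, for classes with virtual bases)
    //   _ZTI1A   typeinfo       (TI <type>)
    //   _ZTS1A   typeinfo name  (TS <type>)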
@@ -1355,7 +1364,7 @@ namespace clang {
"Mangling declaration");
CXXNameMangler Mangler(Context, os);
- if (!Mangler.mangle(cast<NamedDecl>(D->getCanonicalDecl())))
+ if (!Mangler.mangle(D))
return false;
os.flush();
@@ -1424,10 +1433,10 @@ namespace clang {
os.flush();
}
- void mangleCXXRtti(MangleContext &Context, const CXXRecordDecl *RD,
+ void mangleCXXRtti(MangleContext &Context, QualType Ty,
llvm::raw_ostream &os) {
CXXNameMangler Mangler(Context, os);
- Mangler.mangleCXXRtti(RD);
+ Mangler.mangleCXXRtti(Ty);
os.flush();
}
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
index 2cdb4e23919d..7f46a106f32a 100644
--- a/lib/CodeGen/Mangle.h
+++ b/lib/CodeGen/Mangle.h
@@ -65,7 +65,9 @@ namespace clang {
llvm::raw_ostream &os);
void mangleCXXVtable(MangleContext &Context, const CXXRecordDecl *RD,
llvm::raw_ostream &os);
- void mangleCXXRtti(MangleContext &Context, const CXXRecordDecl *RD,
+ void mangleCXXVTT(MangleContext &Context, const CXXRecordDecl *RD,
+ llvm::raw_ostream &os);
+ void mangleCXXRtti(MangleContext &Context, QualType T,
llvm::raw_ostream &os);
void mangleCXXCtor(MangleContext &Context, const CXXConstructorDecl *D,
CXXCtorType Type, llvm::raw_ostream &os);
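
These entry points all write into a caller-supplied raw_ostream, following the pattern the vtable path uses earlier in this patch. A hedged usage sketch for the new VTT hook (assumes the usual CodeGenModule environment; names mirror the mangleCXXVtable call in CGVtable.cpp):

    //   llvm::SmallString<256> OutName;
    //   llvm::raw_svector_ostream Out(OutName);
    //   mangleCXXVTT(CGM.getMangleContext(), RD, Out);
    //   Out.flush();   // OutName now holds the "_ZTT..." name for RD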