author     Dimitry Andric <dim@FreeBSD.org>    2017-06-01 20:58:49 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2017-06-01 20:58:49 +0000
commit     416ada0f75bab22b084a1776deb229cd4a669c4d (patch)
tree       6eb65f3790434471361628af6199b07a4de92de7 /lib
parent     550ae89a710bf458d47e5b1d183f5e7039c2b384 (diff)
Vendor import of clang trunk r304460 (vendor/clang/clang-trunk-r304460)

Notes:
    svn path=/vendor/clang/dist/; revision=319463
    svn path=/vendor/clang/clang-trunk-r304460/; revision=319464; tag=vendor/clang/clang-trunk-r304460
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ODRHash.cpp                  |  28
-rw-r--r--  lib/Basic/Targets.cpp                |  16
-rw-r--r--  lib/CodeGen/ABIInfo.h                |   1
-rw-r--r--  lib/CodeGen/CGCall.cpp               |  18
-rw-r--r--  lib/CodeGen/CGCleanup.cpp            |   7
-rw-r--r--  lib/CodeGen/CGExpr.cpp               |  60
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp         | 210
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp        |   6
-rw-r--r--  lib/CodeGen/CGVTables.cpp            |   2
-rw-r--r--  lib/CodeGen/CodeGenFunction.h        |   8
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp        |  31
-rw-r--r--  lib/CodeGen/CodeGenModule.h          |  11
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp        |  47
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp      |   3
-rw-r--r--  lib/CodeGen/TargetInfo.cpp           |  22
-rw-r--r--  lib/Driver/ToolChains/Clang.cpp      |  63
-rw-r--r--  lib/Driver/ToolChains/Gnu.cpp        |  12
-rw-r--r--  lib/Format/UnwrappedLineParser.cpp   |  25
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp  |   4
-rw-r--r--  lib/Frontend/FrontendAction.cpp      |  20
-rw-r--r--  lib/Lex/HeaderSearch.cpp             |  17
-rw-r--r--  lib/Sema/SemaCoroutine.cpp           |   8
-rw-r--r--  lib/Sema/SemaDecl.cpp                |  11
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp            |   4
-rw-r--r--  lib/Sema/SemaExprCXX.cpp             |  33
-rw-r--r--  lib/Sema/SemaOverload.cpp            | 298
-rw-r--r--  lib/Sema/SemaType.cpp                |   6
-rw-r--r--  lib/Serialization/ASTWriter.cpp      |   9
28 files changed, 686 insertions(+), 294 deletions(-)
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
index 24371db64d07..0e822ce35b8c 100644
--- a/lib/AST/ODRHash.cpp
+++ b/lib/AST/ODRHash.cpp
@@ -110,7 +110,24 @@ void ODRHash::AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {
}
}
-void ODRHash::AddTemplateName(TemplateName Name) {}
+void ODRHash::AddTemplateName(TemplateName Name) {
+ auto Kind = Name.getKind();
+ ID.AddInteger(Kind);
+
+ switch (Kind) {
+ case TemplateName::Template:
+ AddDecl(Name.getAsTemplateDecl());
+ break;
+ // TODO: Support these cases.
+ case TemplateName::OverloadedTemplate:
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::DependentTemplate:
+ case TemplateName::SubstTemplateTemplateParm:
+ case TemplateName::SubstTemplateTemplateParmPack:
+ break;
+ }
+}
+
void ODRHash::AddTemplateArgument(TemplateArgument TA) {}
void ODRHash::AddTemplateParameterList(const TemplateParameterList *TPL) {}
@@ -492,6 +509,15 @@ public:
AddQualType(T->getNamedType());
VisitTypeWithKeyword(T);
}
+
+ void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
+ ID.AddInteger(T->getNumArgs());
+ for (const auto &TA : T->template_arguments()) {
+ Hash.AddTemplateArgument(TA);
+ }
+ Hash.AddTemplateName(T->getTemplateName());
+ VisitType(T);
+ }
};
void ODRHash::AddType(const Type *T) {
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 6be83d22a256..605f73802afb 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -3123,6 +3123,7 @@ public:
case CC_Swift:
case CC_X86Pascal:
case CC_IntelOclBicc:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4834,6 +4835,7 @@ public:
case CC_PreserveMost:
case CC_PreserveAll:
case CC_X86RegCall:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4907,6 +4909,7 @@ public:
case CC_X86_64SysV:
case CC_Swift:
case CC_X86RegCall:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -5860,6 +5863,7 @@ public:
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_Swift:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -6019,6 +6023,7 @@ public:
case CC_X86VectorCall:
return CCCR_Ignore;
case CC_C:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -6329,6 +6334,7 @@ public:
case CC_Swift:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -7380,6 +7386,7 @@ public:
switch (CC) {
case CC_C:
case CC_Swift:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -7663,6 +7670,15 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
return None;
}
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_OpenCLKernel:
+ return CCCR_OK;
+ }
+ }
};
class MipsTargetInfo : public TargetInfo {
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index c0be60ef53bc..e4dce2f2a004 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -149,7 +149,6 @@ namespace swiftcall {
return info->supportsSwift();
}
};
-
} // end namespace CodeGen
} // end namespace clang
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index c677d9887acc..8f405eee6e52 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -707,6 +707,12 @@ CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
signature.getRequiredArgs());
}
+namespace clang {
+namespace CodeGen {
+void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
+}
+}
+
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
@@ -741,12 +747,16 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
bool inserted = FunctionsBeingProcessed.insert(FI).second;
(void)inserted;
assert(inserted && "Recursively being processed?");
-
+
// Compute ABI information.
- if (info.getCC() != CC_Swift) {
- getABIInfo().computeInfo(*FI);
- } else {
+ if (CC == llvm::CallingConv::SPIR_KERNEL) {
+    // Force target-independent argument handling for the host-visible
+    // kernel functions.
+ computeSPIRKernelABIInfo(CGM, *FI);
+ } else if (info.getCC() == CC_Swift) {
swiftcall::computeABIInfo(CGM, *FI);
+ } else {
+ getABIInfo().computeInfo(*FI);
}
// Loop over all of the computed argument and return value info. If any of
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index e8bcf0a3ac56..b5453bc11e30 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -448,6 +448,13 @@ void CodeGenFunction::PopCleanupBlocks(
auto *Inst = dyn_cast_or_null<llvm::Instruction>(*ReloadedValue);
if (!Inst)
continue;
+
+  // Don't spill static allocas; they dominate all cleanups. These are created
+  // by binding a reference to a local variable or temporary.
+ auto *AI = dyn_cast<llvm::AllocaInst>(Inst);
+ if (AI && AI->isStaticAlloca())
+ continue;
+
Address Tmp =
CreateDefaultAlignTempAlloca(Inst->getType(), "tmp.exprcleanup");
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index b918a663ce5c..84ce896506d5 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -3002,9 +3002,10 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
llvm::Value *ptr,
ArrayRef<llvm::Value*> indices,
bool inbounds,
+ SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
if (inbounds) {
- return CGF.Builder.CreateInBoundsGEP(ptr, indices, name);
+ return CGF.EmitCheckedInBoundsGEP(ptr, indices, loc, name);
} else {
return CGF.Builder.CreateGEP(ptr, indices, name);
}
@@ -3035,8 +3036,9 @@ static QualType getFixedSizeElementType(const ASTContext &ctx,
}
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
- ArrayRef<llvm::Value*> indices,
+ ArrayRef<llvm::Value *> indices,
QualType eltType, bool inbounds,
+ SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
// All the indices except that last must be zero.
#ifndef NDEBUG
@@ -3057,7 +3059,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
llvm::Value *eltPtr =
- emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, name);
+ emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, loc, name);
return Address(eltPtr, eltAlign);
}
@@ -3110,7 +3112,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
Address Addr = EmitExtVectorElementLValue(LV);
QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
- Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true);
+ Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
+ E->getExprLoc());
return MakeAddrLValue(Addr, EltType, LV.getBaseInfo());
}
@@ -3138,7 +3141,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
}
Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
@@ -3163,8 +3167,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Do the GEP.
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
- llvm::Value *EltPtr =
- emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false);
+ llvm::Value *EltPtr = emitArraySubscriptGEP(
+ *this, Addr.getPointer(), ScaledIdx, false, E->getExprLoc());
Addr = Address(EltPtr, EltAlign);
// Cast back.
@@ -3189,14 +3193,16 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
{CGM.getSize(CharUnits::Zero()), Idx},
E->getType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
} else {
// The base must be a pointer; emit it with an estimate of its alignment.
Addr = EmitPointerWithAlignment(E->getBase(), &BaseInfo);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
}
LValue LV = MakeAddrLValue(Addr, E->getType(), BaseInfo);
@@ -3368,7 +3374,8 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
else
Idx = Builder.CreateNSWMul(Idx, NumElements);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -3387,13 +3394,15 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
// Propagate the alignment from the array itself to the result.
EltPtr = emitArraySubscriptGEP(
*this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
- ResultExprTy, !getLangOpts().isSignedOverflowDefined());
+ ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
} else {
Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
BaseTy, ResultExprTy, IsLowerBound);
EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
- !getLangOpts().isSignedOverflowDefined());
+ !getLangOpts().isSignedOverflowDefined(),
+ E->getExprLoc());
}
return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo);
@@ -3530,6 +3539,25 @@ static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
}
+static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
+ const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
+ if (!RD)
+ return false;
+
+ if (RD->isDynamicClass())
+ return true;
+
+ for (const auto &Base : RD->bases())
+ if (hasAnyVptr(Base.getType(), Context))
+ return true;
+
+ for (const FieldDecl *Field : RD->fields())
+ if (hasAnyVptr(Field->getType(), Context))
+ return true;
+
+ return false;
+}
+
LValue CodeGenFunction::EmitLValueForField(LValue base,
const FieldDecl *field) {
LValueBaseInfo BaseInfo = base.getBaseInfo();
@@ -3572,6 +3600,14 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
assert(!type->isReferenceType() && "union has reference member");
// TODO: handle path-aware TBAA for union.
TBAAPath = false;
+
+ const auto FieldType = field->getType();
+ if (CGM.getCodeGenOpts().StrictVTablePointers &&
+ hasAnyVptr(FieldType, getContext()))
+      // Because unions can easily skip invariant.group barriers, we need to
+      // add a barrier every time a CXXRecord field with a vptr is referenced.
+ addr = Address(Builder.CreateInvariantGroupBarrier(addr.getPointer()),
+ addr.getAlignment());
} else {
// For structs, we GEP to the field that the record layout suggests.
addr = emitAddrOfFieldStorage(*this, addr, field);
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 048b50d8261d..d604b4130a23 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -30,6 +30,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
@@ -44,6 +45,43 @@ using llvm::Value;
//===----------------------------------------------------------------------===//
namespace {
+
+/// Determine whether the given binary operation may overflow.
+/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
+/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
+/// the returned overflow check is precise. The returned value is 'true' for
+/// all other opcodes, to be conservative.
+bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
+ BinaryOperator::Opcode Opcode, bool Signed,
+ llvm::APInt &Result) {
+ // Assume overflow is possible, unless we can prove otherwise.
+ bool Overflow = true;
+ const auto &LHSAP = LHS->getValue();
+ const auto &RHSAP = RHS->getValue();
+ if (Opcode == BO_Add) {
+ if (Signed)
+ Result = LHSAP.sadd_ov(RHSAP, Overflow);
+ else
+ Result = LHSAP.uadd_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Sub) {
+ if (Signed)
+ Result = LHSAP.ssub_ov(RHSAP, Overflow);
+ else
+ Result = LHSAP.usub_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Mul) {
+ if (Signed)
+ Result = LHSAP.smul_ov(RHSAP, Overflow);
+ else
+ Result = LHSAP.umul_ov(RHSAP, Overflow);
+ } else if (Opcode == BO_Div || Opcode == BO_Rem) {
+ if (Signed && !RHS->isZero())
+ Result = LHSAP.sdiv_ov(RHSAP, Overflow);
+ else
+ return false;
+ }
+ return Overflow;
+}
+
struct BinOpInfo {
Value *LHS;
Value *RHS;
@@ -55,37 +93,14 @@ struct BinOpInfo {
/// Check if the binop can result in integer overflow.
bool mayHaveIntegerOverflow() const {
// Without constant input, we can't rule out overflow.
- const auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
- const auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
+ auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
+ auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
if (!LHSCI || !RHSCI)
return true;
- // Assume overflow is possible, unless we can prove otherwise.
- bool Overflow = true;
- const auto &LHSAP = LHSCI->getValue();
- const auto &RHSAP = RHSCI->getValue();
- if (Opcode == BO_Add) {
- if (Ty->hasSignedIntegerRepresentation())
- (void)LHSAP.sadd_ov(RHSAP, Overflow);
- else
- (void)LHSAP.uadd_ov(RHSAP, Overflow);
- } else if (Opcode == BO_Sub) {
- if (Ty->hasSignedIntegerRepresentation())
- (void)LHSAP.ssub_ov(RHSAP, Overflow);
- else
- (void)LHSAP.usub_ov(RHSAP, Overflow);
- } else if (Opcode == BO_Mul) {
- if (Ty->hasSignedIntegerRepresentation())
- (void)LHSAP.smul_ov(RHSAP, Overflow);
- else
- (void)LHSAP.umul_ov(RHSAP, Overflow);
- } else if (Opcode == BO_Div || Opcode == BO_Rem) {
- if (Ty->hasSignedIntegerRepresentation() && !RHSCI->isZero())
- (void)LHSAP.sdiv_ov(RHSAP, Overflow);
- else
- return false;
- }
- return Overflow;
+ llvm::APInt Result;
+ return ::mayHaveIntegerOverflow(
+ LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
}
/// Check if the binop computes a division or a remainder.
@@ -1925,7 +1940,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
- value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
+ value = CGF.EmitCheckedInBoundsGEP(value, numElts, E->getExprLoc(),
+ "vla.inc");
// Arithmetic on function pointers (!) is just +-1.
} else if (type->isFunctionType()) {
@@ -1935,7 +1951,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.funcptr");
else
- value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ value = CGF.EmitCheckedInBoundsGEP(value, amt, E->getExprLoc(),
+ "incdec.funcptr");
value = Builder.CreateBitCast(value, input->getType());
// For everything else, we can just do a simple increment.
@@ -1944,7 +1961,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, amt, "incdec.ptr");
else
- value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
+ value = CGF.EmitCheckedInBoundsGEP(value, amt, E->getExprLoc(),
+ "incdec.ptr");
}
// Vector increment/decrement.
@@ -2025,7 +2043,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
else
- value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ value = CGF.EmitCheckedInBoundsGEP(value, sizeValue, E->getExprLoc(),
+ "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
@@ -2692,7 +2711,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
} else {
index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ pointer = CGF.EmitCheckedInBoundsGEP(pointer, index, op.E->getExprLoc(),
+ "add.ptr");
}
return pointer;
}
@@ -2709,7 +2729,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
if (CGF.getLangOpts().isSignedOverflowDefined())
return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
- return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ return CGF.EmitCheckedInBoundsGEP(pointer, index, op.E->getExprLoc(),
+ "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
@@ -3824,3 +3845,124 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue(
llvm_unreachable("Unhandled compound assignment operator");
}
+
+Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
+ ArrayRef<Value *> IdxList,
+ SourceLocation Loc,
+ const Twine &Name) {
+ Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
+
+ // If the pointer overflow sanitizer isn't enabled, do nothing.
+ if (!SanOpts.has(SanitizerKind::PointerOverflow))
+ return GEPVal;
+
+ // If the GEP has already been reduced to a constant, leave it be.
+ if (isa<llvm::Constant>(GEPVal))
+ return GEPVal;
+
+ // Only check for overflows in the default address space.
+ if (GEPVal->getType()->getPointerAddressSpace())
+ return GEPVal;
+
+ auto *GEP = cast<llvm::GEPOperator>(GEPVal);
+ assert(GEP->isInBounds() && "Expected inbounds GEP");
+
+ SanitizerScope SanScope(this);
+ auto &VMContext = getLLVMContext();
+ const auto &DL = CGM.getDataLayout();
+ auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
+
+ // Grab references to the signed add/mul overflow intrinsics for intptr_t.
+ auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
+ auto *SAddIntrinsic =
+ CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
+ auto *SMulIntrinsic =
+ CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
+
+ // The total (signed) byte offset for the GEP.
+ llvm::Value *TotalOffset = nullptr;
+ // The offset overflow flag - true if the total offset overflows.
+ llvm::Value *OffsetOverflows = Builder.getFalse();
+
+ /// Return the result of the given binary operation.
+ auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
+ llvm::Value *RHS) -> llvm::Value * {
+    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
+
+ // If the operands are constants, return a constant result.
+ if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
+ if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
+ llvm::APInt N;
+ bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
+ /*Signed=*/true, N);
+ if (HasOverflow)
+ OffsetOverflows = Builder.getTrue();
+ return llvm::ConstantInt::get(VMContext, N);
+ }
+ }
+
+ // Otherwise, compute the result with checked arithmetic.
+ auto *ResultAndOverflow = Builder.CreateCall(
+ (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
+ OffsetOverflows = Builder.CreateOr(
+ OffsetOverflows, Builder.CreateExtractValue(ResultAndOverflow, 1));
+ return Builder.CreateExtractValue(ResultAndOverflow, 0);
+ };
+
+ // Determine the total byte offset by looking at each GEP operand.
+ for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
+ GTI != GTE; ++GTI) {
+ llvm::Value *LocalOffset;
+ auto *Index = GTI.getOperand();
+ // Compute the local offset contributed by this indexing step:
+ if (auto *STy = GTI.getStructTypeOrNull()) {
+ // For struct indexing, the local offset is the byte position of the
+ // specified field.
+ unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
+ LocalOffset = llvm::ConstantInt::get(
+ IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
+ } else {
+ // Otherwise this is array-like indexing. The local offset is the index
+ // multiplied by the element size.
+ auto *ElementSize = llvm::ConstantInt::get(
+ IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
+ auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
+ LocalOffset = eval(BO_Mul, ElementSize, IndexS);
+ }
+
+ // If this is the first offset, set it as the total offset. Otherwise, add
+ // the local offset into the running total.
+ if (!TotalOffset || TotalOffset == Zero)
+ TotalOffset = LocalOffset;
+ else
+ TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
+ }
+
+ // Common case: if the total offset is zero, don't emit a check.
+ if (TotalOffset == Zero)
+ return GEPVal;
+
+ // Now that we've computed the total offset, add it to the base pointer (with
+ // wrapping semantics).
+ auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
+ auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);
+
+ // The GEP is valid if:
+ // 1) The total offset doesn't overflow, and
+ // 2) The sign of the difference between the computed address and the base
+ // pointer matches the sign of the total offset.
+ llvm::Value *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
+ llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
+ auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
+ llvm::Value *ValidGEP = Builder.CreateAnd(
+ Builder.CreateNot(OffsetOverflows),
+ Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid));
+
+ llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
+ // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
+ llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
+ EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
+ SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
+
+ return GEPVal;
+}
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 3e3d04672357..b5599dad3096 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -90,7 +90,11 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- QualType IvarTy = Ivar->getType().withCVRQualifiers(CVRQualifiers);
+ QualType InterfaceTy{OID->getTypeForDecl(), 0};
+ QualType ObjectPtrTy =
+ CGF.CGM.getContext().getObjCObjectPointerType(InterfaceTy);
+ QualType IvarTy =
+ Ivar->getUsageType(ObjectPtrTy).withCVRQualifiers(CVRQualifiers);
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 1869c0e809df..64b6d0d3fe9f 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -901,6 +901,8 @@ void CodeGenModule::EmitDeferredVTables() {
for (const CXXRecordDecl *RD : DeferredVTables)
if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
VTables.GenerateClassData(RD);
+ else if (shouldOpportunisticallyEmitVTables())
+ OpportunisticVTables.push_back(RD);
assert(savedSize == DeferredVTables.size() &&
"deferred extra vtables during vtable emission?");
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 526ef9a1e579..42ffd0d3efcc 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -120,6 +120,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
SANITIZER_CHECK(NonnullReturn, nonnull_return, 0) \
SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
+ SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
@@ -3551,6 +3552,13 @@ public:
/// nonnull, if \p LHS is marked _Nonnull.
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);
+ /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
+ /// detect undefined behavior when the pointer overflow sanitizer is enabled.
+ llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
+ ArrayRef<llvm::Value *> IdxList,
+ SourceLocation Loc,
+ const Twine &Name = "");
+
/// \brief Emit a description of a type in a format suitable for passing to
/// a runtime sanitizer handler.
llvm::Constant *EmitCheckTypeDescriptor(QualType T);
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index e4e5fce02279..c61a5f6ffa71 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -382,6 +382,7 @@ void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
void CodeGenModule::Release() {
EmitDeferred();
+ EmitVTablesOpportunistically();
applyGlobalValReplacements();
applyReplacements();
checkAliases();
@@ -472,10 +473,10 @@ void CodeGenModule::Release() {
// Width of wchar_t in bytes
uint64_t WCharWidth =
Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
- assert(LangOpts.ShortWChar ||
- llvm::TargetLibraryInfoImpl::getTargetWCharSize(Target.getTriple()) ==
- Target.getWCharWidth() / 8 &&
- "LLVM wchar_t size out of sync");
+ assert((LangOpts.ShortWChar ||
+ llvm::TargetLibraryInfoImpl::getTargetWCharSize(Target.getTriple()) ==
+ Target.getWCharWidth() / 8) &&
+ "LLVM wchar_t size out of sync");
// We need to record the widths of enums and wchar_t, so that we can generate
// the correct build attributes in the ARM backend. wchar_size is also used by
@@ -1386,6 +1387,24 @@ void CodeGenModule::EmitDeferred() {
}
}
+void CodeGenModule::EmitVTablesOpportunistically() {
+  // Try to emit external vtables as available_externally if all of their
+  // inlined virtual functions have been emitted. This runs after EmitDeferred()
+  // and therefore must not create new references to things that need to be
+  // emitted lazily. Note that it also relies on us emitting RTTI eagerly.
+
+ assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
+ && "Only emit opportunistic vtables with optimizations");
+
+ for (const CXXRecordDecl *RD : OpportunisticVTables) {
+ assert(getVTables().isVTableExternal(RD) &&
+ "This queue should only contain external vtables");
+ if (getCXXABI().canSpeculativelyEmitVTable(RD))
+ VTables.GenerateClassData(RD);
+ }
+ OpportunisticVTables.clear();
+}
+
void CodeGenModule::EmitGlobalAnnotations() {
if (Annotations.empty())
return;
@@ -1906,6 +1925,10 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
return !isTriviallyRecursive(F);
}
+bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
+ return CodeGenOpts.OptimizationLevel > 0;
+}
+
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index e38337814ebc..0a71c635e8f0 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -341,6 +341,9 @@ private:
/// A queue of (optional) vtables to consider emitting.
std::vector<const CXXRecordDecl*> DeferredVTables;
+ /// A queue of (optional) vtables that may be emitted opportunistically.
+ std::vector<const CXXRecordDecl *> OpportunisticVTables;
+
/// List of global values which are required to be present in the object file;
/// bitcast to i8*. This is used for forcing visibility of symbols which may
/// otherwise be optimized out.
@@ -450,7 +453,7 @@ private:
bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(GlobalDecl GD);
-
+ bool shouldOpportunisticallyEmitVTables();
/// Map used to be sure we don't emit the same CompoundLiteral twice.
llvm::DenseMap<const CompoundLiteralExpr *, llvm::GlobalVariable *>
EmittedCompoundLiterals;
@@ -1278,6 +1281,12 @@ private:
/// Emit any needed decls for which code generation was deferred.
void EmitDeferred();
+  /// Try to emit external vtables as available_externally if all of their
+  /// inlined virtual functions have been emitted. This runs after EmitDeferred()
+  /// and therefore must not create new references to things that need to be
+  /// emitted lazily.
+ void EmitVTablesOpportunistically();
+
/// Call replaceAllUsesWith on all pairs in Replacements.
void applyReplacements();
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 66f51305430a..62b0e6155f99 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -366,20 +366,30 @@ public:
void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
private:
- bool hasAnyVirtualInlineFunction(const CXXRecordDecl *RD) const {
- const auto &VtableLayout =
- CGM.getItaniumVTableContext().getVTableLayout(RD);
-
- for (const auto &VtableComponent : VtableLayout.vtable_components()) {
- // Skip empty slot.
- if (!VtableComponent.isUsedFunctionPointerKind())
- continue;
-
- const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
- if (Method->getCanonicalDecl()->isInlined())
- return true;
- }
- return false;
+ bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
+ const auto &VtableLayout =
+ CGM.getItaniumVTableContext().getVTableLayout(RD);
+
+ for (const auto &VtableComponent : VtableLayout.vtable_components()) {
+ // Skip empty slot.
+ if (!VtableComponent.isUsedFunctionPointerKind())
+ continue;
+
+ const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
+ if (!Method->getCanonicalDecl()->isInlined())
+ continue;
+
+ StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
+ auto *Entry = CGM.GetGlobalValue(Name);
+      // This checks whether the virtual inline function has already been
+      // emitted. Note that the function may only be emitted after we have
+      // tried to emit the vtable speculatively, which is why we do an extra
+      // pass after emitting all deferred vtables to find and emit these
+      // vtables opportunistically.
+ if (!Entry || Entry->isDeclaration())
+ return true;
+ }
+ return false;
}
bool isVTableHidden(const CXXRecordDecl *RD) const {
@@ -1687,11 +1697,11 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
if (CGM.getLangOpts().AppleKext)
return false;
- // If we don't have any inline virtual functions, and if vtable is not hidden,
- // then we are safe to emit available_externally copy of vtable.
+  // If every inline virtual function has already been emitted, and the vtable
+  // is not hidden, it is safe to emit an available_externally copy of the vtable.
// FIXME we can still emit a copy of the vtable if we
// can emit definition of the inline functions.
- return !hasAnyVirtualInlineFunction(RD) && !isVTableHidden(RD);
+ return !hasAnyUnusedVirtualInlineFunction(RD) && !isVTableHidden(RD);
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
@@ -2576,6 +2586,9 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
if (!GV) {
// Create a new global variable.
+    // Note for the future: if we ever want to defer the emission of RTTI,
+    // check whether emitting vtables opportunistically needs any adjustment.
+
GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
/*Constant=*/true,
llvm::GlobalValue::ExternalLinkage, nullptr,
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 4cacf494e694..ff5aca88131e 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -3756,6 +3756,9 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+  // Note for the future: if we ever want to defer the emission of RTTI,
+  // check whether emitting vtables opportunistically needs any adjustment.
+
// Compute the fields for the TypeDescriptor.
SmallString<256> TypeInfoString;
{
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index d0ba74119b7d..427ec06a2fff 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -398,7 +398,17 @@ TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
}
unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
- return llvm::CallingConv::C;
+  // OpenCL kernels are called via an explicit runtime API with arguments
+  // set with clSetKernelArg(), not as normal sub-functions.
+  // Return SPIR_KERNEL by default as the kernel calling convention to
+  // ensure the fingerprint is fixed in such a way that each OpenCL argument
+  // gets one matching argument in the produced kernel function argument
+  // list. This enables a feasible implementation of clSetKernelArg() with
+  // aggregates etc. If we used the default C calling convention here,
+  // clSetKernelArg() might break depending on the target-specific
+  // conventions; different targets might split structs passed by value
+  // into multiple function arguments etc.
+ return llvm::CallingConv::SPIR_KERNEL;
}
llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
@@ -8068,8 +8078,18 @@ public:
CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
};
+
} // End anonymous namespace.
+namespace clang {
+namespace CodeGen {
+void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
+ DefaultABIInfo SPIRABI(CGM.getTypes());
+ SPIRABI.computeInfo(FI);
+}
+}
+}
+
/// Emit SPIR specific metadata: OpenCL and SPIR version.
void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index 555847aeeb23..698c3aa326cb 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -3985,9 +3985,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< value;
}
+ bool CaretDefault = true;
+ bool ColumnDefault = true;
+ if (Arg *DiagArg = Args.getLastArg(options::OPT__SLASH_diagnostics_classic,
+ options::OPT__SLASH_diagnostics_column,
+ options::OPT__SLASH_diagnostics_caret)) {
+ switch (DiagArg->getOption().getID()) {
+ case options::OPT__SLASH_diagnostics_caret:
+ CaretDefault = true;
+ ColumnDefault = true;
+ break;
+ case options::OPT__SLASH_diagnostics_column:
+ CaretDefault = false;
+ ColumnDefault = true;
+ break;
+ case options::OPT__SLASH_diagnostics_classic:
+ CaretDefault = false;
+ ColumnDefault = false;
+ break;
+ }
+ }
+
// -fcaret-diagnostics is default.
if (!Args.hasFlag(options::OPT_fcaret_diagnostics,
- options::OPT_fno_caret_diagnostics, true))
+ options::OPT_fno_caret_diagnostics, CaretDefault))
CmdArgs.push_back("-fno-caret-diagnostics");
// -fdiagnostics-fixit-info is default, only pass non-default.
@@ -4059,7 +4080,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fdiagnostics-absolute-paths");
if (!Args.hasFlag(options::OPT_fshow_column, options::OPT_fno_show_column,
- true))
+ ColumnDefault))
CmdArgs.push_back("-fno-show-column");
if (!Args.hasFlag(options::OPT_fspell_checking,
@@ -4781,14 +4802,36 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("-fms-memptr-rep=virtual");
}
- if (Args.getLastArg(options::OPT__SLASH_Gd))
- CmdArgs.push_back("-fdefault-calling-conv=cdecl");
- else if (Args.getLastArg(options::OPT__SLASH_Gr))
- CmdArgs.push_back("-fdefault-calling-conv=fastcall");
- else if (Args.getLastArg(options::OPT__SLASH_Gz))
- CmdArgs.push_back("-fdefault-calling-conv=stdcall");
- else if (Args.getLastArg(options::OPT__SLASH_Gv))
- CmdArgs.push_back("-fdefault-calling-conv=vectorcall");
+ // Parse the default calling convention options.
+ if (Arg *CCArg =
+ Args.getLastArg(options::OPT__SLASH_Gd, options::OPT__SLASH_Gr,
+ options::OPT__SLASH_Gz, options::OPT__SLASH_Gv)) {
+ unsigned DCCOptId = CCArg->getOption().getID();
+ const char *DCCFlag = nullptr;
+ bool ArchSupported = true;
+ llvm::Triple::ArchType Arch = getToolChain().getArch();
+ switch (DCCOptId) {
+ case options::OPT__SLASH_Gd:
+ DCCFlag = "-fdefault-calling-conv=cdecl";
+ break;
+ case options::OPT__SLASH_Gr:
+ ArchSupported = Arch == llvm::Triple::x86;
+ DCCFlag = "-fdefault-calling-conv=fastcall";
+ break;
+ case options::OPT__SLASH_Gz:
+ ArchSupported = Arch == llvm::Triple::x86;
+ DCCFlag = "-fdefault-calling-conv=stdcall";
+ break;
+ case options::OPT__SLASH_Gv:
+ ArchSupported = Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64;
+ DCCFlag = "-fdefault-calling-conv=vectorcall";
+ break;
+ }
+
+ // MSVC doesn't warn if /Gr or /Gz is used on x64, so we don't either.
+ if (ArchSupported && DCCFlag)
+ CmdArgs.push_back(DCCFlag);
+ }
if (Arg *A = Args.getLastArg(options::OPT_vtordisp_mode_EQ))
A->render(Args, CmdArgs);
diff --git a/lib/Driver/ToolChains/Gnu.cpp b/lib/Driver/ToolChains/Gnu.cpp
index 1a398fd8a773..d50f8e21f62f 100644
--- a/lib/Driver/ToolChains/Gnu.cpp
+++ b/lib/Driver/ToolChains/Gnu.cpp
@@ -278,20 +278,20 @@ static void AddOpenMPLinkerScript(const ToolChain &TC, Compilation &C,
LksStream << "SECTIONS\n";
LksStream << "{\n";
- LksStream << " .omp_offloading :\n";
- LksStream << " ALIGN(0x10)\n";
- LksStream << " {\n";
- for (auto &BI : InputBinaryInfo) {
- LksStream << " . = ALIGN(0x10);\n";
+ // Put each target binary into a separate section.
+ for (const auto &BI : InputBinaryInfo) {
+ LksStream << " .omp_offloading." << BI.first << " :\n";
+ LksStream << " ALIGN(0x10)\n";
+ LksStream << " {\n";
LksStream << " PROVIDE_HIDDEN(.omp_offloading.img_start." << BI.first
<< " = .);\n";
LksStream << " " << BI.second << "\n";
LksStream << " PROVIDE_HIDDEN(.omp_offloading.img_end." << BI.first
<< " = .);\n";
+ LksStream << " }\n";
}
- LksStream << " }\n";
// Add commands to define host entries begin and end. We use 1-byte subalign
// so that the linker does not add any padding and the elements in this
// section form an array.
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index ae79ea5d8a66..eda7ef36434d 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -360,16 +360,21 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
switch (Tok->Tok.getKind()) {
case tok::l_brace:
- if (Style.Language == FormatStyle::LK_JavaScript && PrevTok &&
- PrevTok->is(tok::colon))
- // A colon indicates this code is in a type, or a braced list following
- // a label in an object literal ({a: {b: 1}}).
- // The code below could be confused by semicolons between the individual
- // members in a type member list, which would normally trigger BK_Block.
- // In both cases, this must be parsed as an inline braced init.
- Tok->BlockKind = BK_BracedInit;
- else
+ if (Style.Language == FormatStyle::LK_JavaScript && PrevTok) {
+ if (PrevTok->is(tok::colon))
+ // A colon indicates this code is in a type, or a braced list
+ // following a label in an object literal ({a: {b: 1}}). The code
+ // below could be confused by semicolons between the individual
+ // members in a type member list, which would normally trigger
+ // BK_Block. In both cases, this must be parsed as an inline braced
+ // init.
+ Tok->BlockKind = BK_BracedInit;
+ else if (PrevTok->is(tok::r_paren))
+ // `) { }` can only occur in function or method declarations in JS.
+ Tok->BlockKind = BK_Block;
+ } else {
Tok->BlockKind = BK_Unknown;
+ }
LBraceStack.push_back(Tok);
break;
case tok::r_brace:
@@ -391,6 +396,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// BlockKind later if we parse a braced list (where all blocks
// inside are by default braced lists), or when we explicitly detect
// blocks (for example while parsing lambdas).
+ // FIXME: Some of these do not apply to JS, e.g. "} {" can never be a
+ // braced list in JS.
ProbablyBracedList =
(Style.Language == FormatStyle::LK_JavaScript &&
NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 7d7e7d49e9f0..47c763d29357 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -2214,8 +2214,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
llvm::Triple T(TargetOpts.Triple);
llvm::Triple::ArchType Arch = T.getArch();
bool emitError = (DefaultCC == LangOptions::DCC_FastCall ||
- DefaultCC == LangOptions::DCC_StdCall) &&
- Arch != llvm::Triple::x86;
+ DefaultCC == LangOptions::DCC_StdCall) &&
+ Arch != llvm::Triple::x86;
emitError |= DefaultCC == LangOptions::DCC_VectorCall &&
!(Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64);
if (emitError)
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 874c1b6be41e..cd67e469ddad 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -373,10 +373,11 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
return std::error_code();
}
-static bool
-loadModuleMapForModuleBuild(CompilerInstance &CI, StringRef Filename,
- bool IsSystem, bool IsPreprocessed,
- unsigned &Offset) {
+static bool loadModuleMapForModuleBuild(CompilerInstance &CI,
+ StringRef Filename, bool IsSystem,
+ bool IsPreprocessed,
+ std::string &PresumedModuleMapFile,
+ unsigned &Offset) {
auto &SrcMgr = CI.getSourceManager();
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
@@ -388,16 +389,15 @@ loadModuleMapForModuleBuild(CompilerInstance &CI, StringRef Filename,
// line directives are not part of the module map syntax in general.
Offset = 0;
if (IsPreprocessed) {
- std::string PresumedModuleMapFile;
SourceLocation EndOfLineMarker =
ReadOriginalFileName(CI, PresumedModuleMapFile, /*AddLineNote*/true);
if (EndOfLineMarker.isValid())
Offset = CI.getSourceManager().getDecomposedLoc(EndOfLineMarker).second;
- // FIXME: Use PresumedModuleMapFile as the MODULE_MAP_FILE in the PCM.
}
// Load the module map file.
- if (HS.loadModuleMapFile(ModuleMap, IsSystem, ModuleMapID, &Offset))
+ if (HS.loadModuleMapFile(ModuleMap, IsSystem, ModuleMapID, &Offset,
+ PresumedModuleMapFile))
return true;
if (SrcMgr.getBuffer(ModuleMapID)->getBufferSize() == Offset)
@@ -664,15 +664,19 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (Input.getKind().getFormat() == InputKind::ModuleMap) {
CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleMap);
+ std::string PresumedModuleMapFile;
unsigned OffsetToContents;
if (loadModuleMapForModuleBuild(CI, Input.getFile(), Input.isSystem(),
- Input.isPreprocessed(), OffsetToContents))
+ Input.isPreprocessed(),
+ PresumedModuleMapFile, OffsetToContents))
goto failure;
auto *CurrentModule = prepareToBuildModule(CI, Input.getFile());
if (!CurrentModule)
goto failure;
+ CurrentModule->PresumedModuleMapFile = PresumedModuleMapFile;
+
if (OffsetToContents)
// If the module contents are in the same file, skip to them.
CI.getPreprocessor().setSkipMainFilePreamble(OffsetToContents, true);
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index f5b7c59e446f..9084bc352f76 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -1326,14 +1326,27 @@ static const FileEntry *getPrivateModuleMap(const FileEntry *File,
}
bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
- FileID ID, unsigned *Offset) {
+ FileID ID, unsigned *Offset,
+ StringRef OriginalModuleMapFile) {
// Find the directory for the module. For frameworks, that may require going
// up from the 'Modules' directory.
const DirectoryEntry *Dir = nullptr;
if (getHeaderSearchOpts().ModuleMapFileHomeIsCwd)
Dir = FileMgr.getDirectory(".");
else {
- Dir = File->getDir();
+ if (!OriginalModuleMapFile.empty()) {
+ // We're building a preprocessed module map. Find or invent the directory
+ // that it originally occupied.
+ Dir = FileMgr.getDirectory(
+ llvm::sys::path::parent_path(OriginalModuleMapFile));
+ if (!Dir) {
+ auto *FakeFile = FileMgr.getVirtualFile(OriginalModuleMapFile, 0, 0);
+ Dir = FakeFile->getDir();
+ }
+ } else {
+ Dir = File->getDir();
+ }
+
StringRef DirName(Dir->getName());
if (llvm::sys::path::filename(DirName) == "Modules") {
DirName = llvm::sys::path::parent_path(DirName);
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
index ae6c35f22065..8a548c0ab861 100644
--- a/lib/Sema/SemaCoroutine.cpp
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -391,8 +391,11 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
// [expr.await]p3 [...]
// - await-suspend is the expression e.await_suspend(h), which shall be
// a prvalue of type void or bool.
- QualType RetType = AwaitSuspend->getType();
- if (RetType != S.Context.BoolTy && RetType != S.Context.VoidTy) {
+ QualType RetType = AwaitSuspend->getCallReturnType(S.Context);
+ // non-class prvalues always have cv-unqualified types
+ QualType AdjRetType = RetType.getUnqualifiedType();
+ if (RetType->isReferenceType() ||
+ (AdjRetType != S.Context.BoolTy && AdjRetType != S.Context.VoidTy)) {
S.Diag(AwaitSuspend->getCalleeDecl()->getLocation(),
diag::err_await_suspend_invalid_return_type)
<< RetType;
@@ -437,6 +440,7 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
if (VD->isInvalidDecl())
return nullptr;
ActOnUninitializedDecl(VD);
+ FD->addDecl(VD);
assert(!VD->isInvalidDecl());
return VD;
}
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index a9adbec4f842..ea1f7526a832 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -11116,6 +11116,17 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
<< Init->getSourceRange();
Diag(attr->getLocation(), diag::note_declared_required_constant_init_here)
<< attr->getRange();
+ if (getLangOpts().CPlusPlus11) {
+ APValue Value;
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ Init->EvaluateAsInitializer(Value, getASTContext(), var, Notes);
+ for (auto &it : Notes)
+ Diag(it.first, it.second);
+ } else {
+ Diag(CacheCulprit->getExprLoc(),
+ diag::note_invalid_subexpr_in_const_expr)
+ << CacheCulprit->getSourceRange();
+ }
}
}
else if (!var->isConstexpr() && IsGlobal &&
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index 370461c4a24e..2c8080dbf02b 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -1851,10 +1851,6 @@ Decl *Sema::ActOnStartCategoryImplementation(
// FIXME: PushOnScopeChains?
CurContext->addDecl(CDecl);
- // If the interface is deprecated/unavailable, warn/error about it.
- if (IDecl)
- DiagnoseUseOfDecl(IDecl, ClassLoc);
-
// If the interface has the objc_runtime_visible attribute, we
// cannot implement a category for it.
if (IDecl && IDecl->hasAttr<ObjCRuntimeVisibleAttr>()) {
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 8500b748a3ec..a05f7a7e406b 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -4080,24 +4080,23 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
return true;
- // C++0x [meta.unary.prop] Table 49 requires the following traits to be
- // applied to a complete type.
+ // C++1z [meta.unary.prop]:
+ // remove_all_extents_t<T> shall be a complete type or cv void.
case UTT_IsAggregate:
case UTT_IsTrivial:
case UTT_IsTriviallyCopyable:
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
+ ArgTy = QualType(ArgTy->getBaseElementTypeUnsafe(), 0);
+ LLVM_FALLTHROUGH;
+ // C++1z [meta.unary.prop]:
+ // T shall be a complete type, cv void, or an array of unknown bound.
case UTT_IsDestructible:
case UTT_IsNothrowDestructible:
- // Fall-through
-
- // These trait expressions are designed to help implement predicates in
- // [meta.unary.prop] despite not being named the same. They are specified
- // by both GCC and the Embarcadero C++ compiler, and require the complete
- // type due to the overarching C++0x type predicates being implemented
- // requiring the complete type.
+ case UTT_IsTriviallyDestructible:
+ // Per the GCC type traits documentation, the same constraints apply to these.
case UTT_HasNothrowAssign:
case UTT_HasNothrowMoveAssign:
case UTT_HasNothrowConstructor:
@@ -4109,17 +4108,11 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_HasTrivialCopy:
case UTT_HasTrivialDestructor:
case UTT_HasVirtualDestructor:
- // Arrays of unknown bound are expressly allowed.
- QualType ElTy = ArgTy;
- if (ArgTy->isIncompleteArrayType())
- ElTy = S.Context.getAsArrayType(ArgTy)->getElementType();
-
- // The void type is expressly allowed.
- if (ElTy->isVoidType())
+ if (ArgTy->isIncompleteArrayType() || ArgTy->isVoidType())
return true;
return !S.RequireCompleteType(
- Loc, ElTy, diag::err_incomplete_type_used_in_type_trait_expr);
+ Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
}
}
@@ -4356,6 +4349,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
!RD->hasNonTrivialCopyAssignment();
return false;
case UTT_IsDestructible:
+ case UTT_IsTriviallyDestructible:
case UTT_IsNothrowDestructible:
// C++14 [meta.unary.prop]:
// For reference types, is_destructible<T>::value is true.
@@ -4373,6 +4367,11 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
if (T->isIncompleteType() || T->isFunctionType())
return false;
+ // A type that requires destruction (via a non-trivial destructor or ARC
+ // lifetime semantics) is not trivially-destructible.
+ if (UTT == UTT_IsTriviallyDestructible && T.isDestructedType())
+ return false;
+
// C++14 [meta.unary.prop]:
// For object types and given U equal to remove_all_extents_t<T>, if the
// expression std::declval<U&>().~U() is well-formed when treated as an
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 1ba84034fa47..7bdd8872456a 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -49,12 +49,12 @@ static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
static ExprResult
CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
bool HadMultipleCandidates,
- SourceLocation Loc = SourceLocation(),
+ SourceLocation Loc = SourceLocation(),
const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
if (S.DiagnoseUseOfDecl(FoundDecl, Loc))
- return ExprError();
+ return ExprError();
// If FoundDecl is different from Fn (such as if one is a template
- // and the other a specialization), make sure DiagnoseUseOfDecl is
+ // and the other a specialization), make sure DiagnoseUseOfDecl is
// called on both.
// FIXME: This would be more comprehensively addressed by modifying
// DiagnoseUseOfDecl to accept both the FoundDecl and the decl
@@ -79,7 +79,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool CStyle,
bool AllowObjCWritebackConversion);
-static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
@@ -330,13 +330,13 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
} else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
llvm::APSInt IntConstantValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ assert(Initializer && "Unknown conversion expression");
// If it's value-dependent, we can't tell whether it's narrowing.
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
- if (Initializer &&
- Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
+ if (Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
// Convert the integer to the floating type.
llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
@@ -852,7 +852,7 @@ namespace {
Expr *Saved;
};
SmallVector<Entry, 2> Entries;
-
+
public:
void save(Sema &S, Expr *&E) {
assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
@@ -863,7 +863,7 @@ namespace {
void restore() {
for (SmallVectorImpl<Entry>::iterator
- i = Entries.begin(), e = Entries.end(); i != e; ++i)
+ i = Entries.begin(), e = Entries.end(); i != e; ++i)
*i->Addr = i->Saved;
}
};
@@ -1368,9 +1368,9 @@ Sema::TryImplicitConversion(Expr *From, QualType ToType,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion) {
- return ::TryImplicitConversion(*this, From, ToType,
+ return ::TryImplicitConversion(*this, From, ToType,
SuppressUserConversions, AllowExplicit,
- InOverloadResolution, CStyle,
+ InOverloadResolution, CStyle,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
}
@@ -1396,7 +1396,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Objective-C ARC: Determine whether we will allow the writeback conversion.
bool AllowObjCWritebackConversion
- = getLangOpts().ObjCAutoRefCount &&
+ = getLangOpts().ObjCAutoRefCount &&
(Action == AA_Passing || Action == AA_Sending);
if (getLangOpts().ObjC1)
CheckObjCBridgeRelatedConversions(From->getLocStart(),
@@ -1592,15 +1592,15 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// if the function type matches except for [[noreturn]], it's ok
if (!S.IsFunctionConversion(FromType,
S.ExtractUnqualifiedFunctionType(ToType), resultTy))
- // otherwise, only a boolean conversion is standard
- if (!ToType->isBooleanType())
- return false;
+ // otherwise, only a boolean conversion is standard
+ if (!ToType->isBooleanType())
+ return false;
}
// Check if the "from" expression is taking the address of an overloaded
// function and recompute the FromType accordingly. Take advantage of the
// fact that non-static member functions *must* have such an address-of
- // expression.
+ // expression.
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn);
if (Method && !Method->isStatic()) {
assert(isa<UnaryOperator>(From->IgnoreParens()) &&
@@ -1638,7 +1638,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.First = ICK_Lvalue_To_Rvalue;
// C11 6.3.2.1p2:
- // ... if the lvalue has atomic type, the value has the non-atomic version
+ // ... if the lvalue has atomic type, the value has the non-atomic version
// of the type of the lvalue ...
if (const AtomicType *Atomic = FromType->getAs<AtomicType>())
FromType = Atomic->getValueType();
@@ -1890,12 +1890,12 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
}
static bool
-IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle) {
-
+
const RecordType *UT = ToType->getAsUnionType();
if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
return false;
@@ -2129,7 +2129,7 @@ BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
"Invalid similarly-qualified pointer type");
/// Conversions to 'id' subsume cv-qualifier conversions.
- if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
+ if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
return ToType.getUnqualifiedType();
QualType CanonFromPointee
@@ -2139,7 +2139,7 @@ BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
if (StripObjCLifetime)
Quals.removeObjCLifetime();
-
+
// Exact qualifier match -> return the pointer type we're converting to.
if (CanonToPointee.getLocalQualifiers() == Quals) {
// ToType is exactly what we need. Return it.
@@ -2323,21 +2323,21 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
ToType, Context);
return true;
}
-
+
return false;
}
-
+
/// \brief Adopt the given qualifiers for the given type.
static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
Qualifiers TQs = T.getQualifiers();
-
+
// Check whether qualifiers already match.
if (TQs == Qs)
return T;
-
+
if (Qs.compatiblyIncludes(TQs))
return Context.getQualifiedType(T, Qs);
-
+
return Context.getQualifiedType(T.getUnqualifiedType(), Qs);
}
@@ -2352,7 +2352,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
// The set of qualifiers on the type we're converting from.
Qualifiers FromQualifiers = FromType.getQualifiers();
-
+
// First, we handle all conversions on ObjC object pointer types.
const ObjCObjectPointerType* ToObjCPtr =
ToType->getAs<ObjCObjectPointerType>();
@@ -2443,7 +2443,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
ToPointeeType->getAs<ObjCObjectPointerType>() &&
isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
IncompatibleObjC)) {
-
+
ConvertedType = Context.getPointerType(ConvertedType);
ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
return true;
@@ -2526,46 +2526,46 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
/// this conversion.
bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType) {
- if (!getLangOpts().ObjCAutoRefCount ||
+ if (!getLangOpts().ObjCAutoRefCount ||
Context.hasSameUnqualifiedType(FromType, ToType))
return false;
-
+
// Parameter must be a pointer to __autoreleasing (with no other qualifiers).
QualType ToPointee;
if (const PointerType *ToPointer = ToType->getAs<PointerType>())
ToPointee = ToPointer->getPointeeType();
else
return false;
-
+
Qualifiers ToQuals = ToPointee.getQualifiers();
- if (!ToPointee->isObjCLifetimeType() ||
+ if (!ToPointee->isObjCLifetimeType() ||
ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
!ToQuals.withoutObjCLifetime().empty())
return false;
-
+
  // Argument must be a pointer to __strong or __weak.
QualType FromPointee;
if (const PointerType *FromPointer = FromType->getAs<PointerType>())
FromPointee = FromPointer->getPointeeType();
else
return false;
-
+
Qualifiers FromQuals = FromPointee.getQualifiers();
if (!FromPointee->isObjCLifetimeType() ||
(FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
return false;
-
+
// Make sure that we have compatible qualifiers.
FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
if (!ToQuals.compatiblyIncludes(FromQuals))
return false;
-
+
// Remove qualifiers from the pointee type we're converting from; they
  // aren't used in the compatibility check below, and we'll be adding back
// qualifiers (with __autoreleasing) if the compatibility check succeeds.
FromPointee = FromPointee.getUnqualifiedType();
-
+
// The unqualified form of the pointee types must be compatible.
ToPointee = ToPointee.getUnqualifiedType();
bool IncompatibleObjC;
@@ -2574,7 +2574,7 @@ bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
IncompatibleObjC))
return false;
-
+
/// \brief Construct the type we're converting to, which is a pointer to
/// __autoreleasing pointee.
FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
@@ -2590,7 +2590,7 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
ToPointeeType = ToBlockPtr->getPointeeType();
else
return false;
-
+
QualType FromPointeeType;
if (const BlockPointerType *FromBlockPtr =
FromType->getAs<BlockPointerType>())
@@ -2600,24 +2600,24 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
// We have pointer to blocks, check whether the only
// differences in the argument and result types are in Objective-C
// pointer conversions. If so, we permit the conversion.
-
+
const FunctionProtoType *FromFunctionType
= FromPointeeType->getAs<FunctionProtoType>();
const FunctionProtoType *ToFunctionType
= ToPointeeType->getAs<FunctionProtoType>();
-
+
if (!FromFunctionType || !ToFunctionType)
return false;
if (Context.hasSameType(FromPointeeType, ToPointeeType))
return true;
-
+
// Perform the quick checks that will tell us whether these
// function types are obviously different.
if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
FromFunctionType->isVariadic() != ToFunctionType->isVariadic())
return false;
-
+
FunctionType::ExtInfo FromEInfo = FromFunctionType->getExtInfo();
FunctionType::ExtInfo ToEInfo = ToFunctionType->getExtInfo();
if (FromEInfo != ToEInfo)
@@ -2645,7 +2645,7 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
else
return false;
}
-
+
// Check argument types.
for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
ArgIdx != NumArgs; ++ArgIdx) {
@@ -2666,7 +2666,7 @@ bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
if (!Context.doFunctionTypesMatchOnExtParameterInfos(FromFunctionType,
ToFunctionType))
return false;
-
+
ConvertedType = ToType;
return true;
}
@@ -3012,7 +3012,7 @@ bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
static bool isNonTrivialObjCLifetimeConversion(Qualifiers FromQuals,
Qualifiers ToQuals) {
// Converting anything to const __unsafe_unretained is trivial.
- if (ToQuals.hasConst() &&
+ if (ToQuals.hasConst() &&
ToQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
return false;
@@ -3032,7 +3032,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
FromType = Context.getCanonicalType(FromType);
ToType = Context.getCanonicalType(ToType);
ObjCLifetimeConversion = false;
-
+
// If FromType and ToType are the same type, this is not a
// qualification conversion.
if (FromType.getUnqualifiedType() == ToType.getUnqualifiedType())
@@ -3058,7 +3058,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
// Ignore __unaligned qualifier if this type is void.
if (ToType.getUnqualifiedType()->isVoidType())
FromQuals.removeUnaligned();
-
+
// Objective-C ARC:
// Check Objective-C lifetime conversions.
if (FromQuals.getObjCLifetime() != ToQuals.getObjCLifetime() &&
@@ -3074,14 +3074,14 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
return false;
}
}
-
+
// Allow addition/removal of GC attributes but not changing GC attributes.
if (FromQuals.getObjCGCAttr() != ToQuals.getObjCGCAttr() &&
(!FromQuals.hasObjCGCAttr() || !ToQuals.hasObjCGCAttr())) {
FromQuals.removeObjCGCAttr();
ToQuals.removeObjCGCAttr();
}
-
+
// -- for every j > 0, if const is in cv 1,j then const is in cv
// 2,j, and similarly for volatile.
if (!CStyle && !ToQuals.compatiblyIncludes(FromQuals))
@@ -3119,13 +3119,13 @@ static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
const AtomicType *ToAtomic = ToType->getAs<AtomicType>();
if (!ToAtomic)
return false;
-
+
StandardConversionSequence InnerSCS;
- if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
+ if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
InOverloadResolution, InnerSCS,
CStyle, /*AllowObjCWritebackConversion=*/false))
return false;
-
+
SCS.Second = InnerSCS.Second;
SCS.setToType(1, InnerSCS.getToType(1));
SCS.Third = InnerSCS.Third;
@@ -3180,8 +3180,8 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
bool HadMultipleCandidates = (CandidateSet.size() > 1);
OverloadCandidateSet::iterator Best;
- switch (auto Result =
- CandidateSet.BestViableFunction(S, From->getLocStart(),
+ switch (auto Result =
+ CandidateSet.BestViableFunction(S, From->getLocStart(),
Best, true)) {
case OR_Deleted:
case OR_Success: {
@@ -3552,7 +3552,7 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
// Two implicit conversion sequences of the same form are
// indistinguishable conversion sequences unless one of the
// following rules apply: (C++ 13.3.3.2p3):
-
+
// List-initialization sequence L1 is a better conversion sequence than
// list-initialization sequence L2 if:
// - L1 converts to std::initializer_list<X> for some X and L2 does not, or,
@@ -3587,7 +3587,7 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
ICS1.UserDefined.After,
ICS2.UserDefined.After);
else
- Result = compareConversionFunctions(S,
+ Result = compareConversionFunctions(S,
ICS1.UserDefined.ConversionFunction,
ICS2.UserDefined.ConversionFunction);
}
@@ -3769,9 +3769,9 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const ObjCObjectPointerType* FromObjCPtr2
= FromType2->getAs<ObjCObjectPointerType>();
if (FromObjCPtr1 && FromObjCPtr2) {
- bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1,
+ bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1,
FromObjCPtr2);
- bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2,
+ bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2,
FromObjCPtr1);
if (AssignLeft != AssignRight) {
return AssignLeft? ImplicitConversionSequence::Better
@@ -3809,13 +3809,13 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
if (UnqualT1 == UnqualT2) {
// Objective-C++ ARC: If the references refer to objects with different
// lifetimes, prefer bindings that don't change lifetime.
- if (SCS1.ObjCLifetimeConversionBinding !=
+ if (SCS1.ObjCLifetimeConversionBinding !=
SCS2.ObjCLifetimeConversionBinding) {
return SCS1.ObjCLifetimeConversionBinding
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
-
+
// If the type is an array type, promote the element qualifiers to the
// type for comparison.
if (isa<ArrayType>(T1) && T1Quals)
@@ -3825,7 +3825,7 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
if (T2.isMoreQualifiedThan(T1))
return ImplicitConversionSequence::Better;
else if (T1.isMoreQualifiedThan(T2))
- return ImplicitConversionSequence::Worse;
+ return ImplicitConversionSequence::Worse;
}
}
@@ -3891,17 +3891,17 @@ CompareQualificationConversions(Sema &S,
ImplicitConversionSequence::CompareKind Result
= ImplicitConversionSequence::Indistinguishable;
-
+
// Objective-C++ ARC:
// Prefer qualification conversions not involving a change in lifetime
  //   to qualification conversions that do change lifetime.
- if (SCS1.QualificationIncludesObjCLifetime !=
+ if (SCS1.QualificationIncludesObjCLifetime !=
SCS2.QualificationIncludesObjCLifetime) {
Result = SCS1.QualificationIncludesObjCLifetime
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
-
+
while (S.Context.UnwrapSimilarPointerTypes(T1, T2)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
@@ -4033,7 +4033,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
= ToType1->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType *ToPtr2
= ToType2->getAs<ObjCObjectPointerType>();
-
+
if (FromPtr1 && FromPtr2 && ToPtr1 && ToPtr2) {
// Apply the same conversion ranking rules for Objective-C pointer types
// that we do for C++ pointers to class types. However, we employ the
@@ -4048,7 +4048,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
bool ToAssignRight
= S.Context.canAssignObjCInterfaces(ToPtr2, ToPtr1);
-    // A conversion to a non-id object pointer type or qualified 'id'
+    // A conversion to a non-id object pointer type or qualified 'id'
// type is better than a conversion to 'id'.
if (ToPtr1->isObjCIdType() &&
(ToPtr2->isObjCQualifiedIdType() || ToPtr2->getInterfaceDecl()))
@@ -4056,15 +4056,15 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
if (ToPtr2->isObjCIdType() &&
(ToPtr1->isObjCQualifiedIdType() || ToPtr1->getInterfaceDecl()))
return ImplicitConversionSequence::Better;
-
- // A conversion to a non-id object pointer type is better than a
- // conversion to a qualified 'id' type
+
+ // A conversion to a non-id object pointer type is better than a
+ // conversion to a qualified 'id' type
if (ToPtr1->isObjCQualifiedIdType() && ToPtr2->getInterfaceDecl())
return ImplicitConversionSequence::Worse;
if (ToPtr2->isObjCQualifiedIdType() && ToPtr1->getInterfaceDecl())
return ImplicitConversionSequence::Better;
-
-    // A conversion to a non-Class object pointer type or qualified 'Class'
+
+    // A conversion to a non-Class object pointer type or qualified 'Class'
// type is better than a conversion to 'Class'.
if (ToPtr1->isObjCClassType() &&
(ToPtr2->isObjCQualifiedClassType() || ToPtr2->getInterfaceDecl()))
@@ -4072,8 +4072,8 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
if (ToPtr2->isObjCClassType() &&
(ToPtr1->isObjCQualifiedClassType() || ToPtr1->getInterfaceDecl()))
return ImplicitConversionSequence::Better;
-
- // A conversion to a non-Class object pointer type is better than a
+
+ // A conversion to a non-Class object pointer type is better than a
// conversion to a qualified 'Class' type.
if (ToPtr1->isObjCQualifiedClassType() && ToPtr2->getInterfaceDecl())
return ImplicitConversionSequence::Worse;
@@ -4108,7 +4108,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
: ImplicitConversionSequence::Worse;
}
}
-
+
// Ranking of member-pointer types.
if (SCS1.Second == ICK_Pointer_Member && SCS2.Second == ICK_Pointer_Member &&
FromType1->isMemberPointerType() && FromType2->isMemberPointerType() &&
@@ -4264,9 +4264,9 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
ObjCLifetimeConversion = true;
T1Quals.removeObjCLifetime();
- T2Quals.removeObjCLifetime();
+ T2Quals.removeObjCLifetime();
}
-
+
// MS compiler ignores __unaligned qualifier for references; do the same.
T1Quals.removeUnaligned();
T2Quals.removeUnaligned();
@@ -4313,7 +4313,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
bool DerivedToBase = false;
bool ObjCConversion = false;
bool ObjCLifetimeConversion = false;
-
+
// If we are initializing an rvalue reference, don't permit conversion
// functions that return lvalues.
if (!ConvTemplate && DeclType->isRValueReferenceType()) {
@@ -4322,7 +4322,7 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
if (RefType && !RefType->getPointeeType()->isFunctionType())
continue;
}
-
+
if (!ConvTemplate &&
S.CompareReferenceRelationship(
DeclLoc,
@@ -6051,24 +6051,24 @@ Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
NumNamedArgs = Method->param_size();
if (Args.size() < NumNamedArgs)
continue;
-
+
for (unsigned i = 0; i < NumNamedArgs; i++) {
// We can't do any type-checking on a type-dependent argument.
if (Args[i]->isTypeDependent()) {
Match = false;
break;
}
-
+
ParmVarDecl *param = Method->parameters()[i];
Expr *argExpr = Args[i];
assert(argExpr && "SelectBestMethod(): missing expression");
-
+
// Strip the unbridged-cast placeholder expression off unless it's
// a consumed argument.
if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
!param->hasAttr<CFConsumedAttr>())
argExpr = stripARCUnbridgedCast(argExpr);
-
+
// If the parameter is __unknown_anytype, move on to the next method.
if (param->getType() == Context.UnknownAnyTy) {
Match = false;
@@ -6754,7 +6754,7 @@ static bool isAllowableExplicitConversion(Sema &S,
return S.isObjCPointerConversion(ConvType, ToNonRefType, ConvertedType,
IncompatibleObjC);
}
-
+
/// AddConversionCandidate - Add a C++ conversion function as a
/// candidate in the candidate set (C++ [over.match.conv],
/// C++ [over.match.copy]). From is the expression we're converting from,
@@ -6785,8 +6785,8 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// Per C++ [over.match.conv]p1, [over.match.ref]p1, an explicit conversion
// operator is only a candidate if its return type is the target type or
// can be converted to the target type with a qualification conversion.
- if (Conversion->isExplicit() &&
- !isAllowableExplicitConversion(*this, ConvType, ToType,
+ if (Conversion->isExplicit() &&
+ !isAllowableExplicitConversion(*this, ConvType, ToType,
AllowObjCConversionOnExplicit))
return;
@@ -7230,7 +7230,7 @@ class BuiltinCandidateTypeSet {
/// \brief A flag indicating whether the nullptr type was present in the
/// candidate set.
bool HasNullPtrType;
-
+
/// Sema - The semantic analysis instance where we are building the
/// candidate type set.
Sema &SemaRef;
@@ -7314,14 +7314,14 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
} else {
PointeeTy = PointerTy->getPointeeType();
}
-
+
// Don't add qualified variants of arrays. For one, they're not allowed
// (the qualifier would sink to the element type), and for another, the
// only overload situation where it matters is subscript or pointer +- int,
// and those shouldn't have qualifier variants anyway.
if (PointeeTy->isArrayType())
return true;
-
+
unsigned BaseCVR = PointeeTy.getCVRQualifiers();
bool hasVolatile = VisibleQuals.hasVolatile();
bool hasRestrict = VisibleQuals.hasRestrict();
@@ -7331,24 +7331,24 @@ BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
if ((CVR | BaseCVR) != CVR) continue;
// Skip over volatile if no volatile found anywhere in the types.
if ((CVR & Qualifiers::Volatile) && !hasVolatile) continue;
-
+
// Skip over restrict if no restrict found anywhere in the types, or if
// the type cannot be restrict-qualified.
if ((CVR & Qualifiers::Restrict) &&
(!hasRestrict ||
(!(PointeeTy->isAnyPointerType() || PointeeTy->isReferenceType()))))
continue;
-
+
// Build qualified pointee type.
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
-
+
// Build qualified pointer type.
QualType QPointerTy;
if (!buildObjCPtr)
QPointerTy = Context.getPointerType(QPointeeTy);
else
QPointerTy = Context.getObjCObjectPointerType(QPointeeTy);
-
+
// Insert qualified pointer type.
PointerTypes.insert(QPointerTy);
}
@@ -7705,7 +7705,7 @@ class BuiltinOperatorOverloadBuilder {
else
S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
}
-
+
// Add restrict version only if there are conversions to a restrict type
// and our candidate type is a non-restrict-qualified pointer.
if (HasRestrict && CandidateTy->isAnyPointerType() &&
@@ -7717,7 +7717,7 @@ class BuiltinOperatorOverloadBuilder {
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
else
S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet);
-
+
if (HasVolatile) {
ParamTypes[0]
= S.Context.getLValueReferenceType(
@@ -8274,7 +8274,7 @@ public:
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/isEqualOp);
}
-
+
if (!(*Ptr).isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
@@ -8282,7 +8282,7 @@ public:
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/isEqualOp);
-
+
if (NeedVolatile) {
// volatile restrict version
ParamTypes[0]
@@ -8323,7 +8323,7 @@ public:
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/true);
}
-
+
if (!(*Ptr).isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
@@ -8331,7 +8331,7 @@ public:
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
/*IsAssigmentOperator=*/true);
-
+
if (NeedVolatile) {
// volatile restrict version
ParamTypes[0]
@@ -9424,13 +9424,13 @@ void Sema::NoteAllOverloadCandidates(Expr *OverloadedExpr, QualType DestType,
OverloadExpr *OvlExpr = Ovl.Expression;
for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
- IEnd = OvlExpr->decls_end();
+ IEnd = OvlExpr->decls_end();
I != IEnd; ++I) {
- if (FunctionTemplateDecl *FunTmpl =
+ if (FunctionTemplateDecl *FunTmpl =
dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
NoteOverloadCandidate(*I, FunTmpl->getTemplatedDecl(), DestType,
TakingAddress);
- } else if (FunctionDecl *Fun
+ } else if (FunctionDecl *Fun
= dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
NoteOverloadCandidate(*I, Fun, DestType, TakingAddress);
}
@@ -9608,7 +9608,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< FromTy << ToTy << (unsigned) isObjectArgument << I+1
<< (unsigned) (Cand->Fix.Kind);
-
+
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -9711,7 +9711,7 @@ static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
// right number of arguments, because only overloaded operators have
// the weird behavior of overloading member and non-member functions.
// Just don't report anything.
- if (Fn->isInvalidDecl() &&
+ if (Fn->isInvalidDecl() &&
Fn->getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
return true;
@@ -9735,9 +9735,9 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
"The templated declaration should at least be a function"
" when diagnosing bad template argument deduction due to too many"
" or too few arguments");
-
+
FunctionDecl *Fn = cast<FunctionDecl>(D);
-
+
// TODO: treat calls to a missing default constructor as a special case
const FunctionProtoType *FnTy = Fn->getType()->getAs<FunctionProtoType>();
unsigned MinParams = Fn->getMinRequiredArguments();
@@ -9993,8 +9993,8 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
return;
// FIXME: For generic lambda parameters, check if the function is a lambda
- // call operator, and if so, emit a prettier and more informative
- // diagnostic that mentions 'auto' and lambda in addition to
+ // call operator, and if so, emit a prettier and more informative
+ // diagnostic that mentions 'auto' and lambda in addition to
// (or instead of?) the canonical template type parameters.
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_non_deduced_mismatch)
@@ -10692,16 +10692,16 @@ void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
// R (S::*)(A) --> R (A)
QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
QualType Ret = PossiblyAFunctionType;
- if (const PointerType *ToTypePtr =
+ if (const PointerType *ToTypePtr =
PossiblyAFunctionType->getAs<PointerType>())
Ret = ToTypePtr->getPointeeType();
- else if (const ReferenceType *ToTypeRef =
+ else if (const ReferenceType *ToTypeRef =
PossiblyAFunctionType->getAs<ReferenceType>())
Ret = ToTypeRef->getPointeeType();
else if (const MemberPointerType *MemTypePtr =
- PossiblyAFunctionType->getAs<MemberPointerType>())
- Ret = MemTypePtr->getPointeeType();
- Ret =
+ PossiblyAFunctionType->getAs<MemberPointerType>())
+ Ret = MemTypePtr->getPointeeType();
+ Ret =
Context.getCanonicalType(Ret).getUnqualifiedType();
return Ret;
}
@@ -10727,9 +10727,9 @@ namespace {
class AddressOfFunctionResolver {
Sema& S;
Expr* SourceExpr;
- const QualType& TargetType;
- QualType TargetFunctionType; // Extracted function type from target type
-
+ const QualType& TargetType;
+ QualType TargetFunctionType; // Extracted function type from target type
+
bool Complain;
//DeclAccessPair& ResultFunctionAccessPair;
ASTContext& Context;
@@ -10739,7 +10739,7 @@ class AddressOfFunctionResolver {
bool StaticMemberFunctionFromBoundPointer;
bool HasComplained;
- OverloadExpr::FindResult OvlExprInfo;
+ OverloadExpr::FindResult OvlExprInfo;
OverloadExpr *OvlExpr;
TemplateArgumentListInfo OvlExplicitTemplateArgs;
SmallVector<std::pair<DeclAccessPair, FunctionDecl*>, 4> Matches;
@@ -10786,7 +10786,7 @@ public:
}
return;
}
-
+
if (OvlExpr->hasExplicitTemplateArgs())
OvlExpr->copyTemplateArgumentsInto(OvlExplicitTemplateArgs);
@@ -10864,7 +10864,7 @@ private:
}
// return true if any matching specializations were found
- bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
+ bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
const DeclAccessPair& CurAccessFunPair) {
if (CXXMethodDecl *Method
= dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
@@ -10872,7 +10872,7 @@ private:
// static when converting to member pointer.
if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
return false;
- }
+ }
else if (TargetTypeIsNonStaticMemberFunction)
return false;
@@ -10885,17 +10885,17 @@ private:
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(FailedCandidates.getLocation());
if (Sema::TemplateDeductionResult Result
- = S.DeduceTemplateArguments(FunctionTemplate,
+ = S.DeduceTemplateArguments(FunctionTemplate,
&OvlExplicitTemplateArgs,
- TargetFunctionType, Specialization,
+ TargetFunctionType, Specialization,
Info, /*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
FailedCandidates.addCandidate()
.set(CurAccessFunPair, FunctionTemplate->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, Result, Info));
return false;
- }
-
+ }
+
// Template argument deduction ensures that we have an exact match or
// compatible pointer-to-function arguments that would be adjusted by ICS.
    // This function template specialization works.
@@ -10909,15 +10909,15 @@ private:
Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
return true;
}
-
- bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
+
+ bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
const DeclAccessPair& CurAccessFunPair) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
// Skip non-static functions when converting to pointer, and static
// when converting to member pointer.
if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
return false;
- }
+ }
else if (TargetTypeIsNonStaticMemberFunction)
return false;
@@ -10947,20 +10947,20 @@ private:
return true;
}
}
-
+
return false;
}
-
+
bool FindAllFunctionsThatMatchTargetTypeExactly() {
bool Ret = false;
-
+
// If the overload expression doesn't have the form of a pointer to
// member, don't try to convert it to a pointer-to-member type.
if (IsInvalidFormOfPointerToMemberFunction())
return false;
for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
- E = OvlExpr->decls_end();
+ E = OvlExpr->decls_end();
I != E; ++I) {
// Look through any using declarations to find the underlying function.
NamedDecl *Fn = (*I)->getUnderlyingDecl();
@@ -11103,12 +11103,12 @@ public:
bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); }
int getNumMatches() const { return Matches.size(); }
-
+
FunctionDecl* getMatchingFunctionDecl() const {
if (Matches.size() != 1) return nullptr;
return Matches[0].second;
}
-
+
const DeclAccessPair* getMatchingFunctionAccessPair() const {
if (Matches.size() != 1) return nullptr;
return &Matches[0].first;
@@ -11248,7 +11248,7 @@ bool Sema::resolveAndFixAddressOfOnlyViableOverloadCandidate(
/// If no template-ids are found, no diagnostics are emitted and NULL is
/// returned.
FunctionDecl *
-Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
+Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain,
DeclAccessPair *FoundResult) {
// C++ [over.over]p1:
@@ -11311,9 +11311,9 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
}
return nullptr;
}
-
+
Matched = Specialization;
- if (FoundResult) *FoundResult = I.getPair();
+ if (FoundResult) *FoundResult = I.getPair();
}
if (Matched &&
@@ -11336,8 +11336,8 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
// returns true if 'complain' is set.
bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr, bool doFunctionPointerConverion,
- bool complain, SourceRange OpRangeForComplaining,
- QualType DestTypeForComplaining,
+ bool complain, SourceRange OpRangeForComplaining,
+ QualType DestTypeForComplaining,
unsigned DiagIDForComplaining) {
assert(SrcExpr.get()->getType() == Context.OverloadTy);
@@ -11394,7 +11394,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
Diag(OpRangeForComplaining.getBegin(), DiagIDForComplaining)
<< ovl.Expression->getName()
<< DestTypeForComplaining
- << OpRangeForComplaining
+ << OpRangeForComplaining
<< ovl.Expression->getQualifierLoc().getSourceRange();
NoteAllOverloadCandidates(SrcExpr.get());
@@ -12183,8 +12183,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// TODO: provide better source location info in DNLoc component.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, NamingClass,
- NestedNameSpecifierLoc(), OpNameInfo,
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
/*ADL*/ true, IsOverloaded(Fns),
Fns.begin(), Fns.end());
return new (Context)
@@ -12747,12 +12747,12 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc()))
return ExprError();
// If FoundDecl is different from Method (such as if one is a template
- // and the other a specialization), make sure DiagnoseUseOfDecl is
+ // and the other a specialization), make sure DiagnoseUseOfDecl is
// called on both.
// FIXME: This would be more comprehensively addressed by modifying
// DiagnoseUseOfDecl to accept both the FoundDecl and the decl
// being used.
- if (Method != FoundDecl.getDecl() &&
+ if (Method != FoundDecl.getDecl() &&
DiagnoseUseOfDecl(Method, UnresExpr->getNameLoc()))
return ExprError();
break;
@@ -12775,7 +12775,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
case OR_Deleted:
Diag(UnresExpr->getMemberLoc(), diag::err_ovl_deleted_member_call)
<< Best->Function->isDeleted()
- << DeclName
+ << DeclName
<< getDeletedOrUnavailableSuffix(Best->Function)
<< MemExprE->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
@@ -12848,8 +12848,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
}
- if ((isa<CXXConstructorDecl>(CurContext) ||
- isa<CXXDestructorDecl>(CurContext)) &&
+ if ((isa<CXXConstructorDecl>(CurContext) ||
+ isa<CXXDestructorDecl>(CurContext)) &&
TheCall->getMethodDecl()->isPure()) {
const CXXMethodDecl *MD = TheCall->getMethodDecl();
@@ -12929,7 +12929,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
}
// C++ [over.call.object]p2:
- // In addition, for each (non-explicit in C++0x) conversion function
+ // In addition, for each (non-explicit in C++0x) conversion function
// declared in T of the form
//
// operator conversion-type-id () cv-qualifier;
@@ -13008,7 +13008,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Diag(Object.get()->getLocStart(),
diag::err_ovl_deleted_object_call)
<< Best->Function->isDeleted()
- << Object.get()->getType()
+ << Object.get()->getType()
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
@@ -13031,7 +13031,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Best->FoundDecl);
if (DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc))
return ExprError();
- assert(Conv == Best->FoundDecl.getDecl() &&
+ assert(Conv == Best->FoundDecl.getDecl() &&
"Found Decl & conversion-to-functionptr should be same, right?!");
// We selected one of the surrogate functions that converts the
// object parameter to a function pointer. Perform the conversion
@@ -13242,7 +13242,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
case OR_Deleted:
Diag(OpLoc, diag::err_ovl_deleted_oper)
<< Best->Function->isDeleted()
- << "->"
+ << "->"
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Base->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index f8970744389b..e7315934b515 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -3175,11 +3175,7 @@ getCCForDeclaratorChunk(Sema &S, Declarator &D,
for (const AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
Attr; Attr = Attr->getNext()) {
if (Attr->getKind() == AttributeList::AT_OpenCLKernel) {
- llvm::Triple::ArchType arch = S.Context.getTargetInfo().getTriple().getArch();
- if (arch == llvm::Triple::spir || arch == llvm::Triple::spir64 ||
- arch == llvm::Triple::amdgcn || arch == llvm::Triple::r600) {
- CC = CC_OpenCLKernel;
- }
+ CC = CC_OpenCLKernel;
break;
}
}
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index c931b13f65f3..95cb54f944e4 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -1422,8 +1422,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
getClangFullRepositoryVersion());
}
- if (WritingModule) {
+ if (WritingModule) {
// Module name
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(MODULE_NAME));
@@ -1466,9 +1466,10 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.clear();
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
-
- // Primary module map file.
- AddPath(Map.getModuleMapFileForUniquing(WritingModule)->getName(), Record);
+ AddPath(WritingModule->PresumedModuleMapFile.empty()
+ ? Map.getModuleMapFileForUniquing(WritingModule)->getName()
+ : StringRef(WritingModule->PresumedModuleMapFile),
+ Record);
// Additional module map files.
if (auto *AdditionalModMaps =