author     Dimitry Andric <dim@FreeBSD.org>  2019-08-20 21:35:28 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2019-08-20 21:35:28 +0000
commit     e489f4451b6244af10ec548a442f4d5b6870b0c1 (patch)
tree       5dc6cd37794791820f8ccd8610334583a14ea68a
parent     2298981669bf3bd63335a4be179bc0f96823a8f4 (diff)
download   src-e489f4451b6244af10ec548a442f4d5b6870b0c1.tar.gz
           src-e489f4451b6244af10ec548a442f4d5b6870b0c1.zip

Vendor import of clang release_90 branch r369369
(tag: vendor/clang/clang-release_90-r369369)

Notes:
    svn path=/vendor/clang/dist-release_90/; revision=351305
    svn path=/vendor/clang/clang-release_90-r369369/; revision=351306; tag=vendor/clang/clang-release_90-r369369
-rw-r--r--  include/clang/AST/ExprCXX.h  11
-rw-r--r--  include/clang/Basic/DiagnosticSemaKinds.td  4
-rw-r--r--  include/clang/Basic/TargetInfo.h  8
-rw-r--r--  include/clang/Driver/Options.td  2
-rw-r--r--  include/clang/Frontend/LangStandards.def  1
-rw-r--r--  include/clang/Sema/Sema.h  1
-rw-r--r--  lib/AST/ASTContext.cpp  2
-rw-r--r--  lib/AST/ExprCXX.cpp  7
-rw-r--r--  lib/AST/ItaniumCXXABI.cpp  2
-rw-r--r--  lib/AST/MicrosoftCXXABI.cpp  2
-rw-r--r--  lib/Basic/Targets/AArch64.cpp  16
-rw-r--r--  lib/Basic/Targets/OSTargets.h  5
-rw-r--r--  lib/Basic/Targets/RISCV.cpp  17
-rw-r--r--  lib/Basic/Targets/RISCV.h  6
-rw-r--r--  lib/Basic/Targets/SPIR.h  2
-rw-r--r--  lib/Basic/Targets/X86.h  6
-rw-r--r--  lib/Basic/Version.cpp  2
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp  290
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp  19
-rw-r--r--  lib/CodeGen/CGStmt.cpp  8
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp  7
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp  7
-rw-r--r--  lib/CodeGen/TargetInfo.cpp  289
-rw-r--r--  lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp  3
-rw-r--r--  lib/Driver/ToolChains/Clang.cpp  58
-rw-r--r--  lib/Frontend/ASTUnit.cpp  1
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp  2
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp  18
-rw-r--r--  lib/Headers/emmintrin.h  6
-rw-r--r--  lib/Headers/opencl-c-base.h  17
-rw-r--r--  lib/Headers/opencl-c.h  210
-rw-r--r--  lib/Lex/PPDirectives.cpp  14
-rw-r--r--  lib/Sema/Sema.cpp  1
-rw-r--r--  lib/Sema/SemaDecl.cpp  32
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp  30
-rw-r--r--  lib/Sema/SemaExprCXX.cpp  6
-rw-r--r--  lib/Sema/SemaInit.cpp  16
-rw-r--r--  lib/Sema/SemaOpenMP.cpp  107
-rw-r--r--  lib/Sema/SemaStmtAsm.cpp  32
-rw-r--r--  lib/Sema/SemaTemplate.cpp  20
-rw-r--r--  lib/StaticAnalyzer/Checkers/IteratorChecker.cpp  12
41 files changed, 831 insertions, 468 deletions
diff --git a/include/clang/AST/ExprCXX.h b/include/clang/AST/ExprCXX.h
index 28ed6cdfde14..faddbc6d9675 100644
--- a/include/clang/AST/ExprCXX.h
+++ b/include/clang/AST/ExprCXX.h
@@ -185,15 +185,20 @@ public:
static CXXMemberCallExpr *CreateEmpty(const ASTContext &Ctx, unsigned NumArgs,
EmptyShell Empty);
- /// Retrieves the implicit object argument for the member call.
+ /// Retrieve the implicit object argument for the member call.
///
/// For example, in "x.f(5)", this returns the sub-expression "x".
Expr *getImplicitObjectArgument() const;
- /// Retrieves the declaration of the called method.
+ /// Retrieve the type of the object argument.
+ ///
+ /// Note that this always returns a non-pointer type.
+ QualType getObjectType() const;
+
+ /// Retrieve the declaration of the called method.
CXXMethodDecl *getMethodDecl() const;
- /// Retrieves the CXXRecordDecl for the underlying type of
+ /// Retrieve the CXXRecordDecl for the underlying type of
/// the implicit object argument.
///
/// Note that this is may not be the same declaration as that of the class
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td
index effcbad78b23..275c4e4365d1 100644
--- a/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/include/clang/Basic/DiagnosticSemaKinds.td
@@ -598,6 +598,10 @@ def ext_implicit_lib_function_decl : ExtWarn<
def note_include_header_or_declare : Note<
"include the header <%0> or explicitly provide a declaration for '%1'">;
def note_previous_builtin_declaration : Note<"%0 is a builtin with type %1">;
+def warn_implicit_decl_no_jmp_buf
+ : Warning<"declaration of built-in function '%0' requires the declaration"
+ " of the 'jmp_buf' type, commonly provided in the header <setjmp.h>.">,
+ InGroup<DiagGroup<"incomplete-setjmp-declaration">>;
def warn_implicit_decl_requires_sysheader : Warning<
"declaration of built-in function '%1' requires inclusion of the header <%0>">,
InGroup<BuiltinRequiresHeader>;
diff --git a/include/clang/Basic/TargetInfo.h b/include/clang/Basic/TargetInfo.h
index 7a8384f5fbc0..c6c966dfbe2c 100644
--- a/include/clang/Basic/TargetInfo.h
+++ b/include/clang/Basic/TargetInfo.h
@@ -1249,15 +1249,9 @@ public:
bool isBigEndian() const { return BigEndian; }
bool isLittleEndian() const { return !BigEndian; }
- enum CallingConvMethodType {
- CCMT_Unknown,
- CCMT_Member,
- CCMT_NonMember
- };
-
/// Gets the default calling convention for the given target and
/// declaration context.
- virtual CallingConv getDefaultCallingConv(CallingConvMethodType MT) const {
+ virtual CallingConv getDefaultCallingConv() const {
// Not all targets will specify an explicit calling convention that we can
// express. This will always do the right thing, even though it's not
// an explicit calling convention.
diff --git a/include/clang/Driver/Options.td b/include/clang/Driver/Options.td
index dfd27fab796e..4ea8bfff0973 100644
--- a/include/clang/Driver/Options.td
+++ b/include/clang/Driver/Options.td
@@ -518,7 +518,7 @@ def cl_mad_enable : Flag<["-"], "cl-mad-enable">, Group<opencl_Group>, Flags<[CC
def cl_no_signed_zeros : Flag<["-"], "cl-no-signed-zeros">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow use of less precise no signed zeros computations in the generated binary.">;
def cl_std_EQ : Joined<["-"], "cl-std=">, Group<opencl_Group>, Flags<[CC1Option]>,
- HelpText<"OpenCL language standard to compile for.">, Values<"cl,CL,cl1.1,CL1.1,cl1.2,CL1.2,cl2.0,CL2.0,c++">;
+ HelpText<"OpenCL language standard to compile for.">, Values<"cl,CL,cl1.1,CL1.1,cl1.2,CL1.2,cl2.0,CL2.0,clc++,CLC++">;
def cl_denorms_are_zero : Flag<["-"], "cl-denorms-are-zero">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow denormals to be flushed to zero.">;
def cl_fp32_correctly_rounded_divide_sqrt : Flag<["-"], "cl-fp32-correctly-rounded-divide-sqrt">, Group<opencl_Group>, Flags<[CC1Option]>,
diff --git a/include/clang/Frontend/LangStandards.def b/include/clang/Frontend/LangStandards.def
index 0964e9b90a03..72ea23562ebd 100644
--- a/include/clang/Frontend/LangStandards.def
+++ b/include/clang/Frontend/LangStandards.def
@@ -174,6 +174,7 @@ LANGSTANDARD_ALIAS_DEPR(opencl10, "CL")
LANGSTANDARD_ALIAS_DEPR(opencl11, "CL1.1")
LANGSTANDARD_ALIAS_DEPR(opencl12, "CL1.2")
LANGSTANDARD_ALIAS_DEPR(opencl20, "CL2.0")
+LANGSTANDARD_ALIAS_DEPR(openclcpp, "CLC++")
// CUDA
LANGSTANDARD(cuda, "cuda", CUDA, "NVIDIA CUDA(tm)",
diff --git a/include/clang/Sema/Sema.h b/include/clang/Sema/Sema.h
index af762f74d745..e6c63fd9c015 100644
--- a/include/clang/Sema/Sema.h
+++ b/include/clang/Sema/Sema.h
@@ -11165,6 +11165,7 @@ public:
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
+ SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
class SavePendingParsedClassStateRAII {
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 0d69eb90abaf..468c7f47657d 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -10035,7 +10035,7 @@ CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
break;
}
}
- return Target->getDefaultCallingConv(TargetInfo::CCMT_Unknown);
+ return Target->getDefaultCallingConv();
}
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index b30f785ba8f5..c5f86a4cc12b 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -651,6 +651,13 @@ Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
return nullptr;
}
+QualType CXXMemberCallExpr::getObjectType() const {
+ QualType Ty = getImplicitObjectArgument()->getType();
+ if (Ty->isPointerType())
+ Ty = Ty->getPointeeType();
+ return Ty;
+}
+
CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
if (const auto *MemExpr = dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
return cast<CXXMethodDecl>(MemExpr->getMemberDecl());
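The new accessor above centralizes the pointer-stripping that callers previously did by hand (see the CodeGen ItaniumCXXABI and MicrosoftCXXABI changes further down). A minimal usage sketch for a Clang-based tool, not part of the commit:

    #include "clang/AST/ExprCXX.h"

    using namespace clang;

    // For "x.f(5)" this yields the type of 'x'; for "px->f(5)" it yields the
    // pointee type, so the result is never a pointer type.
    static QualType objectClassType(const CXXMemberCallExpr *CE) {
      return CE->getObjectType();
    }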
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index 727a905d08a1..77fb5a1d33b3 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -177,7 +177,7 @@ public:
if (!isVariadic && T.isWindowsGNUEnvironment() &&
T.getArch() == llvm::Triple::x86)
return CC_X86ThisCall;
- return CC_C;
+ return Context.getTargetInfo().getDefaultCallingConv();
}
// We cheat and just check that the class has a vtable pointer, and that it's
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index 4dc4156df9ca..444e55f777fa 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -82,7 +82,7 @@ public:
if (!isVariadic &&
Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
return CC_X86ThisCall;
- return CC_C;
+ return Context.getTargetInfo().getDefaultCallingConv();
}
bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
diff --git a/lib/Basic/Targets/AArch64.cpp b/lib/Basic/Targets/AArch64.cpp
index 74ac69ab8946..25f2b7b35f41 100644
--- a/lib/Basic/Targets/AArch64.cpp
+++ b/lib/Basic/Targets/AArch64.cpp
@@ -196,9 +196,6 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_NEON_FP", "0xE");
}
- if (FPU & SveMode)
- Builder.defineMacro("__ARM_FEATURE_SVE", "1");
-
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
@@ -351,10 +348,19 @@ const char *const AArch64TargetInfo::GCCRegNames[] = {
"d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
"d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
- // Vector registers
+ // Neon vector registers
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
"v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
- "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
+ "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+
+ // SVE vector registers
+ "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
+ "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
+ "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
+
+ // SVE predicate registers
+ "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
+ "p11", "p12", "p13", "p14", "p15"
};
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
diff --git a/lib/Basic/Targets/OSTargets.h b/lib/Basic/Targets/OSTargets.h
index 8542311ffa41..c0373ffaa444 100644
--- a/lib/Basic/Targets/OSTargets.h
+++ b/lib/Basic/Targets/OSTargets.h
@@ -618,8 +618,11 @@ protected:
Builder.defineMacro("_XOPEN_SOURCE", "600");
else
Builder.defineMacro("_XOPEN_SOURCE", "500");
- if (Opts.CPlusPlus)
+ if (Opts.CPlusPlus) {
Builder.defineMacro("__C99FEATURES__");
+ Builder.defineMacro("_FILE_OFFSET_BITS", "64");
+ }
+ // GCC restricts the next two to C++.
Builder.defineMacro("_LARGEFILE_SOURCE");
Builder.defineMacro("_LARGEFILE64_SOURCE");
Builder.defineMacro("__EXTENSIONS__");
diff --git a/lib/Basic/Targets/RISCV.cpp b/lib/Basic/Targets/RISCV.cpp
index f800bb0b25da..939ac46d671b 100644
--- a/lib/Basic/Targets/RISCV.cpp
+++ b/lib/Basic/Targets/RISCV.cpp
@@ -56,6 +56,10 @@ bool RISCVTargetInfo::validateAsmConstraint(
// A 5-bit unsigned immediate for CSR access instructions.
Info.setRequiresImmediate(0, 31);
return true;
+ case 'f':
+ // A floating-point register.
+ Info.setAllowsRegister();
+ return true;
}
}
@@ -65,9 +69,18 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__riscv");
bool Is64Bit = getTriple().getArch() == llvm::Triple::riscv64;
Builder.defineMacro("__riscv_xlen", Is64Bit ? "64" : "32");
- // TODO: modify when more code models and ABIs are supported.
+ // TODO: modify when more code models are supported.
Builder.defineMacro("__riscv_cmodel_medlow");
- Builder.defineMacro("__riscv_float_abi_soft");
+
+ StringRef ABIName = getABI();
+ if (ABIName == "ilp32f" || ABIName == "lp64f")
+ Builder.defineMacro("__riscv_float_abi_single");
+ else if (ABIName == "ilp32d" || ABIName == "lp64d")
+ Builder.defineMacro("__riscv_float_abi_double");
+ else if (ABIName == "ilp32e")
+ Builder.defineMacro("__riscv_abi_rve");
+ else
+ Builder.defineMacro("__riscv_float_abi_soft");
if (HasM) {
Builder.defineMacro("__riscv_mul");
diff --git a/lib/Basic/Targets/RISCV.h b/lib/Basic/Targets/RISCV.h
index bc814b79ce51..ce193feaeb98 100644
--- a/lib/Basic/Targets/RISCV.h
+++ b/lib/Basic/Targets/RISCV.h
@@ -87,8 +87,7 @@ public:
}
bool setABI(const std::string &Name) override {
- // TODO: support ilp32f and ilp32d ABIs.
- if (Name == "ilp32") {
+ if (Name == "ilp32" || Name == "ilp32f" || Name == "ilp32d") {
ABI = Name;
return true;
}
@@ -105,8 +104,7 @@ public:
}
bool setABI(const std::string &Name) override {
- // TODO: support lp64f and lp64d ABIs.
- if (Name == "lp64") {
+ if (Name == "lp64" || Name == "lp64f" || Name == "lp64d") {
ABI = Name;
return true;
}
diff --git a/lib/Basic/Targets/SPIR.h b/lib/Basic/Targets/SPIR.h
index 6023c868dbdc..802ccf8b671e 100644
--- a/lib/Basic/Targets/SPIR.h
+++ b/lib/Basic/Targets/SPIR.h
@@ -88,7 +88,7 @@ public:
: CCCR_Warning;
}
- CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
+ CallingConv getDefaultCallingConv() const override {
return CC_SpirFunction;
}
diff --git a/lib/Basic/Targets/X86.h b/lib/Basic/Targets/X86.h
index 588b6d3da1d6..dd1e7db6c81e 100644
--- a/lib/Basic/Targets/X86.h
+++ b/lib/Basic/Targets/X86.h
@@ -320,8 +320,8 @@ public:
}
}
- CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
- return MT == CCMT_Member ? CC_X86ThisCall : CC_C;
+ CallingConv getDefaultCallingConv() const override {
+ return CC_C;
}
bool hasSjLjLowering() const override { return true; }
@@ -659,7 +659,7 @@ public:
}
}
- CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override {
+ CallingConv getDefaultCallingConv() const override {
return CC_C;
}
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index 3006ca33f213..9bf8687a8765 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -35,7 +35,7 @@ std::string getClangRepositoryPath() {
// If the CLANG_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/branches/release_90/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a300bab49f9c..cadce507412b 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -8011,6 +8011,151 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
}
+ case AArch64::BI_BitScanForward:
+ case AArch64::BI_BitScanForward64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
+ case AArch64::BI_BitScanReverse:
+ case AArch64::BI_BitScanReverse64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+ case AArch64::BI_InterlockedAnd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+ case AArch64::BI_InterlockedExchange64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+ case AArch64::BI_InterlockedExchangeAdd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+ case AArch64::BI_InterlockedExchangeSub64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+ case AArch64::BI_InterlockedOr64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+ case AArch64::BI_InterlockedXor64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+ case AArch64::BI_InterlockedDecrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+ case AArch64::BI_InterlockedIncrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+ case AArch64::BI_InterlockedExchangeAdd8_acq:
+ case AArch64::BI_InterlockedExchangeAdd16_acq:
+ case AArch64::BI_InterlockedExchangeAdd_acq:
+ case AArch64::BI_InterlockedExchangeAdd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
+ case AArch64::BI_InterlockedExchangeAdd8_rel:
+ case AArch64::BI_InterlockedExchangeAdd16_rel:
+ case AArch64::BI_InterlockedExchangeAdd_rel:
+ case AArch64::BI_InterlockedExchangeAdd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
+ case AArch64::BI_InterlockedExchangeAdd8_nf:
+ case AArch64::BI_InterlockedExchangeAdd16_nf:
+ case AArch64::BI_InterlockedExchangeAdd_nf:
+ case AArch64::BI_InterlockedExchangeAdd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
+ case AArch64::BI_InterlockedExchange8_acq:
+ case AArch64::BI_InterlockedExchange16_acq:
+ case AArch64::BI_InterlockedExchange_acq:
+ case AArch64::BI_InterlockedExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
+ case AArch64::BI_InterlockedExchange8_rel:
+ case AArch64::BI_InterlockedExchange16_rel:
+ case AArch64::BI_InterlockedExchange_rel:
+ case AArch64::BI_InterlockedExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
+ case AArch64::BI_InterlockedExchange8_nf:
+ case AArch64::BI_InterlockedExchange16_nf:
+ case AArch64::BI_InterlockedExchange_nf:
+ case AArch64::BI_InterlockedExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case AArch64::BI_InterlockedCompareExchange8_acq:
+ case AArch64::BI_InterlockedCompareExchange16_acq:
+ case AArch64::BI_InterlockedCompareExchange_acq:
+ case AArch64::BI_InterlockedCompareExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case AArch64::BI_InterlockedCompareExchange8_rel:
+ case AArch64::BI_InterlockedCompareExchange16_rel:
+ case AArch64::BI_InterlockedCompareExchange_rel:
+ case AArch64::BI_InterlockedCompareExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case AArch64::BI_InterlockedCompareExchange8_nf:
+ case AArch64::BI_InterlockedCompareExchange16_nf:
+ case AArch64::BI_InterlockedCompareExchange_nf:
+ case AArch64::BI_InterlockedCompareExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
+ case AArch64::BI_InterlockedOr8_acq:
+ case AArch64::BI_InterlockedOr16_acq:
+ case AArch64::BI_InterlockedOr_acq:
+ case AArch64::BI_InterlockedOr64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
+ case AArch64::BI_InterlockedOr8_rel:
+ case AArch64::BI_InterlockedOr16_rel:
+ case AArch64::BI_InterlockedOr_rel:
+ case AArch64::BI_InterlockedOr64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
+ case AArch64::BI_InterlockedOr8_nf:
+ case AArch64::BI_InterlockedOr16_nf:
+ case AArch64::BI_InterlockedOr_nf:
+ case AArch64::BI_InterlockedOr64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
+ case AArch64::BI_InterlockedXor8_acq:
+ case AArch64::BI_InterlockedXor16_acq:
+ case AArch64::BI_InterlockedXor_acq:
+ case AArch64::BI_InterlockedXor64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
+ case AArch64::BI_InterlockedXor8_rel:
+ case AArch64::BI_InterlockedXor16_rel:
+ case AArch64::BI_InterlockedXor_rel:
+ case AArch64::BI_InterlockedXor64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
+ case AArch64::BI_InterlockedXor8_nf:
+ case AArch64::BI_InterlockedXor16_nf:
+ case AArch64::BI_InterlockedXor_nf:
+ case AArch64::BI_InterlockedXor64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
+ case AArch64::BI_InterlockedAnd8_acq:
+ case AArch64::BI_InterlockedAnd16_acq:
+ case AArch64::BI_InterlockedAnd_acq:
+ case AArch64::BI_InterlockedAnd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
+ case AArch64::BI_InterlockedAnd8_rel:
+ case AArch64::BI_InterlockedAnd16_rel:
+ case AArch64::BI_InterlockedAnd_rel:
+ case AArch64::BI_InterlockedAnd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
+ case AArch64::BI_InterlockedAnd8_nf:
+ case AArch64::BI_InterlockedAnd16_nf:
+ case AArch64::BI_InterlockedAnd_nf:
+ case AArch64::BI_InterlockedAnd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
+ case AArch64::BI_InterlockedIncrement16_acq:
+ case AArch64::BI_InterlockedIncrement_acq:
+ case AArch64::BI_InterlockedIncrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
+ case AArch64::BI_InterlockedIncrement16_rel:
+ case AArch64::BI_InterlockedIncrement_rel:
+ case AArch64::BI_InterlockedIncrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
+ case AArch64::BI_InterlockedIncrement16_nf:
+ case AArch64::BI_InterlockedIncrement_nf:
+ case AArch64::BI_InterlockedIncrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
+ case AArch64::BI_InterlockedDecrement16_acq:
+ case AArch64::BI_InterlockedDecrement_acq:
+ case AArch64::BI_InterlockedDecrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
+ case AArch64::BI_InterlockedDecrement16_rel:
+ case AArch64::BI_InterlockedDecrement_rel:
+ case AArch64::BI_InterlockedDecrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
+ case AArch64::BI_InterlockedDecrement16_nf:
+ case AArch64::BI_InterlockedDecrement_nf:
+ case AArch64::BI_InterlockedDecrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
+
+ case AArch64::BI_InterlockedAdd: {
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ Value *Arg1 = EmitScalarExpr(E->getArg(1));
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add, Arg0, Arg1,
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateAdd(RMWI, Arg1);
+ }
}
llvm::VectorType *VTy = GetNeonType(this, Type);
@@ -9128,151 +9273,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::aarch64_neon_suqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
}
- case AArch64::BI_BitScanForward:
- case AArch64::BI_BitScanForward64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
- case AArch64::BI_BitScanReverse:
- case AArch64::BI_BitScanReverse64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
- case AArch64::BI_InterlockedAnd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
- case AArch64::BI_InterlockedExchange64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
- case AArch64::BI_InterlockedExchangeAdd64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
- case AArch64::BI_InterlockedExchangeSub64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
- case AArch64::BI_InterlockedOr64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
- case AArch64::BI_InterlockedXor64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
- case AArch64::BI_InterlockedDecrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
- case AArch64::BI_InterlockedIncrement64:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
- case AArch64::BI_InterlockedExchangeAdd8_acq:
- case AArch64::BI_InterlockedExchangeAdd16_acq:
- case AArch64::BI_InterlockedExchangeAdd_acq:
- case AArch64::BI_InterlockedExchangeAdd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
- case AArch64::BI_InterlockedExchangeAdd8_rel:
- case AArch64::BI_InterlockedExchangeAdd16_rel:
- case AArch64::BI_InterlockedExchangeAdd_rel:
- case AArch64::BI_InterlockedExchangeAdd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
- case AArch64::BI_InterlockedExchangeAdd8_nf:
- case AArch64::BI_InterlockedExchangeAdd16_nf:
- case AArch64::BI_InterlockedExchangeAdd_nf:
- case AArch64::BI_InterlockedExchangeAdd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
- case AArch64::BI_InterlockedExchange8_acq:
- case AArch64::BI_InterlockedExchange16_acq:
- case AArch64::BI_InterlockedExchange_acq:
- case AArch64::BI_InterlockedExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
- case AArch64::BI_InterlockedExchange8_rel:
- case AArch64::BI_InterlockedExchange16_rel:
- case AArch64::BI_InterlockedExchange_rel:
- case AArch64::BI_InterlockedExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
- case AArch64::BI_InterlockedExchange8_nf:
- case AArch64::BI_InterlockedExchange16_nf:
- case AArch64::BI_InterlockedExchange_nf:
- case AArch64::BI_InterlockedExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
- case AArch64::BI_InterlockedCompareExchange8_acq:
- case AArch64::BI_InterlockedCompareExchange16_acq:
- case AArch64::BI_InterlockedCompareExchange_acq:
- case AArch64::BI_InterlockedCompareExchange64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
- case AArch64::BI_InterlockedCompareExchange8_rel:
- case AArch64::BI_InterlockedCompareExchange16_rel:
- case AArch64::BI_InterlockedCompareExchange_rel:
- case AArch64::BI_InterlockedCompareExchange64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
- case AArch64::BI_InterlockedCompareExchange8_nf:
- case AArch64::BI_InterlockedCompareExchange16_nf:
- case AArch64::BI_InterlockedCompareExchange_nf:
- case AArch64::BI_InterlockedCompareExchange64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
- case AArch64::BI_InterlockedOr8_acq:
- case AArch64::BI_InterlockedOr16_acq:
- case AArch64::BI_InterlockedOr_acq:
- case AArch64::BI_InterlockedOr64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
- case AArch64::BI_InterlockedOr8_rel:
- case AArch64::BI_InterlockedOr16_rel:
- case AArch64::BI_InterlockedOr_rel:
- case AArch64::BI_InterlockedOr64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
- case AArch64::BI_InterlockedOr8_nf:
- case AArch64::BI_InterlockedOr16_nf:
- case AArch64::BI_InterlockedOr_nf:
- case AArch64::BI_InterlockedOr64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
- case AArch64::BI_InterlockedXor8_acq:
- case AArch64::BI_InterlockedXor16_acq:
- case AArch64::BI_InterlockedXor_acq:
- case AArch64::BI_InterlockedXor64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
- case AArch64::BI_InterlockedXor8_rel:
- case AArch64::BI_InterlockedXor16_rel:
- case AArch64::BI_InterlockedXor_rel:
- case AArch64::BI_InterlockedXor64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
- case AArch64::BI_InterlockedXor8_nf:
- case AArch64::BI_InterlockedXor16_nf:
- case AArch64::BI_InterlockedXor_nf:
- case AArch64::BI_InterlockedXor64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
- case AArch64::BI_InterlockedAnd8_acq:
- case AArch64::BI_InterlockedAnd16_acq:
- case AArch64::BI_InterlockedAnd_acq:
- case AArch64::BI_InterlockedAnd64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
- case AArch64::BI_InterlockedAnd8_rel:
- case AArch64::BI_InterlockedAnd16_rel:
- case AArch64::BI_InterlockedAnd_rel:
- case AArch64::BI_InterlockedAnd64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
- case AArch64::BI_InterlockedAnd8_nf:
- case AArch64::BI_InterlockedAnd16_nf:
- case AArch64::BI_InterlockedAnd_nf:
- case AArch64::BI_InterlockedAnd64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
- case AArch64::BI_InterlockedIncrement16_acq:
- case AArch64::BI_InterlockedIncrement_acq:
- case AArch64::BI_InterlockedIncrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
- case AArch64::BI_InterlockedIncrement16_rel:
- case AArch64::BI_InterlockedIncrement_rel:
- case AArch64::BI_InterlockedIncrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
- case AArch64::BI_InterlockedIncrement16_nf:
- case AArch64::BI_InterlockedIncrement_nf:
- case AArch64::BI_InterlockedIncrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
- case AArch64::BI_InterlockedDecrement16_acq:
- case AArch64::BI_InterlockedDecrement_acq:
- case AArch64::BI_InterlockedDecrement64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
- case AArch64::BI_InterlockedDecrement16_rel:
- case AArch64::BI_InterlockedDecrement_rel:
- case AArch64::BI_InterlockedDecrement64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
- case AArch64::BI_InterlockedDecrement16_nf:
- case AArch64::BI_InterlockedDecrement_nf:
- case AArch64::BI_InterlockedDecrement64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
-
- case AArch64::BI_InterlockedAdd: {
- Value *Arg0 = EmitScalarExpr(E->getArg(0));
- Value *Arg1 = EmitScalarExpr(E->getArg(1));
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add, Arg0, Arg1,
- llvm::AtomicOrdering::SequentiallyConsistent);
- return Builder.CreateAdd(RMWI, Arg1);
- }
}
}
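For reference, the relocated _InterlockedAdd handling above emits a sequentially consistent 'atomicrmw add' (which yields the old value) followed by an ordinary add, so the intrinsic returns the updated value. A portable C++ sketch of the equivalent behavior, not part of the commit:

    #include <atomic>

    // Stand-in for MSVC's _InterlockedAdd on AArch64: fetch_add returns the
    // old value; adding Value back produces the new value that is returned.
    long interlocked_add_equivalent(std::atomic<long> &Addend, long Value) {
      long Old = Addend.fetch_add(Value, std::memory_order_seq_cst);
      return Old + Value;
    }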
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 695facd50b67..0a57870a7c58 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -1495,6 +1495,13 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// initializers throws an exception.
SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
llvm::Instruction *cleanupDominator = nullptr;
+ auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
+ cleanups.push_back(cleanup);
+ if (!cleanupDominator) // create placeholder once needed
+ cleanupDominator = CGF.Builder.CreateAlignedLoad(
+ CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
+ CharUnits::One());
+ };
unsigned curInitIndex = 0;
@@ -1519,7 +1526,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (QualType::DestructionKind dtorKind =
Base.getType().isDestructedType()) {
CGF.pushDestroy(dtorKind, V, Base.getType());
- cleanups.push_back(CGF.EHStack.stable_begin());
+ addCleanup(CGF.EHStack.stable_begin());
}
}
}
@@ -1596,15 +1603,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
= field->getType().isDestructedType()) {
assert(LV.isSimple());
if (CGF.needsEHCleanup(dtorKind)) {
- if (!cleanupDominator)
- cleanupDominator = CGF.Builder.CreateAlignedLoad(
- CGF.Int8Ty,
- llvm::Constant::getNullValue(CGF.Int8PtrTy),
- CharUnits::One()); // placeholder
-
CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
CGF.getDestroyer(dtorKind), false);
- cleanups.push_back(CGF.EHStack.stable_begin());
+ addCleanup(CGF.EHStack.stable_begin());
pushedCleanup = true;
}
}
@@ -1620,6 +1621,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Deactivate all the partial cleanups in reverse order, which
// generally means popping them.
+ assert((cleanupDominator || cleanups.empty()) &&
+ "Missing cleanupDominator before deactivating cleanup blocks");
for (unsigned i = cleanups.size(); i != 0; --i)
CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index dd0dea5b94a0..40ab79509f98 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -1846,11 +1846,9 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
llvm::APSInt IntResult;
- if (!EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
- getContext()))
- llvm_unreachable("Invalid immediate constant!");
-
- return llvm::ConstantInt::get(getLLVMContext(), IntResult);
+ if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
+ getContext()))
+ return llvm::ConstantInt::get(getLLVMContext(), IntResult);
}
Expr::EvalResult Result;
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 3b2413d960d6..51a2561a4552 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1755,10 +1755,11 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
QualType ThisTy;
- if (CE)
- ThisTy = CE->getImplicitObjectArgument()->getType()->getPointeeType();
- else
+ if (CE) {
+ ThisTy = CE->getObjectType();
+ } else {
ThisTy = D->getDestroyedType();
+ }
CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
QualType(), nullptr);
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index fa34414de5da..ca06ad3f042b 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -1921,10 +1921,11 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
DtorType == Dtor_Deleting);
QualType ThisTy;
- if (CE)
- ThisTy = CE->getImplicitObjectArgument()->getType()->getPointeeType();
- else
+ if (CE) {
+ ThisTy = CE->getObjectType();
+ } else {
ThisTy = D->getDestroyedType();
+ }
This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
RValue RV = CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 5da988fb8a3c..1e1038dbfe95 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -9188,25 +9188,45 @@ static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
- unsigned XLen; // Size of the integer ('x') registers in bits.
+ // Size of the integer ('x') registers in bits.
+ unsigned XLen;
+ // Size of the floating point ('f') registers in bits. Note that the target
+ // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
+ // with soft float ABI has FLen==0).
+ unsigned FLen;
static const int NumArgGPRs = 8;
+ static const int NumArgFPRs = 8;
+ bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
+ llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off) const;
public:
- RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
- : DefaultABIInfo(CGT), XLen(XLen) {}
+ RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
+ : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
// DefaultABIInfo's classifyReturnType and classifyArgumentType are
// non-virtual, but computeInfo is virtual, so we overload it.
void computeInfo(CGFunctionInfo &FI) const override;
- ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed,
- int &ArgGPRsLeft) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
+ int &ArgFPRsLeft) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
ABIArgInfo extendType(QualType Ty) const;
+
+ bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+ CharUnits &Field1Off, llvm::Type *&Field2Ty,
+ CharUnits &Field2Off, int &NeededArgGPRs,
+ int &NeededArgFPRs) const;
+ ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
+ CharUnits Field1Off,
+ llvm::Type *Field2Ty,
+ CharUnits Field2Off) const;
};
} // end anonymous namespace
@@ -9228,18 +9248,215 @@ void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
// different for variadic arguments, we must also track whether we are
// examining a vararg or not.
int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
int NumFixedArgs = FI.getNumRequiredArgs();
int ArgNum = 0;
for (auto &ArgInfo : FI.arguments()) {
bool IsFixed = ArgNum < NumFixedArgs;
- ArgInfo.info = classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft);
+ ArgInfo.info =
+ classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
ArgNum++;
}
}
+// Returns true if the struct is a potential candidate for the floating point
+// calling convention. If this function returns true, the caller is
+// responsible for checking that if there is only a single field then that
+// field is a float.
+bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
+ llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off) const {
+ bool IsInt = Ty->isIntegralOrEnumerationType();
+ bool IsFloat = Ty->isRealFloatingType();
+
+ if (IsInt || IsFloat) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (IsInt && Size > XLen)
+ return false;
+ // Can't be eligible if larger than the FP registers. Half precision isn't
+ // currently supported on RISC-V and the ABI hasn't been confirmed, so
+ // default to the integer ABI in that case.
+ if (IsFloat && (Size > FLen || Size < 32))
+ return false;
+ // Can't be eligible if an integer type was already found (int+int pairs
+ // are not eligible).
+ if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
+ return false;
+ if (!Field1Ty) {
+ Field1Ty = CGT.ConvertType(Ty);
+ Field1Off = CurOff;
+ return true;
+ }
+ if (!Field2Ty) {
+ Field2Ty = CGT.ConvertType(Ty);
+ Field2Off = CurOff;
+ return true;
+ }
+ return false;
+ }
+
+ if (auto CTy = Ty->getAs<ComplexType>()) {
+ if (Field1Ty)
+ return false;
+ QualType EltTy = CTy->getElementType();
+ if (getContext().getTypeSize(EltTy) > FLen)
+ return false;
+ Field1Ty = CGT.ConvertType(EltTy);
+ Field1Off = CurOff;
+ assert(CurOff.isZero() && "Unexpected offset for first field");
+ Field2Ty = Field1Ty;
+ Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
+ return true;
+ }
+
+ if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t ArraySize = ATy->getSize().getZExtValue();
+ QualType EltTy = ATy->getElementType();
+ CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
+ for (uint64_t i = 0; i < ArraySize; ++i) {
+ bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
+ Field1Off, Field2Ty, Field2Off);
+ if (!Ret)
+ return false;
+ CurOff += EltSize;
+ }
+ return true;
+ }
+
+ if (const auto *RTy = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are not eligible for the FP calling convention.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT.getCXXABI()))
+ return false;
+ if (isEmptyRecord(getContext(), Ty, true))
+ return true;
+ const RecordDecl *RD = RTy->getDecl();
+ // Unions aren't eligible unless they're empty (which is caught above).
+ if (RD->isUnion())
+ return false;
+ int ZeroWidthBitFieldCount = 0;
+ for (const FieldDecl *FD : RD->fields()) {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
+ QualType QTy = FD->getType();
+ if (FD->isBitField()) {
+ unsigned BitWidth = FD->getBitWidthValue(getContext());
+ // Allow a bitfield with a type greater than XLen as long as the
+ // bitwidth is XLen or less.
+ if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
+ QTy = getContext().getIntTypeForBitwidth(XLen, false);
+ if (BitWidth == 0) {
+ ZeroWidthBitFieldCount++;
+ continue;
+ }
+ }
+
+ bool Ret = detectFPCCEligibleStructHelper(
+ QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
+ Field1Ty, Field1Off, Field2Ty, Field2Off);
+ if (!Ret)
+ return false;
+
+ // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
+ // or int+fp structs, but are ignored for a struct with an fp field and
+ // any number of zero-width bitfields.
+ if (Field2Ty && ZeroWidthBitFieldCount > 0)
+ return false;
+ }
+ return Field1Ty != nullptr;
+ }
+
+ return false;
+}
+
+// Determine if a struct is eligible for passing according to the floating
+// point calling convention (i.e., when flattened it contains a single fp
+// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
+// NeededArgGPRs are incremented appropriately.
+bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off,
+ int &NeededArgGPRs,
+ int &NeededArgFPRs) const {
+ Field1Ty = nullptr;
+ Field2Ty = nullptr;
+ NeededArgGPRs = 0;
+ NeededArgFPRs = 0;
+ bool IsCandidate = detectFPCCEligibleStructHelper(
+ Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
+ // Not really a candidate if we have a single int but no float.
+ if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
+ return IsCandidate = false;
+ if (!IsCandidate)
+ return false;
+ if (Field1Ty && Field1Ty->isFloatingPointTy())
+ NeededArgFPRs++;
+ else if (Field1Ty)
+ NeededArgGPRs++;
+ if (Field2Ty && Field2Ty->isFloatingPointTy())
+ NeededArgFPRs++;
+ else if (Field2Ty)
+ NeededArgGPRs++;
+ return IsCandidate;
+}
+
+// Call getCoerceAndExpand for the two-element flattened struct described by
+// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
+// appropriate coerceToType and unpaddedCoerceToType.
+ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
+ llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
+ CharUnits Field2Off) const {
+ SmallVector<llvm::Type *, 3> CoerceElts;
+ SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
+ if (!Field1Off.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
+
+ CoerceElts.push_back(Field1Ty);
+ UnpaddedCoerceElts.push_back(Field1Ty);
+
+ if (!Field2Ty) {
+ return ABIArgInfo::getCoerceAndExpand(
+ llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
+ UnpaddedCoerceElts[0]);
+ }
+
+ CharUnits Field2Align =
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
+ CharUnits Field1Size =
+ CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
+ CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);
+
+ CharUnits Padding = CharUnits::Zero();
+ if (Field2Off > Field2OffNoPadNoPack)
+ Padding = Field2Off - Field2OffNoPadNoPack;
+ else if (Field2Off != Field2Align && Field2Off > Field1Size)
+ Padding = Field2Off - Field1Size;
+
+ bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
+
+ if (!Padding.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
+
+ CoerceElts.push_back(Field2Ty);
+ UnpaddedCoerceElts.push_back(Field2Ty);
+
+ auto CoerceToType =
+ llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
+ auto UnpaddedCoerceToType =
+ llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
+
+ return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
+}
+
ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
- int &ArgGPRsLeft) const {
+ int &ArgGPRsLeft,
+ int &ArgFPRsLeft) const {
assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
Ty = useFirstFieldIfTransparentUnion(Ty);
@@ -9257,6 +9474,42 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
return ABIArgInfo::getIgnore();
uint64_t Size = getContext().getTypeSize(Ty);
+
+ // Pass floating point values via FPRs if possible.
+ if (IsFixed && Ty->isFloatingType() && FLen >= Size && ArgFPRsLeft) {
+ ArgFPRsLeft--;
+ return ABIArgInfo::getDirect();
+ }
+
+ // Complex types for the hard float ABI must be passed direct rather than
+ // using CoerceAndExpand.
+ if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
+ QualType EltTy = Ty->getAs<ComplexType>()->getElementType();
+ if (getContext().getTypeSize(EltTy) <= FLen) {
+ ArgFPRsLeft -= 2;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ if (IsFixed && FLen && Ty->isStructureOrClassType()) {
+ llvm::Type *Field1Ty = nullptr;
+ llvm::Type *Field2Ty = nullptr;
+ CharUnits Field1Off = CharUnits::Zero();
+ CharUnits Field2Off = CharUnits::Zero();
+ int NeededArgGPRs;
+ int NeededArgFPRs;
+ bool IsCandidate =
+ detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
+ NeededArgGPRs, NeededArgFPRs);
+ if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
+ NeededArgFPRs <= ArgFPRsLeft) {
+ ArgGPRsLeft -= NeededArgGPRs;
+ ArgFPRsLeft -= NeededArgFPRs;
+ return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
+ Field2Off);
+ }
+ }
+
uint64_t NeededAlign = getContext().getTypeAlign(Ty);
bool MustUseStack = false;
// Determine the number of GPRs needed to pass the current argument
@@ -9315,10 +9568,12 @@ ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getIgnore();
int ArgGPRsLeft = 2;
+ int ArgFPRsLeft = FLen ? 2 : 0;
// The rules for return and argument types are the same, so defer to
// classifyArgumentType.
- return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft);
+ return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
+ ArgFPRsLeft);
}
Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -9353,8 +9608,9 @@ ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
- : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen)) {}
+ RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
+ unsigned FLen)
+ : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -9493,9 +9749,16 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
case llvm::Triple::riscv32:
- return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 32));
- case llvm::Triple::riscv64:
- return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 64));
+ case llvm::Triple::riscv64: {
+ StringRef ABIStr = getTarget().getABI();
+ unsigned XLen = getTarget().getPointerWidth(0);
+ unsigned ABIFLen = 0;
+ if (ABIStr.endswith("f"))
+ ABIFLen = 32;
+ else if (ABIStr.endswith("d"))
+ ABIFLen = 64;
+ return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
+ }
case llvm::Triple::systemz: {
bool HasVector = getTarget().getABI() == "vector";
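To make the new hard-float argument classification concrete, here is an illustrative example (an assumption, not taken from the commit) of a struct that detectFPCCEligibleStruct accepts when an FP ABI such as lp64d is in effect: flattened, it is one float plus one int, so with registers available it is passed via coerce-and-expand using one FPR and one GPR.

    struct FloatInt {
      float f;  // needs one argument FPR
      int   i;  // needs one argument GPR
    };

    // Hypothetical callee: under -mabi=lp64d the argument is typically split
    // across a floating-point register and an integer register, rather than
    // packed into integer registers as with the soft-float ABI.
    float sum(struct FloatInt p) { return p.f + (float)p.i; }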
diff --git a/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp b/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
index 6d7d69da4db5..1a66faeb3239 100644
--- a/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
+++ b/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
@@ -24,7 +24,6 @@
#include <vector>
#include <fcntl.h>
-#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/inotify.h>
#include <unistd.h>
@@ -335,7 +334,7 @@ std::unique_ptr<DirectoryWatcher> clang::DirectoryWatcher::create(
InotifyFD, Path.str().c_str(),
IN_CREATE | IN_DELETE | IN_DELETE_SELF | IN_MODIFY |
IN_MOVED_FROM | IN_MOVE_SELF | IN_MOVED_TO | IN_ONLYDIR | IN_IGNORED
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
+#ifdef IN_EXCL_UNLINK
| IN_EXCL_UNLINK
#endif
);
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index cb861f27aeda..2508178423bf 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -501,8 +501,6 @@ static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
return codegenoptions::LimitedDebugInfo;
}
-enum class FramePointerKind { None, NonLeaf, All };
-
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
switch (Triple.getArch()){
default:
@@ -517,9 +515,6 @@ static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
static bool useFramePointerForTargetByDefault(const ArgList &Args,
const llvm::Triple &Triple) {
- if (Args.hasArg(options::OPT_pg))
- return true;
-
switch (Triple.getArch()) {
case llvm::Triple::xcore:
case llvm::Triple::wasm32:
@@ -579,22 +574,32 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
return true;
}
-static FramePointerKind getFramePointerKind(const ArgList &Args,
- const llvm::Triple &Triple) {
- Arg *A = Args.getLastArg(options::OPT_fomit_frame_pointer,
- options::OPT_fno_omit_frame_pointer);
- bool OmitFP = A && A->getOption().matches(options::OPT_fomit_frame_pointer);
- bool NoOmitFP =
- A && A->getOption().matches(options::OPT_fno_omit_frame_pointer);
- if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
- (!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
- if (Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
- options::OPT_mno_omit_leaf_frame_pointer,
- Triple.isPS4CPU()))
- return FramePointerKind::NonLeaf;
- return FramePointerKind::All;
- }
- return FramePointerKind::None;
+static bool shouldUseFramePointer(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
+ options::OPT_fomit_frame_pointer))
+ return A->getOption().matches(options::OPT_fno_omit_frame_pointer) ||
+ mustUseNonLeafFramePointerForTarget(Triple);
+
+ if (Args.hasArg(options::OPT_pg))
+ return true;
+
+ return useFramePointerForTargetByDefault(Args, Triple);
+}
+
+static bool shouldUseLeafFramePointer(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_mno_omit_leaf_frame_pointer,
+ options::OPT_momit_leaf_frame_pointer))
+ return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer);
+
+ if (Args.hasArg(options::OPT_pg))
+ return true;
+
+ if (Triple.isPS4CPU())
+ return false;
+
+ return useFramePointerForTargetByDefault(Args, Triple);
}
/// Add a CC1 option to specify the debug compilation directory.
@@ -3946,12 +3951,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
CmdArgs.push_back("-fdefault-calling-conv=stdcall");
- FramePointerKind FPKeepKind = getFramePointerKind(Args, RawTriple);
- if (FPKeepKind != FramePointerKind::None) {
+ if (shouldUseFramePointer(Args, RawTriple))
CmdArgs.push_back("-mdisable-fp-elim");
- if (FPKeepKind == FramePointerKind::NonLeaf)
- CmdArgs.push_back("-momit-leaf-frame-pointer");
- }
if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
options::OPT_fno_zero_initialized_in_bss))
CmdArgs.push_back("-mno-zero-initialized-in-bss");
@@ -4136,6 +4137,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
+ if (!shouldUseLeafFramePointer(Args, RawTriple))
+ CmdArgs.push_back("-momit-leaf-frame-pointer");
+
// Explicitly error on some things we know we don't support and can't just
// ignore.
if (!Args.hasArg(options::OPT_fallow_unsupported)) {
@@ -5489,7 +5493,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Arg *A = Args.getLastArg(options::OPT_pg))
- if (FPKeepKind == FramePointerKind::None)
+ if (!shouldUseFramePointer(Args, Triple))
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
<< A->getAsString(Args);
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index 7445a94cfe59..783d1f9d0919 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -435,7 +435,6 @@ void ASTUnit::CacheCodeCompletionResults() {
| (1LL << CodeCompletionContext::CCC_UnionTag)
| (1LL << CodeCompletionContext::CCC_ClassOrStructTag)
| (1LL << CodeCompletionContext::CCC_Type)
- | (1LL << CodeCompletionContext::CCC_Symbol)
| (1LL << CodeCompletionContext::CCC_SymbolOrNewName)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 8a9844096f08..bc54e38a1a63 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -2408,7 +2408,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
.Cases("cl1.1", "CL1.1", LangStandard::lang_opencl11)
.Cases("cl1.2", "CL1.2", LangStandard::lang_opencl12)
.Cases("cl2.0", "CL2.0", LangStandard::lang_opencl20)
- .Case("c++", LangStandard::lang_openclcpp)
+ .Cases("clc++", "CLC++", LangStandard::lang_openclcpp)
.Default(LangStandard::lang_unspecified);
if (OpenCLLangStd == LangStandard::lang_unspecified) {
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index 3906e2ae1b98..6feb7bcbd4b7 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -437,17 +437,17 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
default:
llvm_unreachable("Unsupported OpenCL version");
}
- Builder.defineMacro("CL_VERSION_1_0", "100");
- Builder.defineMacro("CL_VERSION_1_1", "110");
- Builder.defineMacro("CL_VERSION_1_2", "120");
- Builder.defineMacro("CL_VERSION_2_0", "200");
+ }
+ Builder.defineMacro("CL_VERSION_1_0", "100");
+ Builder.defineMacro("CL_VERSION_1_1", "110");
+ Builder.defineMacro("CL_VERSION_1_2", "120");
+ Builder.defineMacro("CL_VERSION_2_0", "200");
- if (TI.isLittleEndian())
- Builder.defineMacro("__ENDIAN_LITTLE__");
+ if (TI.isLittleEndian())
+ Builder.defineMacro("__ENDIAN_LITTLE__");
- if (LangOpts.FastRelaxedMath)
- Builder.defineMacro("__FAST_RELAXED_MATH__");
- }
+ if (LangOpts.FastRelaxedMath)
+ Builder.defineMacro("__FAST_RELAXED_MATH__");
}
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index 3d55f5f2710f..c8fefdfc792a 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -4029,7 +4029,7 @@ _mm_storeu_si128(__m128i_u *__p, __m128i __b)
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void const *__p, __m128i __b)
+_mm_storeu_si64(void *__p, __m128i __b)
{
struct __storeu_si64 {
long long __v;
@@ -4050,7 +4050,7 @@ _mm_storeu_si64(void const *__p, __m128i __b)
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void const *__p, __m128i __b)
+_mm_storeu_si32(void *__p, __m128i __b)
{
struct __storeu_si32 {
int __v;
@@ -4071,7 +4071,7 @@ _mm_storeu_si32(void const *__p, __m128i __b)
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void const *__p, __m128i __b)
+_mm_storeu_si16(void *__p, __m128i __b)
{
struct __storeu_si16 {
short __v;
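A small usage sketch (not part of the commit) showing why the destination parameter of these narrow store intrinsics is now non-const: they write through the pointer.

    #include <emmintrin.h>
    #include <stdint.h>

    // Stores the low 32 bits of v to a possibly unaligned destination; the
    // old 'void const *' prototype contradicted the store semantics.
    void store_low_dword(uint8_t *dst, __m128i v) {
      _mm_storeu_si32(dst, v);
    }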
diff --git a/lib/Headers/opencl-c-base.h b/lib/Headers/opencl-c-base.h
index a82954ddd326..9a23333a33e6 100644
--- a/lib/Headers/opencl-c-base.h
+++ b/lib/Headers/opencl-c-base.h
@@ -126,7 +126,7 @@ typedef double double8 __attribute__((ext_vector_type(8)));
typedef double double16 __attribute__((ext_vector_type(16)));
#endif
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#define NULL ((void*)0)
#endif
@@ -276,7 +276,7 @@ typedef uint cl_mem_fence_flags;
*/
#define CLK_GLOBAL_MEM_FENCE 0x02
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
typedef enum memory_scope {
memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
@@ -288,9 +288,6 @@ typedef enum memory_scope {
#endif
} memory_scope;
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
-
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
/**
* Queue a memory fence to ensure correct ordering of memory
* operations between work-items of a work-group to
@@ -313,7 +310,7 @@ typedef enum memory_order
memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
@@ -389,14 +386,10 @@ typedef enum memory_order
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
-
// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-
#define CL_COMPLETE 0x0
#define CL_RUNNING 0x1
#define CL_SUBMITTED 0x2
@@ -435,7 +428,7 @@ typedef struct {
size_t localWorkSize[MAX_WORK_DIM];
} ndrange_t;
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_intel_device_side_avc_motion_estimation
#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
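The opencl-c-base.h changes widen every OpenCL 2.0 guard so that NULL, the memory_scope/memory_order enums, CLK_NULL_RESERVE_ID and ndrange_t are also defined when __OPENCL_CPP_VERSION__ is set, i.e. in the C++ for OpenCL mode. A small sketch of code that benefits (assumes compilation as C++ for OpenCL, e.g. -cl-std=clc++ in this release; the kernel itself is made up):

    kernel void find_first(global int *data, global int *out, int n) {
      global int *hit = NULL;   // NULL is now defined in C++ mode as well
      for (int i = 0; i < n && !hit; ++i)
        if (data[i] != 0)
          hit = data + i;
      *out = hit ? (int)(hit - data) : -1;
    }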
diff --git a/lib/Headers/opencl-c.h b/lib/Headers/opencl-c.h
index 4207c53ccedb..8741bccec9ad 100644
--- a/lib/Headers/opencl-c.h
+++ b/lib/Headers/opencl-c.h
@@ -11,11 +11,11 @@
#include "opencl-c-base.h"
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifndef cl_khr_depth_images
#define cl_khr_depth_images
#endif //cl_khr_depth_images
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
#ifdef cl_khr_3d_image_writes
@@ -23,10 +23,10 @@
#endif //cl_khr_3d_image_writes
#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
-#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
-#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
#define __ovld __attribute__((overloadable))
#define __conv __attribute__((convergent))
@@ -6517,11 +6517,11 @@ size_t __ovld __cnfn get_group_id(uint dimindx);
*/
size_t __ovld __cnfn get_global_offset(uint dimindx);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
size_t __ovld get_enqueued_local_size(uint dimindx);
size_t __ovld get_global_linear_id(void);
size_t __ovld get_local_linear_id(void);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.2, v1.2 s6.12.2, v2.0 s6.13.2 - Math functions
@@ -7352,7 +7352,7 @@ half16 __ovld __cnfn fmod(half16 x, half16 y);
* Returns fmin(x - floor (x), 0x1.fffffep-1f ).
* floor(x) is returned in iptr.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld fract(float x, float *iptr);
float2 __ovld fract(float2 x, float2 *iptr);
float3 __ovld fract(float3 x, float3 *iptr);
@@ -7434,7 +7434,7 @@ half4 __ovld fract(half4 x, __private half4 *iptr);
half8 __ovld fract(half8 x, __private half8 *iptr);
half16 __ovld fract(half16 x, __private half16 *iptr);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Extract mantissa and exponent from x. For each
@@ -7442,7 +7442,7 @@ half16 __ovld fract(half16 x, __private half16 *iptr);
* magnitude in the interval [1/2, 1) or 0. Each
* component of x equals mantissa returned * 2^exp.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld frexp(float x, int *exp);
float2 __ovld frexp(float2 x, int2 *exp);
float3 __ovld frexp(float3 x, int3 *exp);
@@ -7524,7 +7524,7 @@ half4 __ovld frexp(half4 x, __private int4 *exp);
half8 __ovld frexp(half8 x, __private int8 *exp);
half16 __ovld frexp(half16 x, __private int16 *exp);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Compute the value of the square root of x^2 + y^2
@@ -7649,7 +7649,7 @@ half8 __ovld __cnfn lgamma(half8 x);
half16 __ovld __cnfn lgamma(half16 x);
#endif //cl_khr_fp16
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld lgamma_r(float x, int *signp);
float2 __ovld lgamma_r(float2 x, int2 *signp);
float3 __ovld lgamma_r(float3 x, int3 *signp);
@@ -7731,7 +7731,7 @@ half4 __ovld lgamma_r(half4 x, __private int4 *signp);
half8 __ovld lgamma_r(half8 x, __private int8 *signp);
half16 __ovld lgamma_r(half16 x, __private int16 *signp);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Compute natural logarithm.
@@ -7955,7 +7955,7 @@ half16 __ovld __cnfn minmag(half16 x, half16 y);
* the argument. It stores the integral part in the object
* pointed to by iptr.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld modf(float x, float *iptr);
float2 __ovld modf(float2 x, float2 *iptr);
float3 __ovld modf(float3 x, float3 *iptr);
@@ -8037,7 +8037,7 @@ half4 __ovld modf(half4 x, __private half4 *iptr);
half8 __ovld modf(half8 x, __private half8 *iptr);
half16 __ovld modf(half16 x, __private half16 *iptr);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Returns a quiet NaN. The nancode may be placed
@@ -8215,7 +8215,7 @@ half16 __ovld __cnfn remainder(half16 x, half16 y);
* sign as x/y. It stores this signed value in the object
* pointed to by quo.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld remquo(float x, float y, int *quo);
float2 __ovld remquo(float2 x, float2 y, int2 *quo);
float3 __ovld remquo(float3 x, float3 y, int3 *quo);
@@ -8298,7 +8298,7 @@ half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Round to integral value (using round to nearest
* even rounding mode) in floating-point format.
@@ -8439,7 +8439,7 @@ half16 __ovld __cnfn sin(half16);
* is the return value and computed cosine is returned
* in cosval.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld sincos(float x, float *cosval);
float2 __ovld sincos(float2 x, float2 *cosval);
float3 __ovld sincos(float3 x, float3 *cosval);
@@ -8521,7 +8521,7 @@ half4 __ovld sincos(half4 x, __private half4 *cosval);
half8 __ovld sincos(half8 x, __private half8 *cosval);
half16 __ovld sincos(half16 x, __private half16 *cosval);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Compute hyperbolic sine.
@@ -9446,7 +9446,7 @@ ulong16 __ovld __cnfn clz(ulong16 x);
* returns the size in bits of the type of x or
* component type of x, if x is a vector.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
char __ovld ctz(char x);
uchar __ovld ctz(uchar x);
char2 __ovld ctz(char2 x);
@@ -9495,7 +9495,7 @@ long8 __ovld ctz(long8 x);
ulong8 __ovld ctz(ulong8 x);
long16 __ovld ctz(long16 x);
ulong16 __ovld ctz(ulong16 x);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Returns mul_hi(a, b) + c.
@@ -11340,7 +11340,7 @@ half8 __ovld vload8(size_t offset, const __constant half *p);
half16 __ovld vload16(size_t offset, const __constant half *p);
#endif //cl_khr_fp16
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
char2 __ovld vload2(size_t offset, const char *p);
uchar2 __ovld vload2(size_t offset, const uchar *p);
short2 __ovld vload2(size_t offset, const short *p);
@@ -11578,9 +11578,9 @@ half4 __ovld vload4(size_t offset, const __private half *p);
half8 __ovld vload8(size_t offset, const __private half *p);
half16 __ovld vload16(size_t offset, const __private half *p);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld vstore2(char2 data, size_t offset, char *p);
void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
void __ovld vstore2(short2 data, size_t offset, short *p);
@@ -11814,7 +11814,7 @@ void __ovld vstore4(half4 data, size_t offset, __private half *p);
void __ovld vstore8(half8 data, size_t offset, __private half *p);
void __ovld vstore16(half16 data, size_t offset, __private half *p);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Read sizeof (half) bytes of data from address
@@ -11825,13 +11825,13 @@ void __ovld vstore16(half16 data, size_t offset, __private half *p);
* must be 16-bit aligned.
*/
float __ovld vload_half(size_t offset, const __constant half *p);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld vload_half(size_t offset, const half *p);
#else
float __ovld vload_half(size_t offset, const __global half *p);
float __ovld vload_half(size_t offset, const __local half *p);
float __ovld vload_half(size_t offset, const __private half *p);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Read sizeof (halfn) bytes of data from address
@@ -11846,7 +11846,7 @@ float3 __ovld vload_half3(size_t offset, const __constant half *p);
float4 __ovld vload_half4(size_t offset, const __constant half *p);
float8 __ovld vload_half8(size_t offset, const __constant half *p);
float16 __ovld vload_half16(size_t offset, const __constant half *p);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float2 __ovld vload_half2(size_t offset, const half *p);
float3 __ovld vload_half3(size_t offset, const half *p);
float4 __ovld vload_half4(size_t offset, const half *p);
@@ -11868,7 +11868,7 @@ float3 __ovld vload_half3(size_t offset, const __private half *p);
float4 __ovld vload_half4(size_t offset, const __private half *p);
float8 __ovld vload_half8(size_t offset, const __private half *p);
float16 __ovld vload_half16(size_t offset, const __private half *p);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* The float value given by data is first
@@ -11881,7 +11881,7 @@ float16 __ovld vload_half16(size_t offset, const __private half *p);
* The default current rounding mode is round to
* nearest even.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld vstore_half(float data, size_t offset, half *p);
void __ovld vstore_half_rte(float data, size_t offset, half *p);
void __ovld vstore_half_rtz(float data, size_t offset, half *p);
@@ -11927,7 +11927,7 @@ void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
#endif //cl_khr_fp64
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* The floatn value given by data is converted to
@@ -11940,7 +11940,7 @@ void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
* The default current rounding mode is round to
* nearest even.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld vstore_half2(float2 data, size_t offset, half *p);
void __ovld vstore_half3(float3 data, size_t offset, half *p);
void __ovld vstore_half4(float4 data, size_t offset, half *p);
@@ -12146,7 +12146,7 @@ void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
#endif //cl_khr_fp64
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
@@ -12167,7 +12167,7 @@ float3 __ovld vloada_half3(size_t offset, const __constant half *p);
float4 __ovld vloada_half4(size_t offset, const __constant half *p);
float8 __ovld vloada_half8(size_t offset, const __constant half *p);
float16 __ovld vloada_half16(size_t offset, const __constant half *p);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float __ovld vloada_half(size_t offset, const half *p);
float2 __ovld vloada_half2(size_t offset, const half *p);
float3 __ovld vloada_half3(size_t offset, const half *p);
@@ -12193,7 +12193,7 @@ float3 __ovld vloada_half3(size_t offset, const __private half *p);
float4 __ovld vloada_half4(size_t offset, const __private half *p);
float8 __ovld vloada_half8(size_t offset, const __private half *p);
float16 __ovld vloada_half16(size_t offset, const __private half *p);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* The floatn value given by data is converted to
@@ -12211,7 +12211,7 @@ float16 __ovld vloada_half16(size_t offset, const __private half *p);
* mode. The default current rounding mode is
* round to nearest even.
*/
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld vstorea_half(float data, size_t offset, half *p);
void __ovld vstorea_half2(float2 data, size_t offset, half *p);
void __ovld vstorea_half3(float3 data, size_t offset, half *p);
@@ -12496,7 +12496,7 @@ void __ovld vstorea_half4_rtn(double4 data,size_t offset, __private half *p);
void __ovld vstorea_half8_rtn(double8 data,size_t offset, __private half *p);
void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
#endif //cl_khr_fp64
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
@@ -12532,10 +12532,10 @@ void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
void __ovld __conv barrier(cl_mem_fence_flags flags);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
@@ -12580,7 +12580,7 @@ void __ovld write_mem_fence(cl_mem_fence_flags flags);
// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
cl_mem_fence_flags __ovld get_fence(const void *ptr);
cl_mem_fence_flags __ovld get_fence(void *ptr);
@@ -12591,7 +12591,7 @@ cl_mem_fence_flags __ovld get_fence(void *ptr);
* where gentype is builtin type or user defined type.
*/
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
@@ -13371,7 +13371,7 @@ unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long v
// OpenCL v2.0 s6.13.11 - Atomics Functions
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// double atomics support requires extensions cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
@@ -13692,7 +13692,7 @@ void __ovld atomic_flag_clear(volatile atomic_flag *object);
void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
@@ -14186,7 +14186,7 @@ half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
#endif //cl_khr_fp16
-#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
@@ -14307,7 +14307,7 @@ int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, f
uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
@@ -14315,7 +14315,7 @@ int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_
int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
@@ -14325,7 +14325,7 @@ int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, f
uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
@@ -14333,7 +14333,7 @@ int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_
int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
#ifdef cl_khr_depth_images
float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
@@ -14358,7 +14358,7 @@ float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, in
#endif //cl_khr_gl_msaa_sharing
// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
@@ -14410,9 +14410,9 @@ int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, f
uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
#endif //cl_khr_mipmap_image
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
/**
* Sampler-less Image Access
@@ -14447,7 +14447,7 @@ float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
-#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
// Image read functions returning half4 type
#ifdef cl_khr_fp16
@@ -14457,7 +14457,7 @@ half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler,
half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
@@ -14471,11 +14471,11 @@ half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
-#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
#endif //cl_khr_fp16
// Image read functions for read_write images
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
@@ -14518,7 +14518,7 @@ float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 co
float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
#endif //cl_khr_gl_msaa_sharing
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
@@ -14569,7 +14569,7 @@ int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler,
uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
#endif //cl_khr_mipmap_image
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image read functions returning half4 type
#ifdef cl_khr_fp16
@@ -14580,7 +14580,7 @@ half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Write color value to location specified by coordinate
@@ -14681,7 +14681,7 @@ void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, flo
#endif //cl_khr_depth_images
// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
@@ -14708,7 +14708,7 @@ void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 c
void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
#endif
#endif //cl_khr_mipmap_image
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
#ifdef cl_khr_fp16
@@ -14723,7 +14723,7 @@ void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 col
#endif //cl_khr_fp16
// Image write functions for read_write images
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
@@ -14755,7 +14755,7 @@ void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float col
void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
#endif //cl_khr_depth_images
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
@@ -14782,7 +14782,7 @@ void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 c
void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
#endif
#endif //cl_khr_mipmap_image
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
#ifdef cl_khr_fp16
@@ -14795,7 +14795,7 @@ void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 col
void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
#endif //cl_khr_fp16
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Note: In OpenCL v1.0/1.1/1.2, image argument of image query builtin functions does not have
// access qualifier, which by default assume read_only access qualifier. Image query builtin
@@ -14843,7 +14843,7 @@ int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __cnfn get_image_width(read_write image1d_t image);
int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
int __ovld __cnfn get_image_width(read_write image2d_t image);
@@ -14860,7 +14860,7 @@ int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the image height in pixels.
@@ -14895,7 +14895,7 @@ int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __cnfn get_image_height(read_write image2d_t image);
int __ovld __cnfn get_image_height(read_write image3d_t image);
int __ovld __cnfn get_image_height(read_write image2d_array_t image);
@@ -14909,7 +14909,7 @@ int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the image depth in pixels.
@@ -14920,12 +14920,12 @@ int __ovld __cnfn get_image_depth(read_only image3d_t image);
int __ovld __cnfn get_image_depth(write_only image3d_t image);
#endif
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL Extension v2.0 s9.18 - Mipmaps
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
/**
* Return the image miplevels.
@@ -14961,7 +14961,7 @@ int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
#endif //cl_khr_mipmap_image
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the channel data type. Valid values are:
@@ -15018,7 +15018,7 @@ int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t im
int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
@@ -15035,7 +15035,7 @@ int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t im
int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the image channel order. Valid values are:
@@ -15090,7 +15090,7 @@ int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image)
int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
@@ -15107,7 +15107,7 @@ int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image)
int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the 2D image width and height as an int2
@@ -15140,7 +15140,7 @@ int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
#ifdef cl_khr_depth_images
@@ -15153,7 +15153,7 @@ int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
#endif //cl_khr_gl_msaa_sharing
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the 3D image width, height, and depth as an
@@ -15165,9 +15165,9 @@ int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
#ifdef cl_khr_3d_image_writes
int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
#endif
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the image array size.
@@ -15193,7 +15193,7 @@ size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_
size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
#endif //cl_khr_gl_msaa_sharing
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
#ifdef cl_khr_depth_images
@@ -15203,7 +15203,7 @@ size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image
size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
#endif //cl_khr_gl_msaa_sharing
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Return the number of samples associated with image
@@ -15219,17 +15219,17 @@ int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld get_image_num_samples(read_write image2d_msaa_t image);
int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#endif
// OpenCL v2.0 s6.13.15 - Work-group Functions
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __conv work_group_all(int predicate);
int __ovld __conv work_group_any(int predicate);
@@ -15327,16 +15327,16 @@ double __ovld __conv work_group_scan_inclusive_min(double x);
double __ovld __conv work_group_scan_inclusive_max(double x);
#endif //cl_khr_fp64
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v2.0 s6.13.17 - Enqueue Kernels
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
ndrange_t __ovld ndrange_1D(size_t);
ndrange_t __ovld ndrange_1D(size_t, size_t);
@@ -15365,7 +15365,7 @@ bool __ovld is_valid_event (clk_event_t event);
void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
queue_t __ovld get_default_queue(void);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL Extension v2.0 s9.17 - Sub-groups
@@ -15374,16 +15374,16 @@ queue_t __ovld get_default_queue(void);
uint __ovld get_sub_group_size(void);
uint __ovld get_max_sub_group_size(void);
uint __ovld get_num_sub_groups(void);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
uint __ovld get_enqueued_num_sub_groups(void);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
uint __ovld get_sub_group_id(void);
uint __ovld get_sub_group_local_id(void);
void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
-#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __conv sub_group_all(int predicate);
int __ovld __conv sub_group_any(int predicate);
@@ -15573,12 +15573,12 @@ uint2 __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, in
uint4 __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
uint8 __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
uint __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
uint __ovld __conv intel_sub_group_block_read( const __global uint* p );
uint2 __ovld __conv intel_sub_group_block_read2( const __global uint* p );
@@ -15590,12 +15590,12 @@ void __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, i
void __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
void __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
void __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
void __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
void __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
void __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
@@ -15713,12 +15713,12 @@ uint2 __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t ima
uint4 __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
uint8 __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
uint __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
uint2 __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
uint4 __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
uint8 __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
uint __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
uint2 __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
@@ -15730,12 +15730,12 @@ void __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t im
void __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
void __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
void __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
void __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
void __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
void __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
@@ -15747,12 +15747,12 @@ ushort2 __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t im
ushort4 __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
ushort8 __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
ushort __ovld __conv intel_sub_group_block_read_us( const __global ushort* p );
ushort2 __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
@@ -15764,12 +15764,12 @@ void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t i
void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort data);
void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
void __ovld __conv intel_sub_group_block_write_us( __global ushort* p, ushort data );
void __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
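In opencl-c.h the same guard widening is applied to the builtin declarations themselves, including the generic-address-space overloads (fract, frexp, modf, sincos, the vload/vstore half forms, work_group_barrier, the read_write image builtins, and the Intel sub-group block I/O). A sketch of code that depends on those overloads (assumes -cl-std=CL2.0 or the C++ for OpenCL mode; names are illustrative):

    kernel void split(global float *in, global float *fr, global float *ip) {
      size_t i = get_global_id(0);
      float whole;
      // The overload taking an unqualified (generic) float* is only declared
      // when the widened condition holds; &whole is a private pointer that
      // converts implicitly to the generic address space.
      fr[i] = fract(in[i], &whole);
      ip[i] = whole;
    }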
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 2756042f23eb..5658f46c99de 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -33,6 +33,7 @@
#include "clang/Lex/Token.h"
#include "clang/Lex/VariadicMacroSupport.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
@@ -2399,6 +2400,13 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
Token Tok;
LexUnexpandedToken(Tok);
+ // Ensure we consume the rest of the macro body if errors occur.
+ auto _ = llvm::make_scope_exit([&]() {
+ // The flag indicates if we are still waiting for 'eod'.
+ if (CurLexer->ParsingPreprocessorDirective)
+ DiscardUntilEndOfDirective();
+ });
+
// Used to un-poison and then re-poison identifiers of the __VA_ARGS__ ilk
// within their appropriate context.
VariadicMacroScopeGuard VariadicMacroScopeGuard(*this);
@@ -2420,12 +2428,8 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
} else if (Tok.is(tok::l_paren)) {
// This is a function-like macro definition. Read the argument list.
MI->setIsFunctionLike();
- if (ReadMacroParameterList(MI, LastTok)) {
- // Throw away the rest of the line.
- if (CurPPLexer->ParsingPreprocessorDirective)
- DiscardUntilEndOfDirective();
+ if (ReadMacroParameterList(MI, LastTok))
return nullptr;
- }
// If this is a definition of an ISO C/C++ variadic function-like macro (not
// using the GNU named varargs extension) inform our variadic scope guard
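The PPDirectives.cpp change replaces the hand-written cleanup on the ReadMacroParameterList error path with an llvm::make_scope_exit guard, so every early return out of the macro-body reader discards the remainder of the directive. The idiom, sketched outside of clang (the parse helpers are placeholders, not real APIs):

    #include "llvm/ADT/ScopeExit.h"

    // Placeholder helpers standing in for the real lexer operations.
    bool readParameterList();
    bool readTokensUntilEod();
    void discardRestOfDirective();

    bool readBody() {
      // In the patch the callback additionally checks a "still parsing the
      // directive" flag before discarding.
      auto DiscardOnExit =
          llvm::make_scope_exit([] { discardRestOfDirective(); });
      if (!readParameterList())
        return false;            // the cleanup above still runs on this path
      return readTokensUntilEod();
    }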
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 11fed28b52db..485d39e2c9e8 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -961,6 +961,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// All dllexport classes should have been processed already.
assert(DelayedDllExportClasses.empty());
+ assert(DelayedDllExportMemberFunctions.empty());
// Remove file scoped decls that turned out to be used.
UnusedFileScopedDecls.erase(
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index a6c52b7d4b2b..8f19edbc4f36 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -1984,10 +1984,27 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
ASTContext::GetBuiltinTypeError Error;
QualType R = Context.GetBuiltinType(ID, Error);
if (Error) {
- if (ForRedeclaration)
- Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
- << getHeaderName(Context.BuiltinInfo, ID, Error)
+ if (!ForRedeclaration)
+ return nullptr;
+
+ // If we have a builtin without an associated type we should not emit a
+ // warning when we were not able to find a type for it.
+ if (Error == ASTContext::GE_Missing_type)
+ return nullptr;
+
+ // If we could not find a type for setjmp it is because the jmp_buf type was
+ // not defined prior to the setjmp declaration.
+ if (Error == ASTContext::GE_Missing_setjmp) {
+ Diag(Loc, diag::warn_implicit_decl_no_jmp_buf)
<< Context.BuiltinInfo.getName(ID);
+ return nullptr;
+ }
+
+ // Generally, we emit a warning that the declaration requires the
+ // appropriate header.
+ Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
+ << getHeaderName(Context.BuiltinInfo, ID, Error)
+ << Context.BuiltinInfo.getName(ID);
return nullptr;
}
@@ -11527,9 +11544,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Check for self-references within variable initializers.
// Variables declared within a function/method body (except for references)
// are handled by a dataflow analysis.
- if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() ||
- VDecl->getType()->isReferenceType()) {
- CheckSelfReference(*this, RealDecl, Init, DirectInit);
+ // This is undefined behavior in C++, but valid in C.
+ if (getLangOpts().CPlusPlus) {
+ if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() ||
+ VDecl->getType()->isReferenceType()) {
+ CheckSelfReference(*this, RealDecl, Init, DirectInit);
+ }
}
// If the type changed, it means we had an incomplete type that was
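The AddInitializerToDecl hunk restricts the self-reference check to C++; as the new comment notes, the pattern it flags is undefined behavior there but valid in C. A common C idiom of the kind the comment refers to (illustrative only):

    /* Valid, idiomatic C: a static object may refer to itself in its own
       initializer, e.g. a self-linked sentinel node. */
    struct node { struct node *next; };
    static struct node sentinel = { &sentinel };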
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 9a6385f28319..15984f89e22d 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -1030,8 +1030,10 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
TemplateArgumentListInfo Args(Loc, Loc);
Args.addArgument(getTrivialTypeTemplateArgument(S, Loc, T));
- // If there's no tuple_size specialization, it's not tuple-like.
- if (lookupStdTypeTraitMember(S, R, Loc, "tuple_size", Args, /*DiagID*/0))
+ // If there's no tuple_size specialization or the lookup of 'value' is empty,
+ // it's not tuple-like.
+ if (lookupStdTypeTraitMember(S, R, Loc, "tuple_size", Args, /*DiagID*/ 0) ||
+ R.empty())
return IsTupleLike::NotTupleLike;
// If we get this far, we've committed to the tuple interpretation, but
@@ -1048,11 +1050,6 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
}
} Diagnoser(R, Args);
- if (R.empty()) {
- Diagnoser.diagnoseNotICE(S, Loc, SourceRange());
- return IsTupleLike::Error;
- }
-
ExprResult E =
S.BuildDeclarationNameExpr(CXXScopeSpec(), R, /*NeedsADL*/false);
if (E.isInvalid())
@@ -6165,8 +6162,8 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
M->dropAttr<DLLExportAttr>();
if (M->hasAttr<DLLExportAttr>()) {
- DefineImplicitSpecialMember(*this, M, M->getLocation());
- ActOnFinishInlineFunctionDef(M);
+ // Define after any fields with in-class initializers have been parsed.
+ DelayedDllExportMemberFunctions.push_back(M);
}
}
};
@@ -11419,6 +11416,21 @@ void Sema::ActOnFinishCXXMemberDecls() {
void Sema::ActOnFinishCXXNonNestedClass(Decl *D) {
referenceDLLExportedClassMethods();
+
+ if (!DelayedDllExportMemberFunctions.empty()) {
+ SmallVector<CXXMethodDecl*, 4> WorkList;
+ std::swap(DelayedDllExportMemberFunctions, WorkList);
+ for (CXXMethodDecl *M : WorkList) {
+ DefineImplicitSpecialMember(*this, M, M->getLocation());
+
+ // Pass the method to the consumer to get emitted. This is not necessary
+ // for explicit instantiation definitions, as they will get emitted
+ // anyway.
+ if (M->getParent()->getTemplateSpecializationKind() !=
+ TSK_ExplicitInstantiationDefinition)
+ ActOnFinishInlineFunctionDef(M);
+ }
+ }
}
void Sema::referenceDLLExportedClassMethods() {
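The SemaDeclCXX hunks defer defining dllexported implicit special members until ActOnFinishCXXNonNestedClass, i.e. until after delayed in-class initializers have been parsed. The shape of code this matters for, as a sketch (MSVC-style attribute spelling; the member names are made up):

    struct __declspec(dllexport) Widget {
      // The exported implicit default constructor must not be defined before
      // this delayed-parsed initializer has been seen.
      int id = next_id();
      static int next_id();
    };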
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 705e3b9bd7fb..c1c08969c7bd 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -6794,14 +6794,10 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
// it's legal for the type to be incomplete if this is a pseudo-destructor
// call. We'll do more incomplete-type checks later in the lookup process,
// so just skip this check for ObjC types.
- if (BaseType->isObjCObjectOrInterfaceType()) {
+ if (!BaseType->isRecordType()) {
ObjectType = ParsedType::make(BaseType);
MayBePseudoDestructor = true;
return Base;
- } else if (!BaseType->isRecordType()) {
- ObjectType = nullptr;
- MayBePseudoDestructor = true;
- return Base;
}
// The object type must be complete (or dependent), or
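The ActOnStartCXXMemberReference change folds the Objective-C special case into the general "not a record type" path, so any non-record base is treated as a potential pseudo-destructor with its object type recorded. The classic pseudo-destructor pattern this path serves:

    // Valid C++: a pseudo-destructor call on a scalar type; nothing is run,
    // but generic code can uniformly write p->~T().
    typedef float F;
    void destroy(F *p) { p->~F(); }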
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index bc1069609336..60f34775c6b2 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -1289,7 +1289,16 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// FIXME: Better EqualLoc?
InitializationKind Kind =
InitializationKind::CreateCopy(expr->getBeginLoc(), SourceLocation());
- InitializationSequence Seq(SemaRef, Entity, Kind, expr,
+
+ // Vector elements can be initialized from other vectors in which case
+ // we need initialization entity with a type of a vector (and not a vector
+ // element!) initializing multiple vector elements.
+ auto TmpEntity =
+ (ElemType->isExtVectorType() && !Entity.getType()->isExtVectorType())
+ ? InitializedEntity::InitializeTemporary(ElemType)
+ : Entity;
+
+ InitializationSequence Seq(SemaRef, TmpEntity, Kind, expr,
/*TopLevelOfInitList*/ true);
// C++14 [dcl.init.aggr]p13:
@@ -1300,8 +1309,7 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// assignment-expression.
if (Seq || isa<InitListExpr>(expr)) {
if (!VerifyOnly) {
- ExprResult Result =
- Seq.Perform(SemaRef, Entity, Kind, expr);
+ ExprResult Result = Seq.Perform(SemaRef, TmpEntity, Kind, expr);
if (Result.isInvalid())
hadError = true;
@@ -8108,7 +8116,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
// argument passing.
assert(Step->Type->isSamplerT() &&
"Sampler initialization on non-sampler type.");
- Expr *Init = CurInit.get();
+ Expr *Init = CurInit.get()->IgnoreParens();
QualType SourceType = Init->getType();
// Case 1
if (Entity.isParameterKind()) {
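The first SemaInit hunk builds a temporary InitializedEntity of the full vector type when a vector element is being initialized from another vector, which is what lets a shorter vector fill several elements of a longer one inside an initializer list. Roughly the kind of source this covers (a sketch using clang's ext_vector_type, not taken from the patch's tests):

    typedef float float2 __attribute__((ext_vector_type(2)));
    typedef float float4 __attribute__((ext_vector_type(4)));

    float4 widen(float2 lo) {
      // lo provides the first two elements, the scalars the remaining two.
      float4 v = { lo, 0.0f, 1.0f };
      return v;
    }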
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index bd68011c18b2..4ac87469bf44 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -139,6 +139,7 @@ private:
/// clause, false otherwise.
llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
unsigned AssociatedLoops = 1;
+ bool HasMutipleLoops = false;
const Decl *PossiblyLoopCounter = nullptr;
bool NowaitRegion = false;
bool CancelRegion = false;
@@ -678,12 +679,19 @@ public:
/// Set collapse value for the region.
void setAssociatedLoops(unsigned Val) {
getTopOfStack().AssociatedLoops = Val;
+ if (Val > 1)
+ getTopOfStack().HasMutipleLoops = true;
}
/// Return collapse value for region.
unsigned getAssociatedLoops() const {
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->AssociatedLoops : 0;
}
+ /// Returns true if the construct is associated with multiple loops.
+ bool hasMutipleLoops() const {
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->HasMutipleLoops : false;
+ }
/// Marks current target region as one with closely nested teams
/// region.
@@ -1874,6 +1882,13 @@ bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
!isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
return true;
}
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (DSAStack->isThreadPrivate(const_cast<VarDecl *>(VD)) &&
+ DSAStack->isForceVarCapturing() &&
+ !DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K == OMPC_copyin; }, Level))
+ return true;
+ }
return DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
(DSAStack->isClauseParsingMode() &&
@@ -5604,13 +5619,14 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
if (ValueDecl *D = ISC.getLoopDecl()) {
auto *VD = dyn_cast<VarDecl>(D);
+ DeclRefExpr *PrivateRef = nullptr;
if (!VD) {
if (VarDecl *Private = isOpenMPCapturedDecl(D)) {
VD = Private;
} else {
- DeclRefExpr *Ref = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
- /*WithInit=*/false);
- VD = cast<VarDecl>(Ref->getDecl());
+ PrivateRef = buildCapture(*this, D, ISC.getLoopDeclRefExpr(),
+ /*WithInit=*/false);
+ VD = cast<VarDecl>(PrivateRef->getDecl());
}
}
DSAStack->addLoopControlVariable(D, VD);
@@ -5623,6 +5639,49 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
Var->getType().getNonLValueExprType(Context),
ForLoc, /*RefersToCapture=*/true));
}
+ OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
+ // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables
+ // Referenced in a Construct, C/C++]. The loop iteration variable in the
+ // associated for-loop of a simd construct with just one associated
+ // for-loop may be listed in a linear clause with a constant-linear-step
+ // that is the increment of the associated for-loop. The loop iteration
+ // variable(s) in the associated for-loop(s) of a for or parallel for
+ // construct may be listed in a private or lastprivate clause.
+ DSAStackTy::DSAVarData DVar =
+ DSAStack->getTopDSA(D, /*FromParent=*/false);
+      // If LoopDeclRefExpr is nullptr it means the corresponding loop variable
+      // is declared inside the loop and is predetermined as private.
+ Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
+ OpenMPClauseKind PredeterminedCKind =
+ isOpenMPSimdDirective(DKind)
+          ? (DSAStack->hasMultipleLoops() ? OMPC_lastprivate : OMPC_linear)
+ : OMPC_private;
+ if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
+ DVar.CKind != PredeterminedCKind && DVar.RefExpr &&
+ (LangOpts.OpenMP <= 45 || (DVar.CKind != OMPC_lastprivate &&
+ DVar.CKind != OMPC_private))) ||
+ ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
+ isOpenMPDistributeDirective(DKind)) &&
+ !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
+ DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
+ (DVar.CKind != OMPC_private || DVar.RefExpr)) {
+ Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPDirectiveName(DKind)
+ << getOpenMPClauseName(PredeterminedCKind);
+ if (DVar.RefExpr == nullptr)
+ DVar.CKind = PredeterminedCKind;
+ reportOriginalDsa(*this, DSAStack, D, DVar,
+ /*IsLoopIterVar=*/true);
+ } else if (LoopDeclRefExpr) {
+ // Make the loop iteration variable private (for worksharing
+        // constructs), linear (for simd directives with only one
+ // associated loop) or lastprivate (for simd directives with several
+ // collapsed or ordered loops).
+ if (DVar.CKind == OMPC_unknown)
+ DSAStack->addDSA(D, LoopDeclRefExpr, PredeterminedCKind,
+ PrivateRef);
+ }
}
}
DSAStack->setAssociatedLoops(AssociatedLoops - 1);
@@ -5677,8 +5736,6 @@ static bool checkOpenMPIterationSpace(
// Check loop variable's type.
if (ValueDecl *LCDecl = ISC.getLoopDecl()) {
- Expr *LoopDeclRefExpr = ISC.getLoopDeclRefExpr();
-
// OpenMP [2.6, Canonical Loop Form]
// Var is one of the following:
// A variable of signed or unsigned integer type.
@@ -5704,46 +5761,6 @@ static bool checkOpenMPIterationSpace(
// sharing attributes.
VarsWithImplicitDSA.erase(LCDecl);
- // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
- // in a Construct, C/C++].
- // The loop iteration variable in the associated for-loop of a simd
- // construct with just one associated for-loop may be listed in a linear
- // clause with a constant-linear-step that is the increment of the
- // associated for-loop.
- // The loop iteration variable(s) in the associated for-loop(s) of a for or
- // parallel for construct may be listed in a private or lastprivate clause.
- DSAStackTy::DSAVarData DVar = DSA.getTopDSA(LCDecl, false);
- // If LoopVarRefExpr is nullptr it means the corresponding loop variable is
- // declared in the loop and it is predetermined as a private.
- OpenMPClauseKind PredeterminedCKind =
- isOpenMPSimdDirective(DKind)
- ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate)
- : OMPC_private;
- if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
- DVar.CKind != PredeterminedCKind && DVar.RefExpr &&
- (SemaRef.getLangOpts().OpenMP <= 45 ||
- (DVar.CKind != OMPC_lastprivate && DVar.CKind != OMPC_private))) ||
- ((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
- isOpenMPDistributeDirective(DKind)) &&
- !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
- DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
- (DVar.CKind != OMPC_private || DVar.RefExpr)) {
- SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
- << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
- << getOpenMPClauseName(PredeterminedCKind);
- if (DVar.RefExpr == nullptr)
- DVar.CKind = PredeterminedCKind;
- reportOriginalDsa(SemaRef, &DSA, LCDecl, DVar, /*IsLoopIterVar=*/true);
- HasErrors = true;
- } else if (LoopDeclRefExpr != nullptr) {
- // Make the loop iteration variable private (for worksharing constructs),
- // linear (for simd directives with the only one associated loop) or
- // lastprivate (for simd directives with several collapsed or ordered
- // loops).
- if (DVar.CKind == OMPC_unknown)
- DSA.addDSA(LCDecl, LoopDeclRefExpr, PredeterminedCKind);
- }
-
assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars");
// Check test-expr.
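For illustration, a small OpenMP C example that now trips the relocated
err_omp_loop_var_dsa check as soon as the loop initialization is analysed; the
function and variable names are made up:

  void f(int n) {
    int i;
    // The iteration variable of a worksharing loop may only be private or
    // lastprivate, so listing it in firstprivate is diagnosed; on a simd
    // construct with a single associated loop the predetermined kind is
    // linear instead.
    #pragma omp parallel for firstprivate(i)
    for (i = 0; i < n; ++i)
      ;
  }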
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index b123a739a7ab..9b051e02d127 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -383,25 +383,19 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
} else if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
if (!InputExpr->isValueDependent()) {
Expr::EvalResult EVResult;
- if (!InputExpr->EvaluateAsRValue(EVResult, Context, true))
- return StmtError(
- Diag(InputExpr->getBeginLoc(), diag::err_asm_immediate_expected)
- << Info.getConstraintStr() << InputExpr->getSourceRange());
-
- // For compatibility with GCC, we also allow pointers that would be
- // integral constant expressions if they were cast to int.
- llvm::APSInt IntResult;
- if (!EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
- Context))
- return StmtError(
- Diag(InputExpr->getBeginLoc(), diag::err_asm_immediate_expected)
- << Info.getConstraintStr() << InputExpr->getSourceRange());
-
- if (!Info.isValidAsmImmediate(IntResult))
- return StmtError(Diag(InputExpr->getBeginLoc(),
- diag::err_invalid_asm_value_for_constraint)
- << IntResult.toString(10) << Info.getConstraintStr()
- << InputExpr->getSourceRange());
+ if (InputExpr->EvaluateAsRValue(EVResult, Context, true)) {
+ // For compatibility with GCC, we also allow pointers that would be
+ // integral constant expressions if they were cast to int.
+ llvm::APSInt IntResult;
+ if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
+ Context))
+ if (!Info.isValidAsmImmediate(IntResult))
+ return StmtError(Diag(InputExpr->getBeginLoc(),
+ diag::err_invalid_asm_value_for_constraint)
+ << IntResult.toString(10)
+ << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
}
} else {
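A hedged x86 sketch of the behaviour after this rewrite: operands of
immediate-only constraints that do fold to an integer are still range-checked,
while operands the front end cannot fold are no longer rejected here and are
left for later stages to validate.

  void port_write(unsigned char v) {
    // "N" on x86 requires an 8-bit immediate; 0x70 folds to an integer and is
    // checked against the constraint's valid range as before.
    __asm__ volatile("outb %0, %1" : : "a"(v), "N"(0x70));
  }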
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 3212281cc34d..ec4b63a2e508 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -362,13 +362,27 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// x->B::f, and we are looking into the type of the object.
assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
LookupCtx = computeDeclContext(ObjectType);
- IsDependent = !LookupCtx;
+ IsDependent = !LookupCtx && ObjectType->isDependentType();
assert((IsDependent || !ObjectType->isIncompleteType() ||
ObjectType->castAs<TagType>()->isBeingDefined()) &&
"Caller should have completed object type");
- // Template names cannot appear inside an Objective-C class or object type.
- if (ObjectType->isObjCObjectOrInterfaceType()) {
+ // Template names cannot appear inside an Objective-C class or object type
+ // or a vector type.
+ //
+ // FIXME: This is wrong. For example:
+ //
+ // template<typename T> using Vec = T __attribute__((ext_vector_type(4)));
+ // Vec<int> vi;
+ // vi.Vec<int>::~Vec<int>();
+ //
+ // ... should be accepted but we will not treat 'Vec' as a template name
+ // here. The right thing to do would be to check if the name is a valid
+ // vector component name, and look up a template name if not. And similarly
+ // for lookups into Objective-C class and object types, where the same
+ // problem can arise.
+ if (ObjectType->isObjCObjectOrInterfaceType() ||
+ ObjectType->isVectorType()) {
Found.clear();
return false;
}
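A plausible, purely hypothetical illustration of why vector object types are now
excluded from template-name lookup; the pre-patch diagnostic is not shown in
this diff:

  typedef float float4 __attribute__((ext_vector_type(4)));

  template <typename T> struct x {};  // shares its name with a vector component

  float first(float4 v) {
    return v.x;  // component access: 'x' must not be looked up as a template
  }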
diff --git a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
index 6f1060b5f26d..600458a743ea 100644
--- a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
@@ -406,13 +406,15 @@ void IteratorChecker::checkPreCall(const CallEvent &Call,
} else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
       // Check for out-of-range increments and decrements
- if (Call.getNumArgs() >= 1) {
+ if (Call.getNumArgs() >= 1 &&
+ Call.getArgExpr(0)->getType()->isIntegralOrEnumerationType()) {
verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
InstCall->getCXXThisVal(),
Call.getArgSVal(0));
}
} else {
- if (Call.getNumArgs() >= 2) {
+ if (Call.getNumArgs() >= 2 &&
+ Call.getArgExpr(1)->getType()->isIntegralOrEnumerationType()) {
verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
Call.getArgSVal(0), Call.getArgSVal(1));
}
@@ -590,14 +592,16 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
return;
} else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (Call.getNumArgs() >= 1) {
+ if (Call.getNumArgs() >= 1 &&
+ Call.getArgExpr(0)->getType()->isIntegralOrEnumerationType()) {
handleRandomIncrOrDecr(C, Func->getOverloadedOperator(),
Call.getReturnValue(),
InstCall->getCXXThisVal(), Call.getArgSVal(0));
return;
}
} else {
- if (Call.getNumArgs() >= 2) {
+ if (Call.getNumArgs() >= 2 &&
+ Call.getArgExpr(1)->getType()->isIntegralOrEnumerationType()) {
handleRandomIncrOrDecr(C, Func->getOverloadedOperator(),
Call.getReturnValue(), Call.getArgSVal(0),
Call.getArgSVal(1));
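A hedged sketch of what the added type checks guard against; Iter is a made-up
iterator-like type, not anything from the analyzed code:

  struct Iter {
    int *p;
    Iter operator+(int n) const { return {p + n}; }        // integral offset
    Iter operator+(const Iter &rhs) const { return {p}; }  // not an offset
  };

  void touch(Iter b) {
    Iter q = b + 3;  // still modelled as a random increment by 3
    Iter r = b + q;  // argument is not integral, so the checker now skips it
    (void)q; (void)r;
  }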